From 490dea45d00f01847ebebd007685d564aaf2cd98 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 24 Nov 2008 17:06:57 +0100 Subject: itimers: remove the per-cpu-ish-ness Either we bounce once cacheline per cpu per tick, yielding n^2 bounces or we just bounce a single.. Also, using per-cpu allocations for the thread-groups complicates the per-cpu allocator in that its currently aimed to be a fixed sized allocator and the only possible extention to that would be vmap based, which is seriously constrained on 32 bit archs. So making the per-cpu memory requirement depend on the number of processes is an issue. Lastly, it didn't deal with cpu-hotplug, although admittedly that might be fixable. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/fork.c | 15 +++++----- kernel/posix-cpu-timers.c | 70 ----------------------------------------------- kernel/sched_stats.h | 33 ++++++++++------------ 3 files changed, 22 insertions(+), 96 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 7b8f2a78be3..7087d8c0e5e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -820,14 +820,15 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) int ret; if (clone_flags & CLONE_THREAD) { - ret = thread_group_cputime_clone_thread(current); - if (likely(!ret)) { - atomic_inc(¤t->signal->count); - atomic_inc(¤t->signal->live); - } - return ret; + atomic_inc(¤t->signal->count); + atomic_inc(¤t->signal->live); + return 0; } sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + + if (sig) + posix_cpu_timers_init_group(sig); + tsk->signal = sig; if (!sig) return -ENOMEM; @@ -864,8 +865,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); - posix_cpu_timers_init_group(sig); - acct_init_pacct(&sig->pacct); tty_audit_fork(sig); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 157de3a4783..fa07da94d7b 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -9,76 +9,6 @@ #include #include -/* - * Allocate the thread_group_cputime structure appropriately and fill in the - * current values of the fields. Called from copy_signal() via - * thread_group_cputime_clone_thread() when adding a second or subsequent - * thread to a thread group. Assumes interrupts are enabled when called. - */ -int thread_group_cputime_alloc(struct task_struct *tsk) -{ - struct signal_struct *sig = tsk->signal; - struct task_cputime *cputime; - - /* - * If we have multiple threads and we don't already have a - * per-CPU task_cputime struct (checked in the caller), allocate - * one and fill it in with the times accumulated so far. We may - * race with another thread so recheck after we pick up the sighand - * lock. - */ - cputime = alloc_percpu(struct task_cputime); - if (cputime == NULL) - return -ENOMEM; - spin_lock_irq(&tsk->sighand->siglock); - if (sig->cputime.totals) { - spin_unlock_irq(&tsk->sighand->siglock); - free_percpu(cputime); - return 0; - } - sig->cputime.totals = cputime; - cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id()); - cputime->utime = tsk->utime; - cputime->stime = tsk->stime; - cputime->sum_exec_runtime = tsk->se.sum_exec_runtime; - spin_unlock_irq(&tsk->sighand->siglock); - return 0; -} - -/** - * thread_group_cputime - Sum the thread group time fields across all CPUs. - * - * @tsk: The task we use to identify the thread group. 
- * @times: task_cputime structure in which we return the summed fields. - * - * Walk the list of CPUs to sum the per-CPU time fields in the thread group - * time structure. - */ -void thread_group_cputime( - struct task_struct *tsk, - struct task_cputime *times) -{ - struct task_cputime *totals, *tot; - int i; - - totals = tsk->signal->cputime.totals; - if (!totals) { - times->utime = tsk->utime; - times->stime = tsk->stime; - times->sum_exec_runtime = tsk->se.sum_exec_runtime; - return; - } - - times->stime = times->utime = cputime_zero; - times->sum_exec_runtime = 0; - for_each_possible_cpu(i) { - tot = per_cpu_ptr(totals, i); - times->utime = cputime_add(times->utime, tot->utime); - times->stime = cputime_add(times->stime, tot->stime); - times->sum_exec_runtime += tot->sum_exec_runtime; - } -} - /* * Called after updating RLIMIT_CPU to set timer expiration if necessary. */ diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index f2773b5d122..8ab0cef8eca 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) static inline void account_group_user_time(struct task_struct *tsk, cputime_t cputime) { + struct task_cputime *times; struct signal_struct *sig; /* tsk == current, ensure it is safe to use ->signal */ @@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk, return; sig = tsk->signal; - if (sig->cputime.totals) { - struct task_cputime *times; + times = &sig->cputime.totals; - times = per_cpu_ptr(sig->cputime.totals, get_cpu()); - times->utime = cputime_add(times->utime, cputime); - put_cpu_no_resched(); - } + spin_lock(×->lock); + times->utime = cputime_add(times->utime, cputime); + spin_unlock(×->lock); } /** @@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk, static inline void account_group_system_time(struct task_struct *tsk, cputime_t cputime) { + struct task_cputime *times; struct signal_struct *sig; /* tsk == current, ensure it is safe to use ->signal */ @@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk, return; sig = tsk->signal; - if (sig->cputime.totals) { - struct task_cputime *times; + times = &sig->cputime.totals; - times = per_cpu_ptr(sig->cputime.totals, get_cpu()); - times->stime = cputime_add(times->stime, cputime); - put_cpu_no_resched(); - } + spin_lock(×->lock); + times->stime = cputime_add(times->stime, cputime); + spin_unlock(×->lock); } /** @@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk, static inline void account_group_exec_runtime(struct task_struct *tsk, unsigned long long ns) { + struct task_cputime *times; struct signal_struct *sig; sig = tsk->signal; @@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk, if (unlikely(!sig)) return; - if (sig->cputime.totals) { - struct task_cputime *times; + times = &sig->cputime.totals; - times = per_cpu_ptr(sig->cputime.totals, get_cpu()); - times->sum_exec_runtime += ns; - put_cpu_no_resched(); - } + spin_lock(×->lock); + times->sum_exec_runtime += ns; + spin_unlock(×->lock); } -- cgit v1.2.3 From 783adf42cf039083dd3c734c07c3bdc707e2bb15 Mon Sep 17 00:00:00 2001 From: Steven Noonan Date: Sun, 11 Jan 2009 01:04:21 -0800 Subject: kernel/fork.c: unused variable 'ret' Removed the unused variable. 
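The unused "ret" removed here was left behind by the itimers rework at the top of this log, which replaced the per-CPU cputime totals (allocated with alloc_percpu() and summed over every possible CPU on each read) with a single task_cputime structure updated under times->lock. The fragment below is a rough userspace analogue of that new locked-update scheme, assuming pthreads and invented names; it is not kernel code, it only shows the shape of the change made in kernel/sched_stats.h above.

/*
 * Userspace analogue (illustrative only): one shared totals structure per
 * "thread group", protected by a spinlock, instead of per-CPU counters
 * that must be summed over every possible CPU on each read.
 */
#include <pthread.h>
#include <stdio.h>

struct cputime_totals {
	unsigned long long utime;
	unsigned long long stime;
	unsigned long long sum_exec_runtime;
	pthread_spinlock_t lock;	/* plays the role of times->lock */
};

static struct cputime_totals totals;

/* Analogue of account_group_user_time(): a single locked update. */
static void account_user_time(unsigned long long delta)
{
	pthread_spin_lock(&totals.lock);
	totals.utime += delta;
	pthread_spin_unlock(&totals.lock);
}

int main(void)
{
	pthread_spin_init(&totals.lock, PTHREAD_PROCESS_PRIVATE);
	account_user_time(10);
	account_user_time(5);
	printf("utime=%llu\n", totals.utime);	/* reads no longer walk all CPUs */
	pthread_spin_destroy(&totals.lock);
	return 0;
}

The trade-off the itimers commit message describes is visible here: every accounting update now touches one shared cache line under a lock, but reading the totals no longer scales with the number of possible CPUs and no per-process per-CPU allocation is needed.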
Signed-off-by: Steven Noonan Signed-off-by: Ingo Molnar --- kernel/fork.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index e995899ea83..81da4aae85c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -817,7 +817,6 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; - int ret; if (clone_flags & CLONE_THREAD) { atomic_inc(¤t->signal->count); -- cgit v1.2.3 From e4fa4c97016037620f9dc8bafe03e1086b665b4c Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 14 Jan 2009 14:58:15 +0800 Subject: rcu: add __cpuinit to rcu_init_percpu_data() Impact: reduce memory footprint add __cpuinit to rcu_init_percpu_data(), and this function's text will be discarded after boot when !CONFIG_HOTPLUG_CPU. Signed-off-by: Lai Jiangshan Signed-off-by: Ingo Molnar --- kernel/rcuclassic.c | 2 +- kernel/rcutree.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index 490934fc7ac..bd5a9003497 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c @@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user) raise_rcu_softirq(); } -static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, +static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, struct rcu_data *rdp) { unsigned long flags; diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f2d8638e6c6..b2fd602a6f6 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu) * access due to the fact that this CPU cannot possibly have any RCU * callbacks in flight yet. */ -static void +static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp) { unsigned long flags; -- cgit v1.2.3 From baf48f6577e581a9adb8fe849dc80e24b21d171d Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Mon, 12 Jan 2009 21:15:17 -0800 Subject: softlock: fix false panic which can occur if softlockup_thresh is reduced At run-time, if softlockup_thresh is changed to a much lower value, touch_timestamp is likely to be much older than the new softlock_thresh. This will cause a false softlockup to be detected. If softlockup_panic is enabled, the system will panic. The fix is to touch all watchdogs before changing softlockup_thresh. 
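The race is easiest to see with concrete numbers. Suppose a CPU's watchdog was last touched 30 seconds ago and softlockup_thresh is lowered at runtime from 60 to 10: the very next periodic check computes an age of 30, compares it against the new threshold of 10, and reports a lockup (panicking if softlockup_panic is set) even though nothing is stuck. That is what the proc_dosoftlockup_thresh() handler added in the diff below prevents by touching every watchdog before the new value takes effect. The snippet is a hedged, self-contained sketch of that comparison with made-up names; the real check lives in kernel/softlockup.c.

/* Illustrative sketch only; times are in seconds, names are invented. */
#include <stdio.h>

static int looks_locked_up(unsigned long now, unsigned long touch_ts,
			   unsigned long thresh)
{
	return now - touch_ts > thresh;
}

int main(void)
{
	unsigned long touch_ts = 100, now = 130;	/* last touched 30s ago */

	printf("thresh=60 -> lockup? %d\n", looks_locked_up(now, touch_ts, 60)); /* 0 */
	/* threshold lowered without first touching the watchdogs: */
	printf("thresh=10 -> lockup? %d\n", looks_locked_up(now, touch_ts, 10)); /* 1: false positive */
	return 0;
}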
Signed-off-by: Mandeep Singh Baines Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 +++ kernel/softlockup.c | 9 +++++++++ kernel/sysctl.c | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 4cae9b81a1f..54cbabf3b87 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -293,6 +293,9 @@ extern void sched_show_task(struct task_struct *p); extern void softlockup_tick(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); +extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, + size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; diff --git a/kernel/softlockup.c b/kernel/softlockup.c index d9188c66278..85d5a245510 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void) } EXPORT_SYMBOL(touch_all_softlockup_watchdogs); +int proc_dosoftlockup_thresh(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + touch_all_softlockup_watchdogs(); + return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); +} + /* * This callback runs from the timer interrupt, and checks * whether the watchdog thread has hung or not: diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 89d74436318..596dc31a711 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -800,7 +800,7 @@ static struct ctl_table kern_table[] = { .data = &softlockup_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = &proc_dosoftlockup_thresh, .strategy = &sysctl_intvec, .extra1 = &neg_one, .extra2 = &sixty, -- cgit v1.2.3 From 14819ea1e0bcbdc9b084cd60a6a24d5d786324ef Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 14 Jan 2009 12:34:21 +0100 Subject: irq: export __set_irq_handler() and handle_level_irq() Impact: build fix ARM updates broke x86 allmodconfig builds: ERROR: "__set_irq_handler" [drivers/mfd/pcf50633-core.ko] undefined! ERROR: "handle_level_irq" [drivers/mfd/pcf50633-core.ko] undefined! Signed-off-by: Ingo Molnar --- kernel/irq/chip.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index f63c706d25e..7de11bd64df 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -383,6 +383,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) out_unlock: spin_unlock(&desc->lock); } +EXPORT_SYMBOL_GPL(handle_level_irq); /** * handle_fasteoi_irq - irq handler for transparent controllers @@ -593,6 +594,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, } spin_unlock_irqrestore(&desc->lock, flags); } +EXPORT_SYMBOL_GPL(__set_irq_handler); void set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, -- cgit v1.2.3 From 2ea038917bbdd51a7ae4a898c6a04641324dd033 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Wed, 14 Jan 2009 21:38:20 +0100 Subject: Revert "kbuild: strip generated symbols from *.ko" This reverts commit ad7a953c522ceb496611d127e51e278bfe0ff483. 
And commit: ("allow stripping of generated symbols under CONFIG_KALLSYMS_ALL") 9bb482476c6c9d1ae033306440c51ceac93ea80c These stripping patches has caused a set of issues: 1) People have reported compatibility issues with binutils due to lack of support for `--strip-unneeded-symbols' with objcopy 2.15.92.0.2 Reported by: Wenji 2) ccache and distcc no longer works as expeced Reported by: Ted, Roland, + others 3) The installed modules increased a lot in size Reported by: Ted, Davej + others Reported-by: Wenji Huang Reported-by: "Theodore Ts'o" Reported-by: Dave Jones Reported-by: Roland McGrath Signed-off-by: Sam Ravnborg --- Makefile | 59 ++++------- arch/x86/scripts/strip-symbols | 1 - init/Kconfig | 7 -- kernel/kallsyms.c | 16 +-- scripts/Makefile.build | 55 ++++------- scripts/Makefile.modinst | 3 +- scripts/genksyms/genksyms.c | 21 ++-- scripts/genksyms/keywords.c_shipped | 189 ++++++++++++++++++------------------ scripts/genksyms/keywords.gperf | 2 - scripts/kallsyms.c | 21 ++-- scripts/mksysmap | 7 +- scripts/strip-symbols | 22 ----- 12 files changed, 166 insertions(+), 237 deletions(-) delete mode 100644 arch/x86/scripts/strip-symbols delete mode 100644 scripts/strip-symbols (limited to 'kernel') diff --git a/Makefile b/Makefile index c06e250eca1..c2c4bbeef59 100644 --- a/Makefile +++ b/Makefile @@ -606,25 +606,20 @@ export INSTALL_PATH ?= /boot MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) export MODLIB -strip-symbols := $(srctree)/scripts/strip-symbols \ - $(wildcard $(srctree)/arch/$(ARCH)/scripts/strip-symbols) - # -# INSTALL_MOD_STRIP, if defined, will cause modules to be stripped while -# they get installed. If INSTALL_MOD_STRIP is '1', then the default -# options (see below) will be used. Otherwise, INSTALL_MOD_STRIP will -# be used as the option(s) to the objcopy command. +# INSTALL_MOD_STRIP, if defined, will cause modules to be +# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then +# the default option --strip-debug will be used. Otherwise, +# INSTALL_MOD_STRIP will used as the options to the strip command. 
+ ifdef INSTALL_MOD_STRIP ifeq ($(INSTALL_MOD_STRIP),1) -mod_strip_cmd = $(OBJCOPY) --strip-debug -ifeq ($(CONFIG_KALLSYMS_ALL),$(CONFIG_KALLSYMS_STRIP_GENERATED)) -mod_strip_cmd += --wildcard $(addprefix --strip-symbols ,$(strip-symbols)) -endif +mod_strip_cmd = $(STRIP) --strip-debug else -mod_strip_cmd = $(OBJCOPY) $(INSTALL_MOD_STRIP) +mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP) endif # INSTALL_MOD_STRIP=1 else -mod_strip_cmd = false +mod_strip_cmd = true endif # INSTALL_MOD_STRIP export mod_strip_cmd @@ -754,7 +749,6 @@ last_kallsyms := 2 endif kallsyms.o := .tmp_kallsyms$(last_kallsyms).o -kallsyms.h := $(wildcard include/config/kallsyms/*.h) $(wildcard include/config/kallsyms/*/*.h) define verify_kallsyms $(Q)$(if $($(quiet)cmd_sysmap), \ @@ -779,41 +773,24 @@ endef # Generate .S file with all kernel symbols quiet_cmd_kallsyms = KSYM $@ - cmd_kallsyms = { test $* -eq 0 || $(NM) -n $<; } \ - | $(KALLSYMS) $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) >$@ - -quiet_cmd_kstrip = STRIP $@ - cmd_kstrip = $(OBJCOPY) --wildcard $(addprefix --strip$(if $(CONFIG_RELOCATABLE),-unneeded)-symbols ,$(filter %/scripts/strip-symbols,$^)) $< $@ + cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \ + $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@ -$(foreach n,0 1 2 3,.tmp_kallsyms$(n).o): KBUILD_AFLAGS += -Wa,--strip-local-absolute -$(foreach n,0 1 2 3,.tmp_kallsyms$(n).o): %.o: %.S scripts FORCE +.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE $(call if_changed_dep,as_o_S) -ifeq ($(CONFIG_KALLSYMS_STRIP_GENERATED),y) -strip-ext := .stripped -endif - -.tmp_kallsyms%.S: .tmp_vmlinux%$(strip-ext) $(KALLSYMS) $(kallsyms.h) +.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS) $(call cmd,kallsyms) -# make -jN seems to have problems with intermediate files, see bug #3330. -.SECONDARY: $(foreach n,1 2 3,.tmp_vmlinux$(n).stripped) -.tmp_vmlinux%.stripped: .tmp_vmlinux% $(strip-symbols) $(kallsyms.h) - $(call cmd,kstrip) - -ifneq ($(CONFIG_DEBUG_INFO),y) -.tmp_vmlinux%: LDFLAGS_vmlinux += -S -endif # .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version -.tmp_vmlinux%: $(vmlinux-lds) $(vmlinux-all) FORCE - $(if $(filter 1,$*),$(call if_changed_rule,ksym_ld),$(call if_changed,vmlinux__)) +.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE + $(call if_changed_rule,ksym_ld) -.tmp_vmlinux0$(strip-ext): - $(Q)echo "placeholder" >$@ +.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE + $(call if_changed,vmlinux__) -.tmp_vmlinux1: .tmp_kallsyms0.o -.tmp_vmlinux2: .tmp_kallsyms1.o -.tmp_vmlinux3: .tmp_kallsyms2.o +.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE + $(call if_changed,vmlinux__) # Needs to visit scripts/ before $(KALLSYMS) can be used. $(KALLSYMS): scripts ; diff --git a/arch/x86/scripts/strip-symbols b/arch/x86/scripts/strip-symbols deleted file mode 100644 index a2f1ccb827c..00000000000 --- a/arch/x86/scripts/strip-symbols +++ /dev/null @@ -1 +0,0 @@ -__cpu_vendor_dev_X86_VENDOR_* diff --git a/init/Kconfig b/init/Kconfig index a724a149bf3..0e9924743a1 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -626,13 +626,6 @@ config KALLSYMS_ALL Say N. -config KALLSYMS_STRIP_GENERATED - bool "Strip machine generated symbols from kallsyms" - depends on KALLSYMS_ALL - default y - help - Say N if you want kallsyms to retain even machine generated symbols. 
- config KALLSYMS_EXTRA_PASS bool "Do an extra kallsyms pass" depends on KALLSYMS diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index e694afa0eb8..7b8b0f21a5b 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -30,19 +30,20 @@ #define all_var 0 #endif -extern const unsigned long kallsyms_addresses[]; -extern const u8 kallsyms_names[]; +/* These will be re-linked against their real values during the second link stage */ +extern const unsigned long kallsyms_addresses[] __attribute__((weak)); +extern const u8 kallsyms_names[] __attribute__((weak)); /* tell the compiler that the count isn't in the small data section if the arch * has one (eg: FRV) */ extern const unsigned long kallsyms_num_syms - __attribute__((__section__(".rodata"))); +__attribute__((weak, section(".rodata"))); -extern const u8 kallsyms_token_table[]; -extern const u16 kallsyms_token_index[]; +extern const u8 kallsyms_token_table[] __attribute__((weak)); +extern const u16 kallsyms_token_index[] __attribute__((weak)); -extern const unsigned long kallsyms_markers[]; +extern const unsigned long kallsyms_markers[] __attribute__((weak)); static inline int is_kernel_inittext(unsigned long addr) { @@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr, unsigned long symbol_start = 0, symbol_end = 0; unsigned long i, low, high, mid; + /* This kernel should never had been booted. */ + BUG_ON(!kallsyms_addresses); + /* do a binary search on the sorted kallsyms_addresses array */ low = 0; high = kallsyms_num_syms; diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 5d900307de3..c7de8b39fcf 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -151,16 +151,16 @@ cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $< $(obj)/%.i: $(src)/%.c FORCE $(call if_changed_dep,cc_i_c) -cmd_genksyms = \ +cmd_gensymtypes = \ $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ - $(GENKSYMS) -T $@ -A -a $(ARCH) \ + $(GENKSYMS) -T $@ -a $(ARCH) \ $(if $(KBUILD_PRESERVE),-p) \ $(if $(1),-r $(firstword $(wildcard $(@:.symtypes=.symref) /dev/null))) quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ cmd_cc_symtypes_c = \ set -e; \ - $(call cmd_genksyms, true) >/dev/null; \ + $(call cmd_gensymtypes, true) >/dev/null; \ test -s $@ || rm -f $@ $(obj)/%.symtypes : $(src)/%.c FORCE @@ -177,38 +177,28 @@ cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< else # When module versioning is enabled the following steps are executed: -# o compile a .tmp_.s from .c -# o if .tmp_.s doesn't contain a __ksymtab version, i.e. does -# not export symbols, we just assemble .tmp_.s to .o and +# o compile a .tmp_.o from .c +# o if .tmp_.o doesn't contain a __ksymtab version, i.e. does +# not export symbols, we just rename .tmp_.o to .o and # are done. # o otherwise, we calculate symbol versions using the good old # genksyms on the preprocessed source and postprocess them in a way -# that they are usable as assembly source -# o assemble .o from .tmp_.s forcing inclusion of directives -# defining the actual values of __crc_*, followed by objcopy-ing them -# to force these symbols to be local to permit stripping them later. 
-s_file = $(@D)/.tmp_$(@F:.o=.s) -v_file = $(@D)/.tmp_$(@F:.o=.v) -tmp_o_file = $(@D)/.tmp_$(@F) -no_g_c_flags = $(filter-out -g%,$(c_flags)) - -cmd_cc_o_c = $(CC) $(c_flags) -S -o $(s_file) $< +# that they are usable as a linker script +# o generate .o from .tmp_.o using the linker to +# replace the unresolved symbols __crc_exported_symbol with +# the actual value of the checksum generated by genksyms +cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< cmd_modversions = \ - if grep -q __ksymtab $(s_file); then \ - if $(call cmd_genksyms, $(KBUILD_SYMTYPES)) > $(v_file) \ - && $(CC) $(no_g_c_flags) -c -Wa,$(v_file) \ - -o $(tmp_o_file) $(s_file) \ - && $(OBJCOPY) -L '__crc_*' -L '___crc_*' -w \ - $(tmp_o_file) $@; \ - then \ - : ; \ - else \ - rm -f $@; exit 1; \ - fi; \ + if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ + $(call cmd_gensymtypes, $(KBUILD_SYMTYPES)) \ + > $(@D)/.tmp_$(@F:.o=.ver); \ + \ + $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ + -T $(@D)/.tmp_$(@F:.o=.ver); \ + rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \ else \ - rm -f $(v_file); \ - $(CC) $(no_g_c_flags) -c -o $@ $(s_file); \ + mv -f $(@D)/.tmp_$(@F) $@; \ fi; endif @@ -225,12 +215,7 @@ define rule_cc_o_c $(cmd_record_mcount) \ scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \ $(dot-target).tmp; \ - if [ -r $(@D)/.tmp_$(@F:.o=.v) ]; then \ - echo >> $(dot-target).tmp; \ - echo '$@: $(GENKSYMS)' >> $(dot-target).tmp; \ - echo '$(GENKSYMS):: ;' >> $(dot-target).tmp; \ - fi; \ - rm -f $(depfile) $(@D)/.tmp_$(@F:.o=.?); \ + rm -f $(depfile); \ mv -f $(dot-target).tmp $(dot-target).cmd endef diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst index a5122dce126..efa5d940e63 100644 --- a/scripts/Makefile.modinst +++ b/scripts/Makefile.modinst @@ -17,8 +17,7 @@ __modinst: $(modules) @: quiet_cmd_modules_install = INSTALL $@ - cmd_modules_install = mkdir -p $(2); \ - $(mod_strip_cmd) $@ $(2)/$(notdir $@) || cp $@ $(2) + cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) # Modules built outside the kernel source tree go into extra by default INSTALL_MOD_DIR ?= extra diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c index f8bb4cabd62..3a8297b5184 100644 --- a/scripts/genksyms/genksyms.c +++ b/scripts/genksyms/genksyms.c @@ -43,7 +43,7 @@ int cur_line = 1; char *cur_filename; static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, - flag_preserve, flag_warnings, flag_asm; + flag_preserve, flag_warnings; static const char *arch = ""; static const char *mod_prefix = ""; @@ -610,11 +610,8 @@ void export_symbol(const char *name) if (flag_dump_defs) fputs(">\n", debugfile); - /* Used as assembly source or a linker script. */ - printf(flag_asm - ? ".equiv %s__crc_%s, %#08lx\n" - : "%s__crc_%s = %#08lx ;\n", - mod_prefix, name, crc); + /* Used as a linker script. */ + printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc); } } @@ -651,10 +648,9 @@ void error_with_pos(const char *fmt, ...) 
static void genksyms_usage(void) { - fputs("Usage:\n" "genksyms [-aAdDTwqhV] > /path/to/.tmp_obj.ver\n" "\n" + fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n" #ifdef __GNU_LIBRARY__ " -a, --arch Select architecture\n" - " -A, --asm Generate assembly rather than linker script\n" " -d, --debug Increment the debug level (repeatable)\n" " -D, --dump Dump expanded symbol defs (for debugging only)\n" " -r, --reference file Read reference symbols from a file\n" @@ -666,7 +662,6 @@ static void genksyms_usage(void) " -V, --version Print the release version\n" #else /* __GNU_LIBRARY__ */ " -a Select architecture\n" - " -A Generate assembly rather than linker script\n" " -d Increment the debug level (repeatable)\n" " -D Dump expanded symbol defs (for debugging only)\n" " -r file Read reference symbols from a file\n" @@ -688,7 +683,6 @@ int main(int argc, char **argv) #ifdef __GNU_LIBRARY__ struct option long_opts[] = { {"arch", 1, 0, 'a'}, - {"asm", 0, 0, 'A'}, {"debug", 0, 0, 'd'}, {"warnings", 0, 0, 'w'}, {"quiet", 0, 0, 'q'}, @@ -701,10 +695,10 @@ int main(int argc, char **argv) {0, 0, 0, 0} }; - while ((o = getopt_long(argc, argv, "a:dwqVADr:T:ph", + while ((o = getopt_long(argc, argv, "a:dwqVDr:T:ph", &long_opts[0], NULL)) != EOF) #else /* __GNU_LIBRARY__ */ - while ((o = getopt(argc, argv, "a:dwqVADr:T:ph")) != EOF) + while ((o = getopt(argc, argv, "a:dwqVDr:T:ph")) != EOF) #endif /* __GNU_LIBRARY__ */ switch (o) { case 'a': @@ -722,9 +716,6 @@ int main(int argc, char **argv) case 'V': fputs("genksyms version 2.5.60\n", stderr); break; - case 'A': - flag_asm = 1; - break; case 'D': flag_dump_defs = 1; break; diff --git a/scripts/genksyms/keywords.c_shipped b/scripts/genksyms/keywords.c_shipped index 83484fe93ed..971e0113ae7 100644 --- a/scripts/genksyms/keywords.c_shipped +++ b/scripts/genksyms/keywords.c_shipped @@ -1,4 +1,4 @@ -/* ANSI-C code produced by gperf version 3.0.1 */ +/* ANSI-C code produced by gperf version 3.0.2 */ /* Command-line: gperf -L ANSI-C -a -C -E -g -H is_reserved_hash -k '1,3,$' -N is_reserved_word -p -t scripts/genksyms/keywords.gperf */ #if !((' ' == 32) && ('!' 
== 33) && ('"' == 34) && ('#' == 35) \ @@ -32,7 +32,7 @@ #line 3 "scripts/genksyms/keywords.gperf" struct resword { const char *name; int token; }; -/* maximum key range = 64, duplicates = 0 */ +/* maximum key range = 62, duplicates = 0 */ #ifdef __GNUC__ __inline @@ -46,32 +46,32 @@ is_reserved_hash (register const char *str, register unsigned int len) { static const unsigned char asso_values[] = { - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 0, - 67, 67, 67, 67, 67, 67, 15, 67, 67, 67, - 0, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 0, 67, 0, 67, 5, - 25, 20, 15, 30, 67, 15, 67, 67, 10, 0, - 10, 40, 20, 67, 10, 5, 0, 10, 15, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67 + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 5, + 65, 65, 65, 65, 65, 65, 35, 65, 65, 65, + 0, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 0, 65, 0, 65, 5, + 20, 15, 10, 30, 65, 15, 65, 65, 20, 0, + 10, 35, 20, 65, 10, 5, 0, 10, 5, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65 }; return len + asso_values[(unsigned char)str[2]] + asso_values[(unsigned char)str[0]] + asso_values[(unsigned char)str[len - 1]]; } @@ -84,119 +84,116 @@ is_reserved_word (register const char *str, register unsigned int len) { enum { - TOTAL_KEYWORDS = 45, + TOTAL_KEYWORDS = 43, MIN_WORD_LENGTH = 3, MAX_WORD_LENGTH = 24, MIN_HASH_VALUE = 3, - MAX_HASH_VALUE = 66 + MAX_HASH_VALUE = 64 }; static const struct resword wordlist[] = { {""}, {""}, {""}, -#line 28 "scripts/genksyms/keywords.gperf" +#line 26 "scripts/genksyms/keywords.gperf" {"asm", ASM_KEYW}, {""}, -#line 10 "scripts/genksyms/keywords.gperf" +#line 8 "scripts/genksyms/keywords.gperf" {"__asm", ASM_KEYW}, {""}, -#line 11 "scripts/genksyms/keywords.gperf" +#line 9 "scripts/genksyms/keywords.gperf" {"__asm__", ASM_KEYW}, {""}, {""}, -#line 54 "scripts/genksyms/keywords.gperf" +#line 52 "scripts/genksyms/keywords.gperf" {"__typeof__", TYPEOF_KEYW}, {""}, -#line 14 "scripts/genksyms/keywords.gperf" +#line 12 "scripts/genksyms/keywords.gperf" 
{"__const", CONST_KEYW}, -#line 13 "scripts/genksyms/keywords.gperf" +#line 11 "scripts/genksyms/keywords.gperf" {"__attribute__", ATTRIBUTE_KEYW}, -#line 15 "scripts/genksyms/keywords.gperf" +#line 13 "scripts/genksyms/keywords.gperf" {"__const__", CONST_KEYW}, -#line 20 "scripts/genksyms/keywords.gperf" +#line 18 "scripts/genksyms/keywords.gperf" {"__signed__", SIGNED_KEYW}, -#line 46 "scripts/genksyms/keywords.gperf" +#line 44 "scripts/genksyms/keywords.gperf" {"static", STATIC_KEYW}, - {""}, -#line 41 "scripts/genksyms/keywords.gperf" +#line 20 "scripts/genksyms/keywords.gperf" + {"__volatile__", VOLATILE_KEYW}, +#line 39 "scripts/genksyms/keywords.gperf" {"int", INT_KEYW}, -#line 34 "scripts/genksyms/keywords.gperf" +#line 32 "scripts/genksyms/keywords.gperf" {"char", CHAR_KEYW}, -#line 35 "scripts/genksyms/keywords.gperf" +#line 33 "scripts/genksyms/keywords.gperf" {"const", CONST_KEYW}, -#line 47 "scripts/genksyms/keywords.gperf" +#line 45 "scripts/genksyms/keywords.gperf" {"struct", STRUCT_KEYW}, -#line 26 "scripts/genksyms/keywords.gperf" +#line 24 "scripts/genksyms/keywords.gperf" {"__restrict__", RESTRICT_KEYW}, -#line 27 "scripts/genksyms/keywords.gperf" - {"restrict", RESTRICT_KEYW}, -#line 7 "scripts/genksyms/keywords.gperf" - {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW}, -#line 18 "scripts/genksyms/keywords.gperf" - {"__inline__", INLINE_KEYW}, - {""}, -#line 22 "scripts/genksyms/keywords.gperf" - {"__volatile__", VOLATILE_KEYW}, -#line 5 "scripts/genksyms/keywords.gperf" - {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW}, #line 25 "scripts/genksyms/keywords.gperf" + {"restrict", RESTRICT_KEYW}, +#line 23 "scripts/genksyms/keywords.gperf" {"_restrict", RESTRICT_KEYW}, - {""}, -#line 12 "scripts/genksyms/keywords.gperf" - {"__attribute", ATTRIBUTE_KEYW}, -#line 6 "scripts/genksyms/keywords.gperf" - {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, #line 16 "scripts/genksyms/keywords.gperf" + {"__inline__", INLINE_KEYW}, +#line 10 "scripts/genksyms/keywords.gperf" + {"__attribute", ATTRIBUTE_KEYW}, + {""}, +#line 14 "scripts/genksyms/keywords.gperf" {"__extension__", EXTENSION_KEYW}, -#line 37 "scripts/genksyms/keywords.gperf" +#line 35 "scripts/genksyms/keywords.gperf" {"enum", ENUM_KEYW}, -#line 8 "scripts/genksyms/keywords.gperf" - {"EXPORT_UNUSED_SYMBOL", EXPORT_SYMBOL_KEYW}, -#line 38 "scripts/genksyms/keywords.gperf" +#line 19 "scripts/genksyms/keywords.gperf" + {"__volatile", VOLATILE_KEYW}, +#line 36 "scripts/genksyms/keywords.gperf" {"extern", EXTERN_KEYW}, {""}, -#line 19 "scripts/genksyms/keywords.gperf" +#line 17 "scripts/genksyms/keywords.gperf" {"__signed", SIGNED_KEYW}, -#line 9 "scripts/genksyms/keywords.gperf" - {"EXPORT_UNUSED_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, -#line 49 "scripts/genksyms/keywords.gperf" - {"union", UNION_KEYW}, -#line 53 "scripts/genksyms/keywords.gperf" +#line 7 "scripts/genksyms/keywords.gperf" + {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW}, + {""}, +#line 51 "scripts/genksyms/keywords.gperf" {"typeof", TYPEOF_KEYW}, -#line 48 "scripts/genksyms/keywords.gperf" +#line 46 "scripts/genksyms/keywords.gperf" {"typedef", TYPEDEF_KEYW}, -#line 17 "scripts/genksyms/keywords.gperf" +#line 15 "scripts/genksyms/keywords.gperf" {"__inline", INLINE_KEYW}, -#line 33 "scripts/genksyms/keywords.gperf" +#line 31 "scripts/genksyms/keywords.gperf" {"auto", AUTO_KEYW}, -#line 21 "scripts/genksyms/keywords.gperf" - {"__volatile", VOLATILE_KEYW}, +#line 47 "scripts/genksyms/keywords.gperf" + {"union", UNION_KEYW}, {""}, {""}, -#line 50 "scripts/genksyms/keywords.gperf" 
+#line 48 "scripts/genksyms/keywords.gperf" {"unsigned", UNSIGNED_KEYW}, - {""}, -#line 44 "scripts/genksyms/keywords.gperf" +#line 49 "scripts/genksyms/keywords.gperf" + {"void", VOID_KEYW}, +#line 42 "scripts/genksyms/keywords.gperf" {"short", SHORT_KEYW}, -#line 40 "scripts/genksyms/keywords.gperf" + {""}, {""}, +#line 50 "scripts/genksyms/keywords.gperf" + {"volatile", VOLATILE_KEYW}, + {""}, +#line 37 "scripts/genksyms/keywords.gperf" + {"float", FLOAT_KEYW}, +#line 34 "scripts/genksyms/keywords.gperf" + {"double", DOUBLE_KEYW}, + {""}, +#line 5 "scripts/genksyms/keywords.gperf" + {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW}, + {""}, {""}, +#line 38 "scripts/genksyms/keywords.gperf" {"inline", INLINE_KEYW}, +#line 6 "scripts/genksyms/keywords.gperf" + {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, +#line 41 "scripts/genksyms/keywords.gperf" + {"register", REGISTER_KEYW}, {""}, -#line 52 "scripts/genksyms/keywords.gperf" - {"volatile", VOLATILE_KEYW}, -#line 42 "scripts/genksyms/keywords.gperf" - {"long", LONG_KEYW}, -#line 24 "scripts/genksyms/keywords.gperf" +#line 22 "scripts/genksyms/keywords.gperf" {"_Bool", BOOL_KEYW}, - {""}, {""}, #line 43 "scripts/genksyms/keywords.gperf" - {"register", REGISTER_KEYW}, -#line 51 "scripts/genksyms/keywords.gperf" - {"void", VOID_KEYW}, -#line 39 "scripts/genksyms/keywords.gperf" - {"float", FLOAT_KEYW}, -#line 36 "scripts/genksyms/keywords.gperf" - {"double", DOUBLE_KEYW}, - {""}, {""}, {""}, {""}, -#line 45 "scripts/genksyms/keywords.gperf" - {"signed", SIGNED_KEYW} + {"signed", SIGNED_KEYW}, + {""}, {""}, +#line 40 "scripts/genksyms/keywords.gperf" + {"long", LONG_KEYW} }; if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) diff --git a/scripts/genksyms/keywords.gperf b/scripts/genksyms/keywords.gperf index 8abe7ab8d88..5ef3733225f 100644 --- a/scripts/genksyms/keywords.gperf +++ b/scripts/genksyms/keywords.gperf @@ -5,8 +5,6 @@ struct resword { const char *name; int token; } EXPORT_SYMBOL, EXPORT_SYMBOL_KEYW EXPORT_SYMBOL_GPL, EXPORT_SYMBOL_KEYW EXPORT_SYMBOL_GPL_FUTURE, EXPORT_SYMBOL_KEYW -EXPORT_UNUSED_SYMBOL, EXPORT_SYMBOL_KEYW -EXPORT_UNUSED_SYMBOL_GPL, EXPORT_SYMBOL_KEYW __asm, ASM_KEYW __asm__, ASM_KEYW __attribute, ATTRIBUTE_KEYW diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 92758120a76..ad2434b2697 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -130,9 +130,18 @@ static int read_symbol(FILE *in, struct sym_entry *s) static int symbol_valid(struct sym_entry *s) { /* Symbols which vary between passes. Passes 1 and 2 must have - * identical symbol lists. + * identical symbol lists. The kallsyms_* symbols below are only added + * after pass 1, they would be included in pass 2 when --all-symbols is + * specified so exclude them to get a stable symbol list. */ static char *special_symbols[] = { + "kallsyms_addresses", + "kallsyms_num_syms", + "kallsyms_names", + "kallsyms_markers", + "kallsyms_token_table", + "kallsyms_token_index", + /* Exclude linker generated symbols which vary between passes */ "_SDA_BASE_", /* ppc */ "_SDA2_BASE_", /* ppc */ @@ -164,9 +173,7 @@ static int symbol_valid(struct sym_entry *s) } /* Exclude symbols which vary between passes. 
*/ - if (strstr((char *)s->sym + offset, "_compiled.") || - strncmp((char*)s->sym + offset, "__compound_literal.", 19) == 0 || - strncmp((char*)s->sym + offset, "__compound_literal$", 19) == 0) + if (strstr((char *)s->sym + offset, "_compiled.")) return 0; for (i = 0; special_symbols[i]; i++) @@ -543,10 +550,8 @@ int main(int argc, char **argv) usage(); read_map(stdin); - if (table_cnt) { - sort_symbols(); - optimize_token_table(); - } + sort_symbols(); + optimize_token_table(); write_src(); return 0; diff --git a/scripts/mksysmap b/scripts/mksysmap index 1db316a3712..6e133a0bae7 100644 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -37,6 +37,9 @@ # readprofile starts reading symbols when _stext is found, and # continue until it finds a symbol which is not either of 'T', 't', -# 'W' or 'w'. +# 'W' or 'w'. __crc_ are 'A' and placed in the middle +# so we just ignore them to let readprofile continue to work. +# (At least sparc64 has __crc_ in the middle). + +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)' > $2 -$NM -n $1 | grep -v '\( [aNUw] \)\|\( \$[adt]\)' > $2 diff --git a/scripts/strip-symbols b/scripts/strip-symbols deleted file mode 100644 index 29ee8c1a014..00000000000 --- a/scripts/strip-symbols +++ /dev/null @@ -1,22 +0,0 @@ -<*> -*.h -__compound_literal[$.][0-9]* -__crc_[a-zA-Z_]* -__exitcall_[a-zA-Z_]* -__func__[$.][0-9]* -__FUNCTION__[$.][0-9]* -gcc[0-9]_compiled[$.] -__initcall_[a-zA-Z_]* -__kcrctab_[a-zA-Z_]* -__kstrtab_[a-zA-Z_]* -__ksymtab_[a-zA-Z_]* -__mod_[a-zA-Z_]*[0-9] -__module_depends -__param_[a-zA-Z_]* -__pci_fixup_*PCI_ANY_IDPCI_ANY_ID* -__pci_fixup_*PCI_ANY_IDPCI_DEVICE_ID_* -__pci_fixup_*PCI_VENDOR_ID_*PCI_ANY_ID* -__pci_fixup_*PCI_VENDOR_ID_*PCI_DEVICE_ID_* -__PRETTY_FUNCTION__[$.][0-9]* -__setup_[a-zA-Z_]* -____versions -- cgit v1.2.3 From 934d96eafadcf3eb3ccd094af9919f020907fc41 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 14 Jan 2009 20:38:17 +0530 Subject: time-sched.c: tick_nohz_update_jiffies should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: kernel/time/tick-sched.c:137:6: warning: symbol 'tick_nohz_update_jiffies' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- kernel/time/tick-sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1b6c05bd0d0..d3f1ef4d5cb 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz); * value. We do this unconditionally on any cpu, as we don't know whether the * cpu, which has the update task assigned is in a long sleep. */ -void tick_nohz_update_jiffies(void) +static void tick_nohz_update_jiffies(void) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); -- cgit v1.2.3 From 091d71e023557136e96f0e54f301497a3fc95dc3 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 17 Jan 2009 00:10:45 +0100 Subject: PM: Fix compilation warning in kernel/power/main.c Reorder the code in kernel/power/main.c to fix compilation warning triggered by unsetting CONFIG_SUSPEND. Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Len Brown --- kernel/power/main.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/power/main.c b/kernel/power/main.c index 23998887397..b4d219016b6 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val) #ifdef CONFIG_PM_DEBUG int pm_test_level = TEST_NONE; -static int suspend_test(int level) -{ - if (pm_test_level == level) { - printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); - mdelay(5000); - return 1; - } - return 0; -} - static const char * const pm_tests[__TEST_AFTER_LAST] = { [TEST_NONE] = "none", [TEST_CORE] = "core", @@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, } power_attr(pm_test); -#else /* !CONFIG_PM_DEBUG */ -static inline int suspend_test(int level) { return 0; } -#endif /* !CONFIG_PM_DEBUG */ +#endif /* CONFIG_PM_DEBUG */ #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND +static int suspend_test(int level) +{ +#ifdef CONFIG_PM_DEBUG + if (pm_test_level == level) { + printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); + mdelay(5000); + return 1; + } +#endif /* !CONFIG_PM_DEBUG */ + return 0; +} + #ifdef CONFIG_PM_TEST_SUSPEND /* -- cgit v1.2.3 From b786c6a98ef6fa81114ba7b9fbfc0d67060775e3 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sat, 17 Jan 2009 12:04:36 +0100 Subject: relay: fix lock imbalance in relay_late_setup_files One fail path in relay_late_setup_files() omits mutex_unlock(&relay_channels_mutex); Add it. Signed-off-by: Jiri Slaby Signed-off-by: Ingo Molnar --- kernel/relay.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/relay.c b/kernel/relay.c index 09ac2008f77..9d79b7854fa 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan, mutex_lock(&relay_channels_mutex); /* Is chan already set up? */ - if (unlikely(chan->has_base_filename)) + if (unlikely(chan->has_base_filename)) { + mutex_unlock(&relay_channels_mutex); return -EEXIST; + } chan->has_base_filename = 1; chan->parent = parent; curr_cpu = get_cpu(); -- cgit v1.2.3 From 1d4a7f1c4faf53eb9e822743ec8a70b3019a26d2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 18 Jan 2009 16:39:29 +0100 Subject: hrtimers: fix inconsistent lock state on resume in hres_timers_resume Andrey Borzenkov reported this lockdep assert: > [17854.688347] ================================= > [17854.688347] [ INFO: inconsistent lock state ] > [17854.688347] 2.6.29-rc2-1avb #1 > [17854.688347] --------------------------------- > [17854.688347] inconsistent {in-hardirq-W} -> {hardirq-on-W} usage. 
> [17854.688347] pm-suspend/18240 [HC0[0]:SC0[0]:HE1:SE1] takes: > [17854.688347] (&cpu_base->lock){++..}, at: [] retrigger_next_event+0x5c/0xa0 > [17854.688347] {in-hardirq-W} state was registered at: > [17854.688347] [] __lock_acquire+0x79d/0x1930 > [17854.688347] [] lock_acquire+0x5c/0x80 > [17854.688347] [] _spin_lock+0x35/0x70 > [17854.688347] [] hrtimer_run_queues+0x31/0x140 > [17854.688347] [] run_local_timers+0x8/0x20 > [17854.688347] [] update_process_times+0x23/0x60 > [17854.688347] [] tick_periodic+0x24/0x80 > [17854.688347] [] tick_handle_periodic+0x12/0x70 > [17854.688347] [] timer_interrupt+0x14/0x20 > [17854.688347] [] handle_IRQ_event+0x29/0x60 > [17854.688347] [] handle_level_irq+0x69/0xe0 > [17854.688347] [] 0xffffffff > [17854.688347] irq event stamp: 55771 > [17854.688347] hardirqs last enabled at (55771): [] _spin_unlock_irqrestore+0x35/0x60 > [17854.688347] hardirqs last disabled at (55770): [] _spin_lock_irqsave+0x19/0x80 > [17854.688347] softirqs last enabled at (54836): [] __do_softirq+0xc4/0x110 > [17854.688347] softirqs last disabled at (54831): [] do_softirq+0x8e/0xe0 > [17854.688347] > [17854.688347] other info that might help us debug this: > [17854.688347] 3 locks held by pm-suspend/18240: > [17854.688347] #0: (&buffer->mutex){--..}, at: [] sysfs_write_file+0x25/0x100 > [17854.688347] #1: (pm_mutex){--..}, at: [] enter_state+0x4f/0x140 > [17854.688347] #2: (dpm_list_mtx){--..}, at: [] device_pm_lock+0xf/0x20 > [17854.688347] > [17854.688347] stack backtrace: > [17854.688347] Pid: 18240, comm: pm-suspend Not tainted 2.6.29-rc2-1avb #1 > [17854.688347] Call Trace: > [17854.688347] [] ? printk+0x18/0x20 > [17854.688347] [] print_usage_bug+0x16c/0x1d0 > [17854.688347] [] mark_lock+0x8bf/0xc90 > [17854.688347] [] ? pit_next_event+0x2f/0x40 > [17854.688347] [] __lock_acquire+0x580/0x1930 > [17854.688347] [] ? _spin_unlock+0x1d/0x20 > [17854.688347] [] ? pit_next_event+0x2f/0x40 > [17854.688347] [] ? clockevents_program_event+0x98/0x160 > [17854.688347] [] ? mark_held_locks+0x48/0x90 > [17854.688347] [] ? _spin_unlock_irqrestore+0x35/0x60 > [17854.688347] [] ? trace_hardirqs_on_caller+0x139/0x190 > [17854.688347] [] ? trace_hardirqs_on+0xb/0x10 > [17854.688347] [] lock_acquire+0x5c/0x80 > [17854.688347] [] ? retrigger_next_event+0x5c/0xa0 > [17854.688347] [] _spin_lock+0x35/0x70 > [17854.688347] [] ? retrigger_next_event+0x5c/0xa0 > [17854.688347] [] retrigger_next_event+0x5c/0xa0 > [17854.688347] [] hres_timers_resume+0xa/0x10 > [17854.688347] [] timekeeping_resume+0xee/0x150 > [17854.688347] [] __sysdev_resume+0x14/0x50 > [17854.688347] [] sysdev_resume+0x47/0x80 > [17854.688347] [] device_power_up+0xb/0x20 > [17854.688347] [] suspend_devices_and_enter+0xcf/0x150 > [17854.688347] [] ? freeze_processes+0x3f/0x90 > [17854.688347] [] enter_state+0xf4/0x140 > [17854.688347] [] state_store+0x7d/0xc0 > [17854.688347] [] ? state_store+0x0/0xc0 > [17854.688347] [] kobj_attr_store+0x24/0x30 > [17854.688347] [] sysfs_write_file+0x9c/0x100 > [17854.688347] [] vfs_write+0x9c/0x160 > [17854.688347] [] ? restore_nocheck_notrace+0x0/0xe > [17854.688347] [] ? sysfs_write_file+0x0/0x100 > [17854.688347] [] sys_write+0x3d/0x70 > [17854.688347] [] sysenter_do_call+0x12/0x31 Andrey's analysis: > timekeeping_resume() is called via class ->resume > method; and according to comments in sysdev_resume() and > device_power_up(), they are called with interrupts disabled. > > Looking at suspend_enter, irqs *are* disabled at this point. 
> > So it actually looks like something (may be some driver) > unconditionally enabled irqs in resume path. Add a debug check to test this theory. If it triggers then it triggers because the resume code calls it with irqs enabled, which is a no-no not just for timekeeping_resume(), but also bad for a number of other resume handlers. Reported-by: Andrey Borzenkov Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/hrtimer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 1455b7651b6..cb83c6d4c07 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -614,7 +614,9 @@ void clock_was_set(void) */ void hres_timers_resume(void) { - /* Retrigger the CPU local events: */ + WARN_ONCE(!irqs_disabled(), + KERN_INFO "hres_timers_resume() called with IRQs enabled!"); + retrigger_next_event(NULL); } -- cgit v1.2.3 From f90d4118bacef87894621a3e8aba853fa0c89abc Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Fri, 16 Jan 2009 10:24:10 +0800 Subject: cpuset: fix possible deadlock in async_rebuild_sched_domains Lockdep reported some possible circular locking info when we tested cpuset on NUMA/fake NUMA box. ======================================================= [ INFO: possible circular locking dependency detected ] 2.6.29-rc1-00224-ga652504 #111 ------------------------------------------------------- bash/2968 is trying to acquire lock: (events){--..}, at: [] flush_work+0x24/0xd8 but task is already holding lock: (cgroup_mutex){--..}, at: [] cgroup_lock_live_group+0x12/0x29 which lock already depends on the new lock. ...... ------------------------------------------------------- Steps to reproduce: # mkdir /dev/cpuset # mount -t cpuset xxx /dev/cpuset # mkdir /dev/cpuset/0 # echo 0 > /dev/cpuset/0/cpus # echo 0 > /dev/cpuset/0/mems # echo 1 > /dev/cpuset/0/memory_migrate # cat /dev/zero > /dev/null & # echo $! > /dev/cpuset/0/tasks This is because async_rebuild_sched_domains has the following lock sequence: run_workqueue(async_rebuild_sched_domains) -> do_rebuild_sched_domains -> cgroup_lock But, attaching tasks when memory_migrate is set has following: cgroup_lock_live_group(cgroup_tasks_write) -> do_migrate_pages -> flush_work This patch fixes it by using a separate workqueue thread. Signed-off-by: Miao Xie Signed-off-by: Lai Jiangshan Signed-off-by: Ingo Molnar --- kernel/cpuset.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index a85678865c5..f76db9dcaa0 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -60,6 +60,14 @@ #include #include +/* + * Workqueue for cpuset related tasks. + * + * Using kevent workqueue may cause deadlock when memory_migrate + * is set. So we create a separate workqueue thread for cpuset. + */ +static struct workqueue_struct *cpuset_wq; + /* * Tracks how many cpusets are currently defined in system. 
* When there is only one cpuset (the root cpuset) we can @@ -831,7 +839,7 @@ static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); */ static void async_rebuild_sched_domains(void) { - schedule_work(&rebuild_sched_domains_work); + queue_work(cpuset_wq, &rebuild_sched_domains_work); } /* @@ -2111,6 +2119,9 @@ void __init cpuset_init_smp(void) hotcpu_notifier(cpuset_track_online_cpus, 0); hotplug_memory_notifier(cpuset_track_online_nodes, 10); + + cpuset_wq = create_singlethread_workqueue("cpuset"); + BUG_ON(!cpuset_wq); } /** -- cgit v1.2.3 From 31ad9081200c06ccc350625d41d1f8b2d1cef29f Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 16 Jan 2009 15:31:15 -0800 Subject: work_on_cpu: don't try to get_online_cpus() in work_on_cpu. Impact: remove potential circular lock dependency with cpu hotplug lock This has caused more problems than it solved, with a pile of cpu hotplug locking issues. Followup patches will get_online_cpus() in callers that need it, but if they don't do it they're no worse than before when they were using set_cpus_allowed without locking. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- kernel/workqueue.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2f445833ae3..a35afdbc016 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -991,8 +991,8 @@ static void do_work_for_cpu(struct work_struct *w) * @fn: the function to run * @arg: the function arg * - * This will return -EINVAL in the cpu is not online, or the return value - * of @fn otherwise. + * This will return the value @fn returns. + * It is up to the caller to ensure that the cpu doesn't go offline. */ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { @@ -1001,14 +1001,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) INIT_WORK(&wfc.work, do_work_for_cpu); wfc.fn = fn; wfc.arg = arg; - get_online_cpus(); - if (unlikely(!cpu_online(cpu))) - wfc.ret = -EINVAL; - else { - schedule_work_on(cpu, &wfc.work); - flush_work(&wfc.work); - } - put_online_cpus(); + schedule_work_on(cpu, &wfc.work); + flush_work(&wfc.work); return wfc.ret; } -- cgit v1.2.3 From 8ccad40df8d314f786fdb06bdbedd4f43f3257cd Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 16 Jan 2009 15:31:15 -0800 Subject: work_on_cpu: Use our own workqueue. Impact: remove potential clashes with generic kevent workqueue Annoyingly, some places we want to use work_on_cpu are already in workqueues. As per Ingo's suggestion, we create a different workqueue for work_on_cpu. 
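Both this change and the cpuset fix above apply the same remedy: work that may be flushed or waited on from inside the generic kevent workqueue gets a queue of its own, so the flush can never end up waiting on unrelated keventd work. The fragment below is a minimal module-style sketch of that pattern using the same 2.6-era interfaces that appear in the diffs (create_singlethread_workqueue(), queue_work(), flush_work()); the queue name and work function are invented, and it is illustrative only, not a piece of either patch.

/* Illustrative module-style sketch of the dedicated-workqueue pattern. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	printk(KERN_INFO "running on a dedicated workqueue thread\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	/* Private queue: flushing it never depends on generic kevent work. */
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	flush_work(&example_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");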
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- kernel/workqueue.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a35afdbc016..1f0c509b40d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -971,6 +971,8 @@ undo: } #ifdef CONFIG_SMP +static struct workqueue_struct *work_on_cpu_wq __read_mostly; + struct work_for_cpu { struct work_struct work; long (*fn)(void *); @@ -1001,7 +1003,7 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) INIT_WORK(&wfc.work, do_work_for_cpu); wfc.fn = fn; wfc.arg = arg; - schedule_work_on(cpu, &wfc.work); + queue_work_on(cpu, work_on_cpu_wq, &wfc.work); flush_work(&wfc.work); return wfc.ret; @@ -1019,4 +1021,8 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); +#ifdef CONFIG_SMP + work_on_cpu_wq = create_workqueue("work_on_cpu"); + BUG_ON(!work_on_cpu_wq); +#endif } -- cgit v1.2.3 From 082605de5f82eb692cc90f7fda071cc01bb5ac34 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 19 Jan 2009 14:32:51 -0500 Subject: ring-buffer: fix alignment problem Impact: fix to allow some archs to use the ring buffer Commits in the ring buffer are checked by pointer arithmetic. If the calculation is incorrect, then the commits will never take place and the buffer will simply fill up and report an error. Each page in the ring buffer has a small header: struct buffer_data_page { u64 time_stamp; local_t commit; unsigned char data[]; }; Unfortuntely, some of the calculations used sizeof(struct buffer_data_page) to know the size of the header. But this is incorrect on some archs, where sizeof(struct buffer_data_page) does not equal offsetof(struct buffer_data_page, data), and on those archs, the commits are never processed. This patch replaces the sizeof with offsetof. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 8b0daf0662e..1d6526361d0 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -246,7 +246,7 @@ static inline int test_time_stamp(u64 delta) return 0; } -#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) +#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data)) /* * head_page == tail_page && head == tail then buffer is empty. -- cgit v1.2.3 From cdf57cab27aef72f13a19c86858c6cac9951dc24 Mon Sep 17 00:00:00 2001 From: Adrian McMenamin Date: Wed, 21 Jan 2009 18:47:38 +0900 Subject: dma-coherent: per-device coherent area is in pages, not bytes. Commit 58c6d3dfe436eb8cfb451981d8fdc9044eaf42da ("dma-coherent: catch oversized requests to dma_alloc_from_coherent()") attempted to add a sanity check to bail out on allocations larger than the coherent area. Unfortunately when this was implemented, the fact the coherent area is tracked in pages rather than bytes was overlooked, which subsequently broke every single dma_alloc_from_coherent() user, forcing the allocation silently through generic memory instead. 
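The unit mismatch matters because mem->size counts pages while the size argument is in bytes, so the original test "size > mem->size" compared a byte count against a page count: almost every real request looked oversized and silently fell through to generic memory. The fix shifts the page count by PAGE_SHIFT before comparing. Below is a small self-contained sketch of the corrected bounds check, with illustrative names and a hard-coded 4 KiB page size:

/* Userspace sketch of the corrected check; the pool size is tracked in pages. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, typical but architecture-dependent */

struct coherent_pool { unsigned long size_pages; };

static int request_fits(const struct coherent_pool *pool, unsigned long bytes)
{
	/* The broken form compared bytes against pages: bytes > pool->size_pages */
	return bytes <= (pool->size_pages << PAGE_SHIFT);
}

int main(void)
{
	struct coherent_pool pool = { .size_pages = 16 };	/* 64 KiB pool */

	printf("32 KiB fits:  %d\n", request_fits(&pool, 32 * 1024));	/* 1 */
	printf("128 KiB fits: %d\n", request_fits(&pool, 128 * 1024));	/* 0 */
	return 0;
}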
Signed-off-by: Adrian McMenamin Signed-off-by: Paul Mundt --- kernel/dma-coherent.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c index 038707404b7..38fa292c6aa 100644 --- a/kernel/dma-coherent.c +++ b/kernel/dma-coherent.c @@ -118,8 +118,8 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, mem = dev->dma_mem; if (!mem) return 0; - if (unlikely(size > mem->size)) - return 0; + if (unlikely(size > (mem->size << PAGE_SHIFT))) + return 0; pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); if (pageno >= 0) { -- cgit v1.2.3 From 0609697eab9775564845d4c94f9e3780fb791ffd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 21 Jan 2009 18:51:53 +0900 Subject: dma-coherent: Restore dma_alloc_from_coherent() large alloc fall back policy. When doing large allocations (larger than the per-device coherent area) the generic memory allocators are silently fallen back on regardless of consideration for the per-device constraints. In the DMA_MEMORY_EXCLUSIVE case falling back on generic memory is not an option, as it tends not to be addressable by the DMA hardware in question. This issue showed up with the 8139too breakage on the Dreamcast, where non-addressable buffers were silently allocated due to the size mismatch calculation -- while it should have simply errored out upon being unable to satisfy the allocation with the given device constraints. This restores fall back behaviour to what it was before the oversized request change caused multiple regressions. Signed-off-by: Paul Mundt --- kernel/dma-coherent.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c index 38fa292c6aa..962a3b574f2 100644 --- a/kernel/dma-coherent.c +++ b/kernel/dma-coherent.c @@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied); * @size: size of requested memory area * @dma_handle: This will be filled with the correct dma handle * @ret: This pointer will be filled with the virtual address - * to allocated area. + * to allocated area. * * This function should be only called from per-arch dma_alloc_coherent() * to support allocation from per-device coherent memory pools. @@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, mem = dev->dma_mem; if (!mem) return 0; + + *ret = NULL; + if (unlikely(size > (mem->size << PAGE_SHIFT))) - return 0; + goto err; pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); - if (pageno >= 0) { - /* - * Memory was found in the per-device arena. - */ - *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); - *ret = mem->virt_base + (pageno << PAGE_SHIFT); - memset(*ret, 0, size); - } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) { - /* - * The per-device arena is exhausted and we are not - * permitted to fall back to generic memory. - */ - *ret = NULL; - } else { - /* - * The per-device arena is exhausted and we are - * permitted to fall back to generic memory. - */ - return 0; - } + if (unlikely(pageno < 0)) + goto err; + + /* + * Memory was found in the per-device area. + */ + *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + *ret = mem->virt_base + (pageno << PAGE_SHIFT); + memset(*ret, 0, size); + return 1; + +err: + /* + * In the case where the allocation can not be satisfied from the + * per-device area, try to fall back to generic memory if the + * constraints allow it. 
+ */ + return mem->flags & DMA_MEMORY_EXCLUSIVE; } EXPORT_SYMBOL(dma_alloc_from_coherent); -- cgit v1.2.3 From 00f57f545afa422db3003b0d0b30a30f8de7ecb2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 14 Jan 2009 13:33:27 -0800 Subject: tracing/function-graph-tracer: fix a regression while suspend to disk Impact: fix a crash while kernel image restore When the function graph tracer is running and while suspend to disk, some racy and dangerous things happen against this tracer. The current task will save its registers including the stack pointer which contains the return address hooked by the tracer. But the current task will continue to enter other functions after that to save the memory, and then it will store other return addresses, and finally loose the old depth which matches the return address saved in the old stack (during the registers saving). So on image restore, the code will return to wrong addresses. And there are other things: on restore, the task will have it's "current" pointer overwritten during registers restoring....switching from one task to another... That would be insane to try to trace function graphs at these stages. This patch makes the function graph tracer listening on power events, making it's tracing disabled for the current task (the one that performs the hibernation work) while suspend/resume to disk, making the tracing safe during hibernation. Signed-off-by: Frederic Weisbecker Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2f32969c09d..7dcf6e9f2b0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -1965,6 +1966,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, #ifdef CONFIG_FUNCTION_GRAPH_TRACER static atomic_t ftrace_graph_active; +static struct notifier_block ftrace_suspend_notifier; int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) { @@ -2043,6 +2045,27 @@ static int start_graph_tracing(void) return ret; } +/* + * Hibernation protection. + * The state of the current task is too much unstable during + * suspend/restore to disk. We want to protect against that. 
+ */ +static int +ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, + void *unused) +{ + switch (state) { + case PM_HIBERNATION_PREPARE: + pause_graph_tracing(); + break; + + case PM_POST_HIBERNATION: + unpause_graph_tracing(); + break; + } + return NOTIFY_DONE; +} + int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc) { @@ -2050,6 +2073,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, mutex_lock(&ftrace_sysctl_lock); + ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; + register_pm_notifier(&ftrace_suspend_notifier); + atomic_inc(&ftrace_graph_active); ret = start_graph_tracing(); if (ret) { @@ -2075,6 +2101,7 @@ void unregister_ftrace_graph(void) ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; ftrace_graph_entry = ftrace_graph_entry_stub; ftrace_shutdown(FTRACE_STOP_FUNC_RET); + unregister_pm_notifier(&ftrace_suspend_notifier); mutex_unlock(&ftrace_sysctl_lock); } -- cgit v1.2.3 From 551b4048b3d4acf15aff9fe4aed89b892c135b02 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 12 Jan 2009 11:06:18 +0800 Subject: ring_buffer: reset write when reserve buffer fail Impact: reset struct buffer_page.write when interrupt storm if struct buffer_page.write is not reset, any succedent committing will corrupted ring_buffer: static inline void rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) { ...... cpu_buffer->commit_page->commit = cpu_buffer->commit_page->write; ...... } when "if (RB_WARN_ON(cpu_buffer, next_page == reader_page))", ring_buffer is disabled, but some reserved buffers may haven't been committed. we need reset struct buffer_page.write. when "if (unlikely(next_page == cpu_buffer->commit_page))", ring_buffer is still available, we should not corrupt it. Signed-off-by: Lai Jiangshan Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1d6526361d0..9c1e73da4e3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1025,12 +1025,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, } if (next_page == head_page) { - if (!(buffer->flags & RB_FL_OVERWRITE)) { - /* reset write */ - if (tail <= BUF_PAGE_SIZE) - local_set(&tail_page->write, tail); + if (!(buffer->flags & RB_FL_OVERWRITE)) goto out_unlock; - } /* tail_page has not moved yet? */ if (tail_page == cpu_buffer->tail_page) { @@ -1105,6 +1101,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, return event; out_unlock: + /* reset write */ + if (tail <= BUF_PAGE_SIZE) + local_set(&tail_page->write, tail); + __raw_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); return NULL; -- cgit v1.2.3 From faf6861ebd776871e77b761c43ec045cd20b5716 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Jan 2009 12:24:42 -0500 Subject: trace: print ftrace_dump at KERN_EMERG log level Impact: fix to print out ftrace_dump when expected I was debugging a hard race condition to only find out that after I hit the race, my log level was not at level to show KERN_INFO. The time it took to trigger the race was wasted because I did not capture the trace. 
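What bites here is the console loglevel filter: a message only reaches the console when its level is numerically below console_loglevel, so a lowered loglevel (say 4) drops everything printed at KERN_INFO (6) while KERN_EMERG (0) always gets out. Roughly (simplified sketch; msg_level stands in for the parsed priority):

	/* simplified: low-priority messages never reach the console */
	if (msg_level >= console_loglevel)
		return;		/* suppressed: not written to the console */
	/* otherwise the message is handed to the registered console drivers */
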
Since ftrace_dump is only called from kernel oops (and only when it is set in the kernel command line to do so), or when a developer adds it to their own local tree, the log level of the print should be at KERN_EMERG to make sure the print appears. ftrace_dump is not called by a normal user setup, and will not add extra unwanted print out to the console. There is no reason it should be at KERN_INFO. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c580233add9..1a1c5a6ab24 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3736,7 +3736,7 @@ static struct notifier_block trace_die_notifier = { * it if we decide to change what log level the ftrace dump * should be at. */ -#define KERN_TRACE KERN_INFO +#define KERN_TRACE KERN_EMERG static void trace_printk_seq(struct trace_seq *s) -- cgit v1.2.3 From a442e5e0a2011af5b2d1f118fee0a8f9079f1d88 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Jan 2009 14:50:19 -0500 Subject: trace: stop all recording to ring buffer on ftrace_dump Impact: limit ftrace dump output Currently ftrace_dump only calls ftrace_kill that is a fast way to prevent the function tracer functions from being called (just sets a flag and clears the function to call, nothing else). It is better to also turn off any recording to the ring buffers as well. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1a1c5a6ab24..4d89e84f0f4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3770,6 +3770,7 @@ void ftrace_dump(void) dump_ran = 1; /* No turning back! */ + tracing_off(); ftrace_kill(); for_each_tracing_cpu(cpu) { -- cgit v1.2.3 From 1092307d582a7566d23779c304cf86f3075ac5f0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 15 Jan 2009 23:40:11 -0500 Subject: trace: set max latency variable to zero on default Impact: trace max latencies on start of latency tracing This patch sets the max latency to zero whenever one of the irq variant tracers or the wakeup tracer is set to current tracer. Most developers expect to see output when starting up a latency tracer. But since the max_latency is already set to max, and it takes a latency greater than max_latency to be recorded, there is no trace. This is not the expected behavior and has even confused myself. 
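For reference, the latency tracers only snapshot the buffer when a measured latency beats the current maximum, roughly (simplified from the irqsoff/wakeup tracers; start/stop are stand-in timestamps):

	delta = stop - start;			/* measured latency */
	if (delta > tracing_max_latency) {
		tracing_max_latency = delta;		/* new worst case */
		update_max_tr(tr, current, cpu);	/* preserve the trace */
	}

Starting tracing_max_latency at ULONG_MAX therefore guarantees the condition never fires, which is why resetting it to zero on tracer init produces the expected output.
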
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 2 +- kernel/trace/trace_irqsoff.c | 1 + kernel/trace/trace_sched_wakeup.c | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4d89e84f0f4..17bb88d86ac 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -40,7 +40,7 @@ #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) -unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; +unsigned long __read_mostly tracing_max_latency; unsigned long __read_mostly tracing_thresh; /* diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 7c2e326bbc8..62a78d94353 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -380,6 +380,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr) static void __irqsoff_tracer_init(struct trace_array *tr) { + tracing_max_latency = 0; irqsoff_trace = tr; /* make sure that the tracer is visible */ smp_wmb(); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 43586b689e3..42ae1e77b6b 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -333,6 +333,7 @@ static void stop_wakeup_tracer(struct trace_array *tr) static int wakeup_tracer_init(struct trace_array *tr) { + tracing_max_latency = 0; wakeup_trace = tr; start_wakeup_tracer(tr); return 0; -- cgit v1.2.3 From 91a8d07d82cac3aae3ef2ea1aaba5c9c4a934e91 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 21 Jan 2009 18:45:57 -0500 Subject: ring-buffer: reset timestamps when ring buffer is reset Impact: fix bad times of recent resets The ring buffer needs to reset its timestamps when reseting of the buffer, otherwise the timestamps are stale and might be used to calculate times in the buffer causing funny timestamps to appear. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ring_buffer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 9c1e73da4e3..bd38c5cfd8a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2174,6 +2174,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) cpu_buffer->overrun = 0; cpu_buffer->entries = 0; + + cpu_buffer->write_stamp = 0; + cpu_buffer->read_stamp = 0; } /** -- cgit v1.2.3 From 3a9f84d354ce1e19956083c8e691727dea33bd5a Mon Sep 17 00:00:00 2001 From: Ed Swierk Date: Mon, 26 Jan 2009 15:33:31 -0800 Subject: signals, debug: fix BUG: using smp_processor_id() in preemptible code in print_fatal_signal() With print-fatal-signals=1 on a kernel with CONFIG_PREEMPT=y, sending an unexpected signal to a process causes a BUG: using smp_processor_id() in preemptible code. get_signal_to_deliver() releases the siglock before calling print_fatal_signal(), which calls show_regs(), which calls smp_processor_id(), which is not supposed to be called from a preemptible thread. Make sure show_regs() runs with preemption disabled. 
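This is the standard fix for the smp_processor_id()-in-preemptible warning: pin the task to its CPU for the duration of the call. A minimal sketch of the pattern applied below:

	preempt_disable();	/* smp_processor_id() is stable from here on */
	show_regs(regs);
	preempt_enable();

get_cpu()/put_cpu() would work equally well when the CPU number itself is needed.
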
Signed-off-by: Ed Swierk Signed-off-by: Ingo Molnar --- kernel/signal.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index e73759783dc..b6b36768b75 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -909,7 +909,9 @@ static void print_fatal_signal(struct pt_regs *regs, int signr) } #endif printk("\n"); + preempt_disable(); show_regs(regs); + preempt_enable(); } static int __init setup_print_fatal_signals(char *str) -- cgit v1.2.3 From abfe2d7b915c872f3a1fd203267cedebf90daa45 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 19 Jan 2009 20:54:54 +0100 Subject: Hibernation: Introduce system_entering_hibernation Introduce boolean function system_entering_hibernation() returning 'true' during the last phase of hibernation, in which devices are being put into low power states and the sleep state (for example, ACPI S4) is finally entered. Some device drivers need such a function to check if the system is in the final phase of hibernation. In particular, some SATA drivers are going to use it for blacklisting systems in which the disks should not be spun down during the last phase of hibernation (the BIOS will do that anyway). Signed-off-by: Rafael J. Wysocki Signed-off-by: Jeff Garzik --- include/linux/suspend.h | 2 ++ kernel/power/disk.c | 10 ++++++++++ 2 files changed, 12 insertions(+) (limited to 'kernel') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2b409c44db8..c7d9bb1832b 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -237,6 +237,7 @@ extern int hibernate_nvs_alloc(void); extern void hibernate_nvs_free(void); extern void hibernate_nvs_save(void); extern void hibernate_nvs_restore(void); +extern bool system_entering_hibernation(void); #else /* CONFIG_HIBERNATION */ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} @@ -252,6 +253,7 @@ static inline int hibernate_nvs_alloc(void) { return 0; } static inline void hibernate_nvs_free(void) {} static inline void hibernate_nvs_save(void) {} static inline void hibernate_nvs_restore(void) {} +static inline bool system_entering_hibernation(void) { return false; } #endif /* CONFIG_HIBERNATION */ #ifdef CONFIG_PM_SLEEP diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 45e8541ab7e..432ee575c9e 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -71,6 +71,14 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops) mutex_unlock(&pm_mutex); } +static bool entering_platform_hibernation; + +bool system_entering_hibernation(void) +{ + return entering_platform_hibernation; +} +EXPORT_SYMBOL(system_entering_hibernation); + #ifdef CONFIG_PM_DEBUG static void hibernation_debug_sleep(void) { @@ -411,6 +419,7 @@ int hibernation_platform_enter(void) if (error) goto Close; + entering_platform_hibernation = true; suspend_console(); error = device_suspend(PMSG_HIBERNATE); if (error) { @@ -445,6 +454,7 @@ int hibernation_platform_enter(void) Finish: hibernation_ops->finish(); Resume_devices: + entering_platform_hibernation = false; device_resume(PMSG_RESTORE); resume_console(); Close: -- cgit v1.2.3 From 1267a8df209c7453d65acbdd56e3588954bf890b Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 27 Jan 2009 09:53:21 -0800 Subject: Make irq_*_affinity depend on CONFIG_GENERIC_HARDIRQS too. In interrupt.h these functions are declared only if CONFIG_GENERIC_HARDIRQS is set. We should define them under identical conditions. 
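In other words, the definition must sit under the same preprocessor condition as the declarations it backs; the fix below simply changes the guard in kernel/irq/manage.c from

	#ifdef CONFIG_SMP

to

	#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

so that it matches the condition interrupt.h already uses for these declarations.
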
Signed-off-by: David Daney Signed-off-by: Linus Torvalds --- kernel/irq/manage.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index cd0cd8dcb34..618a64f1915 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -15,7 +15,7 @@ #include "internals.h" -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) cpumask_var_t irq_default_affinity; static int init_irq_default_affinity(void) -- cgit v1.2.3 From 97179fd46da7ddedd18e95388130ed3e06c5a0c7 Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 27 Jan 2009 09:53:22 -0800 Subject: cpumask fallout: Initialize irq_default_affinity earlier Move the initialization of irq_default_affinity to early_irq_init as core_initcall is too late. irq_default_affinity can be used in init_IRQ and potentially timer and SMP init as well. All of these happen before core_initcall. Moving the initialization to early_irq_init ensures that it is initialized before it is used. Signed-off-by: David Daney Acked-by: Mike Travis Signed-off-by: Linus Torvalds --- kernel/irq/handle.c | 16 ++++++++++++++++ kernel/irq/manage.c | 8 -------- 2 files changed, 16 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index c20db0be917..3aba8d12f32 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -39,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc) ack_bad_irq(irq); } +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) +static void __init init_irq_default_affinity(void) +{ + alloc_bootmem_cpumask_var(&irq_default_affinity); + cpumask_setall(irq_default_affinity); +} +#else +static void __init init_irq_default_affinity(void) +{ +} +#endif + /* * Linux has a controller-independent interrupt architecture. * Every controller has a 'controller-template', that is used @@ -134,6 +146,8 @@ int __init early_irq_init(void) int legacy_count; int i; + init_irq_default_affinity(); + desc = irq_desc_legacy; legacy_count = ARRAY_SIZE(irq_desc_legacy); @@ -219,6 +233,8 @@ int __init early_irq_init(void) int count; int i; + init_irq_default_affinity(); + desc = irq_desc; count = ARRAY_SIZE(irq_desc); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 618a64f1915..291f0366455 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -18,14 +18,6 @@ #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) cpumask_var_t irq_default_affinity; -static int init_irq_default_affinity(void) -{ - alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL); - cpumask_setall(irq_default_affinity); - return 0; -} -core_initcall(init_irq_default_affinity); - /** * synchronize_irq - wait for pending IRQ handlers (on other CPUs) * @irq: interrupt number to wait for -- cgit v1.2.3 From baef99a08a2e23d9386b47e53fa5f0d44fc98f66 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Thu, 29 Jan 2009 14:25:10 -0800 Subject: cgroups: use hierarchy mutex in creation failure path Now, cgrp->sibling is handled under hierarchy mutex. error route should do so, too. 
Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Acked-by Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index c29831076e7..2ae7cb47dbf 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2434,7 +2434,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, err_remove: + cgroup_lock_hierarchy(root); list_del(&cgrp->sibling); + cgroup_unlock_hierarchy(root); root->number_of_cgroups--; err_destroy: -- cgit v1.2.3 From 1404f06565ee89e0ce04d4a5859c00b0e3a0dc8d Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 29 Jan 2009 14:25:21 -0800 Subject: cgroups: fix lock inconsistency in cgroup_clone() I fixed a bug in cgroup_clone() in Linus' tree in commit 7b574b7 ("cgroups: fix a race between cgroup_clone and umount") without noticing there was a cleanup patch in -mm tree that should be rebased (now commit 104cbd5, "cgroups: use task_lock() for access tsk->cgroups safe in cgroup_clone()"), thus resulted in lock inconsistency. Signed-off-by: Li Zefan Acked-by: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2ae7cb47dbf..0066092de19 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2993,20 +2993,21 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, mutex_unlock(&cgroup_mutex); return 0; } - task_lock(tsk); - cg = tsk->cgroups; - parent = task_cgroup(tsk, subsys->subsys_id); /* Pin the hierarchy */ - if (!atomic_inc_not_zero(&parent->root->sb->s_active)) { + if (!atomic_inc_not_zero(&root->sb->s_active)) { /* We race with the final deactivate_super() */ mutex_unlock(&cgroup_mutex); return 0; } /* Keep the cgroup alive */ + task_lock(tsk); + parent = task_cgroup(tsk, subsys->subsys_id); + cg = tsk->cgroups; get_css_set(cg); task_unlock(tsk); + mutex_unlock(&cgroup_mutex); /* Now do the VFS work to create a cgroup */ @@ -3045,7 +3046,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, mutex_unlock(&inode->i_mutex); put_css_set(cg); - deactivate_super(parent->root->sb); + deactivate_super(root->sb); /* The cgroup is still accessible in the VFS, but * we're not going to try to rmdir() it at this * point. */ @@ -3071,7 +3072,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, mutex_lock(&cgroup_mutex); put_css_set(cg); mutex_unlock(&cgroup_mutex); - deactivate_super(parent->root->sb); + deactivate_super(root->sb); return ret; } -- cgit v1.2.3 From 804b3c28a4e4fa1c224571bf76edb534b9c4b1ed Mon Sep 17 00:00:00 2001 From: Paul Menage Date: Thu, 29 Jan 2009 14:25:21 -0800 Subject: cgroups: add cpu_relax() calls in css_tryget() and cgroup_clear_css_refs() css_tryget() and cgroup_clear_css_refs() contain polling loops; these loops should have cpu_relax calls in them to reduce cross-cache traffic. 
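cpu_relax() compiles to a spin-wait hint (the PAUSE / rep;nop instruction on x86) that tells the CPU we are busy-waiting, easing pressure on the contended cacheline. The shape of the loop after the change is simply:

	while (!atomic_inc_not_zero(&css->refcnt)) {
		if (test_bit(CSS_REMOVED, &css->flags))
			return false;
		cpu_relax();	/* polite spin until the removal race resolves */
	}
	return true;
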
Signed-off-by: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cgroup.h | 1 + kernel/cgroup.c | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e267e62827b..e4e8e117d27 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -99,6 +99,7 @@ static inline bool css_tryget(struct cgroup_subsys_state *css) while (!atomic_inc_not_zero(&css->refcnt)) { if (test_bit(CSS_REMOVED, &css->flags)) return false; + cpu_relax(); } return true; } diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0066092de19..492215d67fa 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2509,7 +2509,7 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp) for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; int refcnt; - do { + while (1) { /* We can only remove a CSS with a refcnt==1 */ refcnt = atomic_read(&css->refcnt); if (refcnt > 1) { @@ -2523,7 +2523,10 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp) * css_tryget() to spin until we set the * CSS_REMOVED bits or abort */ - } while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt); + if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt) + break; + cpu_relax(); + } } done: for_each_subsys(cgrp->root, ss) { -- cgit v1.2.3 From 839ec5452ebfd5905b9c69b20ceb640903a8ea1a Mon Sep 17 00:00:00 2001 From: Paul Menage Date: Thu, 29 Jan 2009 14:25:22 -0800 Subject: cgroup: fix root_count when mount fails due to busy subsystem root_count was being incremented in cgroup_get_sb() after all error checking was complete, but decremented in cgroup_kill_sb(), which can be called on a superblock that we gave up on due to an error. This patch changes cgroup_kill_sb() to only decrement root_count if the root was previously linked into the list of roots. Signed-off-by: Paul Menage Tested-by: Serge Hallyn Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 492215d67fa..5a54ff42874 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1115,8 +1115,10 @@ static void cgroup_kill_sb(struct super_block *sb) { } write_unlock(&css_set_lock); - list_del(&root->root_list); - root_count--; + if (!list_empty(&root->root_list)) { + list_del(&root->root_list); + root_count--; + } mutex_unlock(&cgroup_mutex); -- cgit v1.2.3 From d7240b988017521ebf89edfadd42c0942f166850 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 29 Jan 2009 10:08:01 -0500 Subject: generic-ipi: use per cpu data for single cpu ipi calls The smp_call_function can be passed a wait parameter telling it to wait for all the functions running on other CPUs to complete before returning, or to return without waiting. Unfortunately, this is currently just a suggestion and not manditory. That is, the smp_call_function can decide not to return and wait instead. The reason for this is because it uses kmalloc to allocate storage to send to the called CPU and that CPU will free it when it is done. But if we fail to allocate the storage, the stack is used instead. This means we must wait for the called CPU to finish before continuing. Unfortunatly, some callers do no abide by this hint and act as if the non-wait option is mandatory. The MTRR code for instance will deadlock if the smp_call_function is set to wait. 
This is because the smp_call_function will wait for the other CPUs to finish their called functions, but those functions are waiting on the caller to continue. This patch changes the generic smp_call_function code to use per cpu variables if the allocation of the data fails for a single CPU call. The smp_call_function_many will fall back to the smp_call_function_single if it fails its alloc. The smp_call_function_single is modified to not force the wait state. Since we now are using a single data per cpu we must synchronize the callers to prevent a second caller modifying the data before the first called IPI functions complete. To do so, I added a flag to the call_single_data called CSD_FLAG_LOCK. When the single CPU is called (which can be called when a many call fails an alloc), we set the LOCK bit on this per cpu data. When the caller finishes it clears the LOCK bit. The caller must wait till the LOCK bit is cleared before setting it. When it is cleared, there is no IPI function using it. Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Acked-by: Jens Axboe Acked-by: Linus Torvalds Signed-off-by: Ingo Molnar --- kernel/smp.c | 36 +++++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/smp.c b/kernel/smp.c index 5cfa0e5e3e8..bbedbb7efe3 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -18,6 +18,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock); enum { CSD_FLAG_WAIT = 0x01, CSD_FLAG_ALLOC = 0x02, + CSD_FLAG_LOCK = 0x04, }; struct call_function_data { @@ -186,6 +187,9 @@ void generic_smp_call_function_single_interrupt(void) if (data_flags & CSD_FLAG_WAIT) { smp_wmb(); data->flags &= ~CSD_FLAG_WAIT; + } else if (data_flags & CSD_FLAG_LOCK) { + smp_wmb(); + data->flags &= ~CSD_FLAG_LOCK; } else if (data_flags & CSD_FLAG_ALLOC) kfree(data); } @@ -196,6 +200,8 @@ void generic_smp_call_function_single_interrupt(void) } } +static DEFINE_PER_CPU(struct call_single_data, csd_data); + /* * smp_call_function_single - Run a function on a specific CPU * @func: The function to run. This must be fast and non-blocking. @@ -224,14 +230,38 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, func(info); local_irq_restore(flags); } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { - struct call_single_data *data = NULL; + struct call_single_data *data; if (!wait) { + /* + * We are calling a function on a single CPU + * and we are not going to wait for it to finish. + * We first try to allocate the data, but if we + * fail, we fall back to use a per cpu data to pass + * the information to that CPU. Since all callers + * of this code will use the same data, we must + * synchronize the callers to prevent a new caller + * from corrupting the data before the callee + * can access it. + * + * The CSD_FLAG_LOCK is used to let us know when + * the IPI handler is done with the data. + * The first caller will set it, and the callee + * will clear it. The next caller must wait for + * it to clear before we set it again. This + * will make sure the callee is done with the + * data before a new caller will use it. 
+ */ data = kmalloc(sizeof(*data), GFP_ATOMIC); if (data) data->flags = CSD_FLAG_ALLOC; - } - if (!data) { + else { + data = &per_cpu(csd_data, me); + while (data->flags & CSD_FLAG_LOCK) + cpu_relax(); + data->flags = CSD_FLAG_LOCK; + } + } else { data = &d; data->flags = CSD_FLAG_WAIT; } -- cgit v1.2.3 From 7f22391cbe82a80a9f891d8bd10fc28ff248d1e2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 22 Dec 2008 02:24:48 +0100 Subject: hrtimers: increase clock min delta threshold while interrupt hanging Impact: avoid timer IRQ hanging slow systems While using the function graph tracer on a virtualized system, the hrtimer_interrupt can hang the system on an infinite loop. This can be caused in several situations: - the hardware is very slow and HZ is set too high - something intrusive is slowing the system down (tracing under emulation) ... and the next clock events to program are always before the current time. This patch implements a reasonable compromise: if such a situation is detected, we share the CPUs time in 1/4 to process the hrtimer interrupts. This is enough to let the system running without serious starvation. It has been successfully tested under VirtualBox with 1000 HZ and 100 HZ with function graph tracer launched. On both cases, the clock events were increased until about 25 ms periodic ticks, which means 40 HZ. So we change a hard to debug hang into a warning message and a system that still manages to limp along. Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- kernel/hrtimer.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index f33afb0407b..8fea312ca36 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1158,6 +1158,29 @@ static void __run_hrtimer(struct hrtimer *timer) #ifdef CONFIG_HIGH_RES_TIMERS +static int force_clock_reprogram; + +/* + * After 5 iteration's attempts, we consider that hrtimer_interrupt() + * is hanging, which could happen with something that slows the interrupt + * such as the tracing. Then we force the clock reprogramming for each future + * hrtimer interrupts to avoid infinite loops and use the min_delta_ns + * threshold that we will overwrite. + * The next tick event will be scheduled to 3 times we currently spend on + * hrtimer_interrupt(). This gives a good compromise, the cpus will spend + * 1/4 of their time to process the hrtimer interrupts. This is enough to + * let it running without serious starvation. 
+ */ + +static inline void +hrtimer_interrupt_hanging(struct clock_event_device *dev, + ktime_t try_time) +{ + force_clock_reprogram = 1; + dev->min_delta_ns = (unsigned long)try_time.tv64 * 3; + printk(KERN_WARNING "hrtimer: interrupt too slow, " + "forcing clock min delta to %lu ns\n", dev->min_delta_ns); +} /* * High resolution timer interrupt * Called with interrupts disabled @@ -1167,6 +1190,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); struct hrtimer_clock_base *base; ktime_t expires_next, now; + int nr_retries = 0; int i; BUG_ON(!cpu_base->hres_active); @@ -1174,6 +1198,10 @@ void hrtimer_interrupt(struct clock_event_device *dev) dev->next_event.tv64 = KTIME_MAX; retry: + /* 5 retries is enough to notice a hang */ + if (!(++nr_retries % 5)) + hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now)); + now = ktime_get(); expires_next.tv64 = KTIME_MAX; @@ -1226,7 +1254,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) /* Reprogramming necessary ? */ if (expires_next.tv64 != KTIME_MAX) { - if (tick_program_event(expires_next, 0)) + if (tick_program_event(expires_next, force_clock_reprogram)) goto retry; } } -- cgit v1.2.3 From 94df7de0289bc2df3d6e85cd2ece52bf42682f45 Mon Sep 17 00:00:00 2001 From: Sebastien Dugue Date: Mon, 1 Dec 2008 14:09:07 +0100 Subject: hrtimers: allow the hot-unplugging of all cpus Impact: fix CPU hotplug hang on Power6 testbox On architectures that support offlining all cpus (at least powerpc/pseries), hot-unpluging the tick_do_timer_cpu can result in a system hang. This comes from the fact that if the cpu going down happens to be the cpu doing the tick, then as the tick_do_timer_cpu handover happens after the cpu is dead (via the CPU_DEAD notification), we're left without ticks, jiffies are frozen and any task relying on timers (msleep, ...) is stuck. That's particularly the case for the cpu looping in __cpu_die() waiting for the dying cpu to be dead. This patch addresses this by having the tick_do_timer_cpu handover happen earlier during the CPU_DYING notification. For this, a new clockevent notification type is introduced (CLOCK_EVT_NOTIFY_CPU_DYING) which is triggered in hrtimer_cpu_notify(). Signed-off-by: Sebastien Dugue Cc: Signed-off-by: Ingo Molnar --- include/linux/clockchips.h | 1 + kernel/hrtimer.c | 4 ++++ kernel/time/tick-common.c | 26 +++++++++++++++++++------- 3 files changed, 24 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index cea153697ec..3a1dbba4d3a 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -36,6 +36,7 @@ enum clock_event_nofitiers { CLOCK_EVT_NOTIFY_BROADCAST_EXIT, CLOCK_EVT_NOTIFY_SUSPEND, CLOCK_EVT_NOTIFY_RESUME, + CLOCK_EVT_NOTIFY_CPU_DYING, CLOCK_EVT_NOTIFY_CPU_DEAD, }; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 8fea312ca36..647a40e2fea 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1608,6 +1608,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, break; #ifdef CONFIG_HOTPLUG_CPU + case CPU_DYING: + case CPU_DYING_FROZEN: + clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu); + break; case CPU_DEAD: case CPU_DEAD_FROZEN: { diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 63e05d423a0..21a5ca84951 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -273,6 +273,21 @@ out_bc: return ret; } +/* + * Transfer the do_timer job away from a dying cpu. 
+ * + * Called with interrupts disabled. + */ +static void tick_handover_do_timer(int *cpup) +{ + if (*cpup == tick_do_timer_cpu) { + int cpu = cpumask_first(cpu_online_mask); + + tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : + TICK_DO_TIMER_NONE; + } +} + /* * Shutdown an event device on a given cpu: * @@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup) clockevents_exchange_device(dev, NULL); td->evtdev = NULL; } - /* Transfer the do_timer job away from this cpu */ - if (*cpup == tick_do_timer_cpu) { - int cpu = cpumask_first(cpu_online_mask); - - tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : - TICK_DO_TIMER_NONE; - } spin_unlock_irqrestore(&tick_device_lock, flags); } @@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason, tick_broadcast_oneshot_control(reason); break; + case CLOCK_EVT_NOTIFY_CPU_DYING: + tick_handover_do_timer(dev); + break; + case CLOCK_EVT_NOTIFY_CPU_DEAD: tick_shutdown_broadcast_oneshot(dev); tick_shutdown_broadcast(dev); -- cgit v1.2.3 From b0a9b5111abf60ef07eade834f480e89004c7920 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 25 Jan 2009 11:31:36 +0100 Subject: hrtimer: prevent negative expiry value after clock_was_set() Impact: prevent false positive WARN_ON() in clockevents_program_event() clock_was_set() changes the base->offset of CLOCK_REALTIME and enforces the reprogramming of the clockevent device to expire timers which are based on CLOCK_REALTIME. If the clock change is large enough then the subtraction of the timer expiry value and base->offset can become negative which triggers the warning in clockevents_program_event(). Check the subtraction result and set a negative value to 0. Signed-off-by: Thomas Gleixner --- kernel/hrtimer.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel') diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 647a40e2fea..f394d2a42ca 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -501,6 +501,13 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) continue; timer = rb_entry(base->first, struct hrtimer, node); expires = ktime_sub(hrtimer_get_expires(timer), base->offset); + /* + * clock_was_set() has changed base->offset so the + * result might be negative. Fix it up to prevent a + * false positive in clockevents_program_event() + */ + if (expires.tv64 < 0) + expires.tv64 = 0; if (expires.tv64 < cpu_base->expires_next.tv64) cpu_base->expires_next = expires; } -- cgit v1.2.3 From d942fb6c7d391baba3dddb566eb735fbf3df8528 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 26 Jan 2009 17:56:17 +0100 Subject: sched: fix sync wakeups Pawel Dziekonski reported that the openssl benchmark and his quantum chemistry application both show slowdowns due to the scheduler under-parallelizing execution. The reason are pipe wakeups still doing 'sync' wakeups which overrides the normal buddy wakeup logic - even if waker and wakee are loosely coupled. Fix an inversion of logic in the buddy wakeup code. 
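avg_overlap estimates how long a task typically runs before sleeping; when both waker and wakee stay under sysctl_sched_migration_cost they behave like a tightly coupled (e.g. pipe) pair, and the patch now promotes such wakeups to sync instead of demoting them. Schematically:

	if (!sync && current->se.avg_overlap < sysctl_sched_migration_cost &&
		     p->se.avg_overlap < sysctl_sched_migration_cost)
		sync = 1;	/* tightly coupled waker/wakee: treat as sync */
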
Reported-by: Pawel Dziekonski Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/sched.c | 4 ++++ kernel/sched_fair.c | 11 ++--------- 2 files changed, 6 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 52bbf1c842a..770b1f9ebe1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2266,6 +2266,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) if (!sched_feat(SYNC_WAKEUPS)) sync = 0; + if (!sync && (current->se.avg_overlap < sysctl_sched_migration_cost && + p->se.avg_overlap < sysctl_sched_migration_cost)) + sync = 1; + #ifdef CONFIG_SMP if (sched_feat(LB_WAKEUP_UPDATE)) { struct sched_domain *sd; diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 5cc1c162044..fdc41750468 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1179,20 +1179,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq, int idx, unsigned long load, unsigned long this_load, unsigned int imbalance) { - struct task_struct *curr = this_rq->curr; - struct task_group *tg; unsigned long tl = this_load; unsigned long tl_per_task; + struct task_group *tg; unsigned long weight; int balanced; if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) return 0; - if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost || - p->se.avg_overlap > sysctl_sched_migration_cost)) - sync = 0; - /* * If sync wakeup then subtract the (maximum possible) * effect of the currently running task from the load @@ -1419,9 +1414,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) if (!sched_feat(WAKEUP_PREEMPT)) return; - if (sched_feat(WAKEUP_OVERLAP) && (sync || - (se->avg_overlap < sysctl_sched_migration_cost && - pse->avg_overlap < sysctl_sched_migration_cost))) { + if (sched_feat(WAKEUP_OVERLAP) && sync) { resched_task(curr); return; } -- cgit v1.2.3 From 1596e29773eadd96b0a5fc6e736afa52394cafda Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 28 Jan 2009 14:51:38 +0100 Subject: sched: symmetric sync vs avg_overlap Reinstate the weakening of the sync hint if set. This yields a more symmetric usage of avg_overlap. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/sched.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 770b1f9ebe1..242d0d47a70 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2266,9 +2266,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) if (!sched_feat(SYNC_WAKEUPS)) sync = 0; - if (!sync && (current->se.avg_overlap < sysctl_sched_migration_cost && - p->se.avg_overlap < sysctl_sched_migration_cost)) - sync = 1; + if (!sync) { + if (current->se.avg_overlap < sysctl_sched_migration_cost && + p->se.avg_overlap < sysctl_sched_migration_cost) + sync = 1; + } else { + if (current->se.avg_overlap >= sysctl_sched_migration_cost || + p->se.avg_overlap >= sysctl_sched_migration_cost) + sync = 0; + } #ifdef CONFIG_SMP if (sched_feat(LB_WAKEUP_UPDATE)) { -- cgit v1.2.3 From a9f3e2b549f83a9cdab873abf4140be27c05a3f2 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 28 Jan 2009 14:51:39 +0100 Subject: sched: clear buddies more aggressively It was noticed that a task could get re-elected past its run quota due to buddy affinities. This could increase latency a little. Cure it by more aggresively clearing buddy state. 
We do so in two situations: - when we force preempt - when we select a buddy to run Signed-off-by: Mike Galbraith Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index fdc41750468..75248b9ff4c 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -768,8 +768,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; - if (delta_exec > ideal_runtime) + if (delta_exec > ideal_runtime) { resched_task(rq_of(cfs_rq)->curr); + /* + * The current task ran long enough, ensure it doesn't get + * re-elected due to buddy favours. + */ + clear_buddies(cfs_rq, curr); + } } static void @@ -1445,6 +1451,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) do { se = pick_next_entity(cfs_rq); + /* + * If se was a buddy, clear it so that it will have to earn + * the favour again. + */ + clear_buddies(cfs_rq, se); set_next_entity(cfs_rq, se); cfs_rq = group_cfs_rq(se); } while (cfs_rq); -- cgit v1.2.3 From a571bbeafbcc501d9989fbce1cddcd810bd51d71 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 28 Jan 2009 14:51:40 +0100 Subject: sched: fix buddie group latency Similar to the previous patch, by not clearing buddies we can select entities past their run quota, which can increase latency. This means we have to clear group buddies as well. Do not use the group clear for pick_next_task(), otherwise that'll get O(n^2). Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 75248b9ff4c..a7e50ba185a 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -719,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup) __enqueue_entity(cfs_rq, se); } -static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) +static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) { if (cfs_rq->last == se) cfs_rq->last = NULL; @@ -728,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) cfs_rq->next = NULL; } +static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) +{ + for_each_sched_entity(se) + __clear_buddies(cfs_rq_of(se), se); +} + static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) { @@ -1455,7 +1461,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) * If se was a buddy, clear it so that it will have to earn * the favour again. */ - clear_buddies(cfs_rq, se); + __clear_buddies(cfs_rq, se); set_next_entity(cfs_rq, se); cfs_rq = group_cfs_rq(se); } while (cfs_rq); -- cgit v1.2.3 From 3d398703ef06fd97b4c28c86b580546d5b57e7b7 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 31 Jan 2009 23:21:24 +1030 Subject: sched_rt: don't use first_cpu on cpumask created with cpumask_and cpumask_and() only initializes nr_cpu_ids bits, so the (deprecated) first_cpu() might find one of those uninitialized bits if nr_cpu_ids is less than NR_CPUS (as it can be for CONFIG_CPUMASK_OFFSTACK). 
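cpumask_first() only scans the first nr_cpu_ids bits and returns a value >= nr_cpu_ids when the mask is empty, so the result has to be range-checked against nr_cpu_ids rather than NR_CPUS:

	first = cpumask_first(mask);
	if (first < nr_cpu_ids)
		return first;
	return -1;	/* mask was empty */
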
Signed-off-by: Rusty Russell Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 954e1a81b79..bac1061cea2 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -968,8 +968,8 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) if ((this_cpu != -1) && cpu_isset(this_cpu, *mask)) return this_cpu; - first = first_cpu(*mask); - if (first != NR_CPUS) + first = cpumask_first(mask); + if (first < nr_cpu_ids) return first; return -1; -- cgit v1.2.3 From 10b888d6cec2688e65e9e128b14bf98ecd199da2 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 31 Jan 2009 14:50:07 -0800 Subject: irq, x86: fix lock status with numa_migrate_irq_desc Eric Paris reported: > I have an hp dl785g5 which is unable to successfully run > 2.6.29-0.66.rc3.fc11.x86_64 or 2.6.29-rc2-next-20090126. During bootup > (early in userspace daemons starting) I get the below BUG, which quickly > renders the machine dead. I assume it is because sparse_irq_lock never > gets released when the BUG kills that task. Adjust lock sequence when migrating a descriptor with CONFIG_NUMA_MIGRATE_IRQ_DESC enabled. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 5 +++-- kernel/irq/numa_migrate.c | 7 ++++++- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 1c4a1302536..9b0c480c383 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp) vector = ~get_irq_regs()->orig_ax; me = smp_processor_id(); + + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC *descp = desc = move_irq_desc(desc, me); /* get the new one */ cfg = desc->chip_data; #endif - - if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) send_cleanup_vector(cfg); + } } #else static inline void irq_complete_move(struct irq_desc **descp) {} diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index ecf765c6a77..acd88356ac7 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, desc = irq_desc_ptrs[irq]; if (desc && old_desc != desc) - goto out_unlock; + goto out_unlock; node = cpu_to_node(cpu); desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); @@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, init_copy_one_irq_desc(irq, old_desc, desc, cpu); irq_desc_ptrs[irq] = desc; + spin_unlock_irqrestore(&sparse_irq_lock, flags); /* free the old one */ free_one_irq_desc(old_desc, desc); + spin_unlock(&old_desc->lock); kfree(old_desc); + spin_lock(&desc->lock); + + return desc; out_unlock: spin_unlock_irqrestore(&sparse_irq_lock, flags); -- cgit v1.2.3 From 720eba31f47aeade8ec130ca7f4353223c49170f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 3 Feb 2009 13:31:36 +1030 Subject: modules: Use a better scheme for refcounting Current refcounting for modules (done if CONFIG_MODULE_UNLOAD=y) is using a lot of memory. Each 'struct module' contains an [NR_CPUS] array of full cache lines. This patch uses existing infrastructure (percpu_modalloc() & percpu_modfree()) to allocate percpu space for the refcount storage. Instead of wasting NR_CPUS*128 bytes (on i386), we now use nr_cpu_ids*sizeof(local_t) bytes. 
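Concretely, with NR_CPUS=8 the old static array alone is 8 * 128 = 1024 bytes per module regardless of how many CPUs are present, while the replacement is nr_cpu_ids words of percpu data: sizeof(local_t) is a machine word, so a 2-way 32-bit box pays 2 * 4 = 8 bytes per module.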
On a typical distro, where NR_CPUS=8, shiping 2000 modules, we reduce size of module files by about 2 Mbytes. (1Kb per module) Instead of having all refcounters in the same memory node - with TLB misses because of vmalloc() - this new implementation permits to have better NUMA properties, since each CPU will use storage on its preferred node, thanks to percpu storage. Signed-off-by: Eric Dumazet Signed-off-by: Rusty Russell Signed-off-by: Linus Torvalds --- include/linux/module.h | 25 ++++++++++++++++--------- kernel/module.c | 35 +++++++++++++++++++++++++---------- 2 files changed, 41 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/include/linux/module.h b/include/linux/module.h index 4f7ea12463d..f3b8329eb5b 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -219,11 +219,6 @@ void *__symbol_get_gpl(const char *symbol); #endif -struct module_ref -{ - local_t count; -} ____cacheline_aligned; - enum module_state { MODULE_STATE_LIVE, @@ -344,8 +339,11 @@ struct module /* Destruction function. */ void (*exit)(void); - /* Reference counts */ - struct module_ref ref[NR_CPUS]; +#ifdef CONFIG_SMP + char *refptr; +#else + local_t ref; +#endif #endif }; #ifndef MODULE_ARCH_INIT @@ -395,13 +393,22 @@ void __symbol_put(const char *symbol); #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x) void symbol_put_addr(void *addr); +static inline local_t *__module_ref_addr(struct module *mod, int cpu) +{ +#ifdef CONFIG_SMP + return (local_t *) (mod->refptr + per_cpu_offset(cpu)); +#else + return &mod->ref; +#endif +} + /* Sometimes we know we already have a refcount, and it's easier not to handle the error case (which only happens with rmmod --wait). */ static inline void __module_get(struct module *module) { if (module) { BUG_ON(module_refcount(module) == 0); - local_inc(&module->ref[get_cpu()].count); + local_inc(__module_ref_addr(module, get_cpu())); put_cpu(); } } @@ -413,7 +420,7 @@ static inline int try_module_get(struct module *module) if (module) { unsigned int cpu = get_cpu(); if (likely(module_is_live(module))) - local_inc(&module->ref[cpu].count); + local_inc(__module_ref_addr(module, cpu)); else ret = 0; put_cpu(); diff --git a/kernel/module.c b/kernel/module.c index e8b51d41dd7..ba22484a987 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1]; /* Init the unload section of the module. */ static void module_unload_init(struct module *mod) { - unsigned int i; + int cpu; INIT_LIST_HEAD(&mod->modules_which_use_me); - for (i = 0; i < NR_CPUS; i++) - local_set(&mod->ref[i].count, 0); + for_each_possible_cpu(cpu) + local_set(__module_ref_addr(mod, cpu), 0); /* Hold reference count during initialization. */ - local_set(&mod->ref[raw_smp_processor_id()].count, 1); + local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1); /* Backwards compatibility macros put refcount during init. 
*/ mod->waiter = current; } @@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced) unsigned int module_refcount(struct module *mod) { - unsigned int i, total = 0; + unsigned int total = 0; + int cpu; - for (i = 0; i < NR_CPUS; i++) - total += local_read(&mod->ref[i].count); + for_each_possible_cpu(cpu) + total += local_read(__module_ref_addr(mod, cpu)); return total; } EXPORT_SYMBOL(module_refcount); @@ -894,7 +895,7 @@ void module_put(struct module *module) { if (module) { unsigned int cpu = get_cpu(); - local_dec(&module->ref[cpu].count); + local_dec(__module_ref_addr(module, cpu)); /* Maybe they're waiting for us to drop reference? */ if (unlikely(!module_is_live(module))) wake_up_process(module->waiter); @@ -1464,7 +1465,10 @@ static void free_module(struct module *mod) kfree(mod->args); if (mod->percpu) percpu_modfree(mod->percpu); - +#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) + if (mod->refptr) + percpu_modfree(mod->refptr); +#endif /* Free lock-classes: */ lockdep_free_key_range(mod->module_core, mod->core_size); @@ -2011,6 +2015,14 @@ static noinline struct module *load_module(void __user *umod, if (err < 0) goto free_mod; +#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) + mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), + mod->name); + if (!mod->refptr) { + err = -ENOMEM; + goto free_mod; + } +#endif if (pcpuindex) { /* We have a special allocation for this section. */ percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, @@ -2018,7 +2030,7 @@ static noinline struct module *load_module(void __user *umod, mod->name); if (!percpu) { err = -ENOMEM; - goto free_mod; + goto free_percpu; } sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; mod->percpu = percpu; @@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod, free_percpu: if (percpu) percpu_modfree(percpu); +#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) + percpu_modfree(mod->refptr); +#endif free_mod: kfree(args); free_hdr: -- cgit v1.2.3 From 229c4ef8ae56d69f8dec64533bf1c7f8070c1a4a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 3 Feb 2009 20:39:04 +0100 Subject: ftrace: do_each_pid_task() needs rcu lock "ftrace: use struct pid" commit 978f3a45d9499c7a447ca7615455cefb63d44165 converted ftrace_pid_trace to "struct pid*". But we can't use do_each_pid_task() without rcu_read_lock() even if we know the pid itself can't go away (it was pinned in ftrace_pid_write). The exiting task can detach itself from this pid at any moment. 
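Pinning the pid keeps the struct pid itself from being freed, but the pid->tasks list that do_each_pid_task() walks is only stable under RCU, hence the bracketing added below:

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);		/* or clear_tsk_trace_trace(p) */
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
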
Signed-off-by: Oleg Nesterov Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7dcf6e9f2b0..9a236ffe2aa 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1737,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid) { struct task_struct *p; + rcu_read_lock(); do_each_pid_task(pid, PIDTYPE_PID, p) { clear_tsk_trace_trace(p); } while_each_pid_task(pid, PIDTYPE_PID, p); + rcu_read_unlock(); + put_pid(pid); } @@ -1747,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid) { struct task_struct *p; + rcu_read_lock(); do_each_pid_task(pid, PIDTYPE_PID, p) { set_tsk_trace_trace(p); } while_each_pid_task(pid, PIDTYPE_PID, p); + rcu_read_unlock(); } static void clear_ftrace_pid_task(struct pid **pid) -- cgit v1.2.3 From 58763a297405024d23d8f1d0bba3e6603660c4b6 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 4 Feb 2009 15:11:58 -0800 Subject: kernel/async.c: fix printk warnings alpha: kernel/async.c: In function 'run_one_entry': kernel/async.c:141: warning: format '%lli' expects type 'long long int', but argument 2 has type 'async_cookie_t' kernel/async.c:149: warning: format '%lli' expects type 'long long int', but argument 2 has type 'async_cookie_t' kernel/async.c:149: warning: format '%lld' expects type 'long long int', but argument 4 has type 's64' kernel/async.c: In function 'async_synchronize_cookie_special': kernel/async.c:250: warning: format '%lli' expects type 'long long int', but argument 3 has type 's64' Cc: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/async.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/async.c b/kernel/async.c index 608b32b4281..67a2be71f51 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -138,15 +138,18 @@ static void run_one_entry(void) /* 3) run it (and print duration)*/ if (initcall_debug && system_state == SYSTEM_BOOTING) { - printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current)); + printk("calling %lli_%pF @ %i\n", (long long)entry->cookie, + entry->func, task_pid_nr(current)); calltime = ktime_get(); } entry->func(entry->data, entry->cookie); if (initcall_debug && system_state == SYSTEM_BOOTING) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); - printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie, - entry->func, ktime_to_ns(delta) >> 10); + printk("initcall %lli_%pF returned 0 after %lld usecs\n", + (long long)entry->cookie, + entry->func, + (long long)ktime_to_ns(delta) >> 10); } /* 4) remove it from the running queue */ @@ -247,7 +250,8 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r delta = ktime_sub(endtime, starttime); printk("async_continuing @ %i after %lli usec\n", - task_pid_nr(current), ktime_to_ns(delta) >> 10); + task_pid_nr(current), + (long long)ktime_to_ns(delta) >> 10); } } EXPORT_SYMBOL_GPL(async_synchronize_cookie_special); -- cgit v1.2.3 From 60fd760fb9ff7034360bab7137c917c0330628c2 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 4 Feb 2009 15:12:06 -0800 Subject: revert "rlimit: permit setting RLIMIT_NOFILE to RLIM_INFINITY" Revert commit 0c2d64fb6cae9aae480f6a46cfe79f8d7d48b59f because it causes (arguably poorly designed) existing userspace to spend interminable periods closing billions of not-open file descriptors. 
We could bring this back, with some sort of opt-in tunable in /proc, which defaults to "off". Peter's alanysis follows: : I spent several hours trying to get to the bottom of a serious : performance issue that appeared on one of our servers after upgrading to : 2.6.28. In the end it's what could be considered a userspace bug that : was triggered by a change in 2.6.28. Since this might also affect other : people I figured I'd at least document what I found here, and maybe we : can even do something about it: : : : So, I upgraded some of debian.org's machines to 2.6.28.1 and immediately : the team maintaining our ftp archive complained that one of their : scripts that previously ran in a few minutes still hadn't even come : close to being done after an hour or so. Downgrading to 2.6.27 fixed : that. : : Turns out that script is forking a lot and something in it or python or : whereever closes all the file descriptors it doesn't want to pass on. : That is, it starts at zero and goes up to ulimit -n/RLIMIT_NOFILE and : closes them all with a few exceptions. : : Turns out that takes a long time when your limit -n is now 2^20 (1048576). : : With 2.6.27.* the ulimit -n was the standard 1024, but with 2.6.28 it is : now a thousand times that. : : 2.6.28 included a patch titled "rlimit: permit setting RLIMIT_NOFILE to : RLIM_INFINITY" (0c2d64fb6cae9aae480f6a46cfe79f8d7d48b59f)[1] that : allows, as the title implies, to set the limit for number of files to : infinity. : : Closer investigation showed that the broken default ulimit did not apply : to "system" processes (like stuff started from init). In the end I : could establish that all processes that passed through pam_limit at one : point had the bad resource limit. : : Apparently the pam library in Debian etch (4.0) initializes the limits : to some default values when it doesn't have any settings in limit.conf : to override them. Turns out that for nofiles this is RLIM_INFINITY. : Commenting out "case RLIMIT_NOFILE" in pam_limit.c:267 of our pam : package version 0.79-5 fixes that - tho I'm not sure what side effects : that has. : : Debian lenny (the upcoming 5.0 version) doesn't have this issue as it : uses a different pam (version). 
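The userspace idiom that melts down is the classic close-everything-before-exec loop, which costs one syscall per possible descriptor rather than per open one; a sketch in C of what the script effectively does (close_inherited_fds is an illustrative name):

	#include <sys/resource.h>
	#include <unistd.h>

	static void close_inherited_fds(void)
	{
		struct rlimit rl;

		getrlimit(RLIMIT_NOFILE, &rl);
		for (int fd = 3; fd < (int)rl.rlim_cur; fd++)
			close(fd);	/* one syscall per *possible* descriptor */
	}

At the old default of 1024 this is negligible; at 2^20 it is roughly a million close() calls on every fork in the script.
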
Reported-by: Peter Palfrader Cc: Adam Tkac Cc: Michael Kerrisk Cc: [2.6.28.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index e7dc0e10a48..f145c415bc1 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1525,22 +1525,14 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) return -EINVAL; if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) return -EFAULT; + if (new_rlim.rlim_cur > new_rlim.rlim_max) + return -EINVAL; old_rlim = current->signal->rlim + resource; if ((new_rlim.rlim_max > old_rlim->rlim_max) && !capable(CAP_SYS_RESOURCE)) return -EPERM; - - if (resource == RLIMIT_NOFILE) { - if (new_rlim.rlim_max == RLIM_INFINITY) - new_rlim.rlim_max = sysctl_nr_open; - if (new_rlim.rlim_cur == RLIM_INFINITY) - new_rlim.rlim_cur = sysctl_nr_open; - if (new_rlim.rlim_max > sysctl_nr_open) - return -EPERM; - } - - if (new_rlim.rlim_cur > new_rlim.rlim_max) - return -EINVAL; + if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) + return -EPERM; retval = security_task_setrlimit(resource, &new_rlim); if (retval) -- cgit v1.2.3 From 777c6c5f1f6e757ae49ecca2ed72d6b1f523c007 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 4 Feb 2009 15:12:14 -0800 Subject: wait: prevent exclusive waiter starvation With exclusive waiters, every process woken up through the wait queue must ensure that the next waiter down the line is woken when it has finished. Interruptible waiters don't do that when aborting due to a signal. And if an aborting waiter is concurrently woken up through the waitqueue, no one will ever wake up the next waiter. This has been observed with __wait_on_bit_lock() used by lock_page_killable(): the first contender on the queue was aborting when the actual lock holder woke it up concurrently. The aborted contender didn't acquire the lock and therefore never did an unlock followed by waking up the next waiter. Add abort_exclusive_wait() which removes the process' wait descriptor from the waitqueue, iff still queued, or wakes up the next waiter otherwise. It does so under the waitqueue lock. Racing with a wake up means the aborting process is either already woken (removed from the queue) and will wake up the next waiter, or it will remove itself from the queue and the concurrent wake up will apply to the next waiter after it. Use abort_exclusive_wait() in __wait_event_interruptible_exclusive() and __wait_on_bit_lock() when they were interrupted by means other than a wake up through the queue.
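Schematically, the fixed interruptible exclusive wait looks like the sketch below; this is only a simplified rendering of the __wait_event_interruptible_exclusive() change in the diff that follows, with the macro plumbing elided and wq, __wait, condition and ret standing in for the macro arguments and locals. The point is that the signal path may no longer just return: if a concurrent exclusive wake-up already consumed this waiter, the wake-up must be passed on.

	for (;;) {
		prepare_to_wait_exclusive(&wq, &__wait, TASK_INTERRUPTIBLE);
		if (condition) {
			/* Satisfied: simply dequeue ourselves and proceed. */
			finish_wait(&wq, &__wait);
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		ret = -ERESTARTSYS;
		/*
		 * Aborting on a signal: remove ourselves if still queued;
		 * otherwise we were already woken exclusively, so wake the
		 * next exclusive waiter in our place.
		 */
		abort_exclusive_wait(&wq, &__wait, TASK_INTERRUPTIBLE, NULL);
		break;
	}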
[akpm@linux-foundation.org: coding-style fixes] Reported-by: Chris Mason Signed-off-by: Johannes Weiner Mentored-by: Oleg Nesterov Cc: Peter Zijlstra Cc: Matthew Wilcox Cc: Chuck Lever Cc: Nick Piggin Cc: Ingo Molnar Cc: ["after some testing"] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/wait.h | 11 ++++++++-- kernel/sched.c | 4 ++-- kernel/wait.c | 59 +++++++++++++++++++++++++++++++++++++++++++++------- 3 files changed, 63 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/include/linux/wait.h b/include/linux/wait.h index ef609f842fa..a210ede73b5 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -132,6 +132,8 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, list_del(&old->task_list); } +void __wake_up_common(wait_queue_head_t *q, unsigned int mode, + int nr_exclusive, int sync, void *key); void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); @@ -333,16 +335,19 @@ do { \ for (;;) { \ prepare_to_wait_exclusive(&wq, &__wait, \ TASK_INTERRUPTIBLE); \ - if (condition) \ + if (condition) { \ + finish_wait(&wq, &__wait); \ break; \ + } \ if (!signal_pending(current)) { \ schedule(); \ continue; \ } \ ret = -ERESTARTSYS; \ + abort_exclusive_wait(&wq, &__wait, \ + TASK_INTERRUPTIBLE, NULL); \ break; \ } \ - finish_wait(&wq, &__wait); \ } while (0) #define wait_event_interruptible_exclusive(wq, condition) \ @@ -431,6 +436,8 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); +void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, + unsigned int mode, void *key); int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); diff --git a/kernel/sched.c b/kernel/sched.c index 242d0d47a70..8ee437a5ec1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4697,8 +4697,8 @@ EXPORT_SYMBOL(default_wake_function); * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns * zero in this (rare) case, and we handle it by continuing to scan the queue. */ -static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, - int nr_exclusive, int sync, void *key) +void __wake_up_common(wait_queue_head_t *q, unsigned int mode, + int nr_exclusive, int sync, void *key) { wait_queue_t *curr, *next; diff --git a/kernel/wait.c b/kernel/wait.c index cd87131f2fc..42a2dbc181c 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) } EXPORT_SYMBOL(prepare_to_wait_exclusive); +/* + * finish_wait - clean up after waiting in a queue + * @q: waitqueue waited on + * @wait: wait descriptor + * + * Sets current thread back to running state and removes + * the wait descriptor from the given waitqueue if still + * queued. 
+ */ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) { unsigned long flags; @@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) } EXPORT_SYMBOL(finish_wait); +/* + * abort_exclusive_wait - abort exclusive waiting in a queue + * @q: waitqueue waited on + * @wait: wait descriptor + * @state: runstate of the waiter to be woken + * @key: key to identify a wait bit queue or %NULL + * + * Sets current thread back to running state and removes + * the wait descriptor from the given waitqueue if still + * queued. + * + * Wakes up the next waiter if the caller is concurrently + * woken up through the queue. + * + * This prevents waiter starvation where an exclusive waiter + * aborts and is woken up concurrently and no one wakes up + * the next waiter. + */ +void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, + unsigned int mode, void *key) +{ + unsigned long flags; + + __set_current_state(TASK_RUNNING); + spin_lock_irqsave(&q->lock, flags); + if (!list_empty(&wait->task_list)) + list_del_init(&wait->task_list); + else if (waitqueue_active(q)) + __wake_up_common(q, mode, 1, 0, key); + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(abort_exclusive_wait); + int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) { int ret = default_wake_function(wait, mode, sync, key); @@ -177,17 +219,20 @@ int __sched __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, int (*action)(void *), unsigned mode) { - int ret = 0; - do { + int ret; + prepare_to_wait_exclusive(wq, &q->wait, mode); - if (test_bit(q->key.bit_nr, q->key.flags)) { - if ((ret = (*action)(q->key.flags))) - break; - } + if (!test_bit(q->key.bit_nr, q->key.flags)) + continue; + ret = action(q->key.flags); + if (!ret) + continue; + abort_exclusive_wait(wq, &q->wait, mode, &q->key); + return ret; } while (test_and_set_bit(q->key.bit_nr, q->key.flags)); finish_wait(wq, &q->wait); - return ret; + return 0; } EXPORT_SYMBOL(__wait_on_bit_lock); -- cgit v1.2.3 From 04ec93fe9bc98e3bd8560f79f56fed66dfae40d5 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 6 Feb 2009 08:17:19 +0000 Subject: fork.c: fix NULL pointer dereference when nr_threads == threads-max I happened to fork lots of processes and hit a NULL pointer dereference. This is because in copy_process(), when the max_threads check trips, 0 is returned instead of -EAGAIN. The bug was introduced by "CRED: Detach the credentials from task_struct" (commit f1752eec6145c97163dbce62d17cf5d928e28a27). Signed-off-by: Li Zefan Signed-off-by: David Howells Acked-by: James Morris Signed-off-by: Linus Torvalds --- kernel/fork.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 242a706e772..6d5dbb7a13e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1005,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. */ + retval = -EAGAIN; if (nr_threads >= max_threads) goto bad_fork_cleanup_count; -- cgit v1.2.3
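To spell out why the missing assignment becomes a NULL dereference rather than a clean failure: copy_process() reports errors as ERR_PTR(retval), and at the max_threads check retval is still 0 (left there by the successful copy_creds() call that the CRED patch placed just above). ERR_PTR(0) is NULL, which IS_ERR() does not treat as an error, so the caller proceeds with the "new task". The sketch below is a condensed, assumed rendering of the caller side in do_fork(), not the exact code; the argument list and error path are abbreviated:

	struct task_struct *p;
	long nr;

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	if (IS_ERR(p))			/* IS_ERR(NULL) is false ... */
		return PTR_ERR(p);

	nr = task_pid_vnr(p);		/* ... so this dereferences a NULL task pointer */

With retval explicitly set to -EAGAIN before the check, the same path returns ERR_PTR(-EAGAIN) and the fork fails with -EAGAIN as intended.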