Diffstat (limited to 'kernel')
 kernel/time/tick-broadcast.c | 24
 kernel/time/tick-sched.c     | 12
 kernel/time/timekeeping.c    | 10
 kernel/user.c                | 45
 kernel/user_namespace.c      |  4
 kernel/utsname.c             |  2
 6 files changed, 74 insertions(+), 23 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index db8e0f3d409b..aab881c86a1a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -382,12 +382,23 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
+ int cpu = smp_processor_id();
+
+ /*
+ * If the CPU is marked for broadcast, enforce oneshot
+ * broadcast mode. The jinxed VAIO does not resume otherwise.
+ * No idea why it ends up in a lower C State during resume
+ * without notifying the clock events layer.
+ */
+ if (cpu_isset(cpu, tick_broadcast_mask))
+ cpu_set(cpu, tick_broadcast_oneshot_mask);
+
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
if(!cpus_empty(tick_broadcast_oneshot_mask))
tick_broadcast_set_event(ktime_get(), 1);
- return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
+ return cpu_isset(cpu, tick_broadcast_oneshot_mask);
}
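
The comment added above captures the whole fix: a CPU still listed in the broadcast mask is force-enrolled into the oneshot mask before the broadcast device is reprogrammed. A user-space sketch of that check-and-set flow, with plain bitmasks standing in for the kernel cpumask API (all names below are illustrative, not kernel symbols):

#include <stdio.h>

/* Stand-ins for tick_broadcast_mask / tick_broadcast_oneshot_mask. */
static unsigned long broadcast_mask;
static unsigned long oneshot_mask;

static int cpu_in(int cpu, unsigned long mask)  { return (mask >> cpu) & 1; }
static void cpu_add(int cpu, unsigned long *mask) { *mask |= 1UL << cpu; }

/* Models tick_resume_broadcast_oneshot(): returns nonzero if the
 * resuming CPU must keep relying on the broadcast device. */
static int resume_broadcast_oneshot(int cpu)
{
	/* A CPU marked for broadcast is forced into oneshot broadcast
	 * mode, mirroring the VAIO workaround above. */
	if (cpu_in(cpu, broadcast_mask))
		cpu_add(cpu, &oneshot_mask);

	if (oneshot_mask)
		printf("reprogram broadcast device for next event\n");

	return cpu_in(cpu, oneshot_mask);
}

int main(void)
{
	cpu_add(1, &broadcast_mask);	/* CPU 1 needs broadcast help */
	printf("cpu0 -> %d\n", resume_broadcast_oneshot(0));	/* 0 */
	printf("cpu1 -> %d\n", resume_broadcast_oneshot(1));	/* 1 */
	return 0;
}
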
/*
@@ -549,20 +560,17 @@ void tick_broadcast_switch_to_oneshot(void)
*/
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
- struct clock_event_device *bc;
unsigned long flags;
unsigned int cpu = *cpup;
spin_lock_irqsave(&tick_broadcast_lock, flags);
- bc = tick_broadcast_device.evtdev;
+ /*
+ * Clear the broadcast mask flag for the dead cpu, but do not
+ * stop the broadcast device!
+ */
cpu_clear(cpu, tick_broadcast_oneshot_mask);
- if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
- if (bc && cpus_empty(tick_broadcast_oneshot_mask))
- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
- }
-
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b416995b9757..8c3fef1db09c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -160,6 +160,18 @@ void tick_nohz_stop_sched_tick(void)
cpu = smp_processor_id();
ts = &per_cpu(tick_cpu_sched, cpu);
+ /*
+ * If this cpu is offline and it is the one which updates
+ * jiffies, then give up the assignment and let it be taken by
+ * the cpu which runs the tick timer next. If we don't drop
+ * this here the jiffies might be stale and do_timer() never
+ * invoked.
+ */
+ if (unlikely(!cpu_online(cpu))) {
+ if (cpu == tick_do_timer_cpu)
+ tick_do_timer_cpu = -1;
+ }
+
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
goto end;
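
The new check hands off the jiffies-update duty when the CPU that owns it goes offline; the next CPU to run its tick adopts the duty, so do_timer() keeps getting invoked. A minimal model of that handoff (plain C, invented names; -1 marks the duty as unassigned, just as in the patch):

#include <stdio.h>
#include <stdbool.h>

static int tick_do_timer_cpu = 0;	/* CPU currently updating jiffies */
static bool online[4] = { false, true, true, true };	/* CPU 0 went down */

/* Models the check added to tick_nohz_stop_sched_tick(): an offline
 * CPU that still owns the duty gives it up so jiffies cannot go stale. */
static void stop_sched_tick(int cpu)
{
	if (!online[cpu] && cpu == tick_do_timer_cpu)
		tick_do_timer_cpu = -1;
}

/* Models the periodic tick: the first ticking CPU adopts an
 * unassigned duty. */
static void sched_tick(int cpu)
{
	if (tick_do_timer_cpu == -1)
		tick_do_timer_cpu = cpu;
}

int main(void)
{
	stop_sched_tick(0);		/* dying CPU 0 drops the duty */
	printf("owner after drop: %d\n", tick_do_timer_cpu);	/* -1 */
	sched_tick(2);			/* next ticking CPU picks it up */
	printf("owner after tick: %d\n", tick_do_timer_cpu);	/* 2 */
	return 0;
}
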
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index acc417b5a9b7..4ad79f6bdec6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -217,6 +217,7 @@ static void change_clocksource(void)
}
#else
static inline void change_clocksource(void) { }
+static inline s64 __get_nsec_offset(void) { return 0; }
#endif
/**
@@ -280,6 +281,8 @@ void __init timekeeping_init(void)
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
+/* xtime offset when we went into suspend */
+static s64 timekeeping_suspend_nsecs;
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -305,6 +308,8 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic.tv_sec -= sleep_length;
total_sleep_time += sleep_length;
}
+ /* Make sure that we have the correct xtime reference */
+ timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
/* re-base the last cycle value */
clock->cycle_last = clocksource_read(clock);
clock->error = 0;
@@ -325,9 +330,12 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
unsigned long flags;
+ timekeeping_suspend_time = read_persistent_clock();
+
write_seqlock_irqsave(&xtime_lock, flags);
+ /* Get the current xtime offset */
+ timekeeping_suspend_nsecs = __get_nsec_offset();
timekeeping_suspended = 1;
- timekeeping_suspend_time = read_persistent_clock();
write_sequnlock_irqrestore(&xtime_lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
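
The timekeeping hunks save the nanoseconds accumulated since the last tick at suspend time and fold them back into xtime on resume, so that sub-tick offset is not lost across a suspend cycle. A runnable sketch of the bookkeeping, with a simplified timespec_add_ns() and a made-up offset standing in for __get_nsec_offset():

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct timespec { long long tv_sec; long long tv_nsec; };

static struct timespec xtime = { 1000, 900000000 };
static long long suspend_nsecs;		/* timekeeping_suspend_nsecs */

/* Simplified timespec_add_ns(): add, then carry whole seconds. */
static void timespec_add_ns(struct timespec *ts, long long ns)
{
	ts->tv_nsec += ns;
	while (ts->tv_nsec >= NSEC_PER_SEC) {
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}

int main(void)
{
	/* Suspend: capture the not-yet-accumulated offset (a made-up
	 * 250ms here; the kernel reads it from the clocksource). */
	suspend_nsecs = 250000000;

	/* Resume: fold the saved offset back into xtime. */
	timespec_add_ns(&xtime, suspend_nsecs);

	printf("xtime = %lld.%09llds\n", xtime.tv_sec, xtime.tv_nsec);
	/* prints 1001.150000000s: the 250ms survived the suspend */
	return 0;
}
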
diff --git a/kernel/user.c b/kernel/user.c
index e7d11cef6998..9ca2848fc356 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -55,25 +55,22 @@ struct user_struct root_user = {
/*
* These routines must be called with the uidhash spinlock held!
*/
-static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
+static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
- list_add(&up->uidhash_list, hashent);
+ hlist_add_head(&up->uidhash_node, hashent);
}
static inline void uid_hash_remove(struct user_struct *up)
{
- list_del(&up->uidhash_list);
+ hlist_del_init(&up->uidhash_node);
}
-static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
+static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
- struct list_head *up;
-
- list_for_each(up, hashent) {
- struct user_struct *user;
-
- user = list_entry(up, struct user_struct, uidhash_list);
+ struct user_struct *user;
+ struct hlist_node *h;
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
if(user->uid == uid) {
atomic_inc(&user->__count);
return user;
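
The list-to-hlist conversion above shrinks each hash bucket head from two pointers to one, which adds up across the UIDHASH_SZ buckets of every user namespace. A self-contained model of the lookup pattern (a minimal singly linked hlist in user-space C; the types and helpers below are illustrative, not the kernel's):

#include <stdio.h>
#include <stddef.h>

/* Minimal hlist: one-pointer head, nodes chained via next. */
struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

struct user { unsigned uid; int count; struct hlist_node node; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct user *find_uid(unsigned uid, struct hlist_head *head)
{
	struct hlist_node *h;

	/* Open-coded hlist_for_each_entry(): walk the chain, recover
	 * the enclosing struct, bump the refcount on a hit. */
	for (h = head->first; h; h = h->next) {
		struct user *u = container_of(h, struct user, node);
		if (u->uid == uid) {
			u->count++;
			return u;
		}
	}
	return NULL;
}

int main(void)
{
	struct user a = { 0, 1, { NULL } }, b = { 1000, 1, { NULL } };
	struct hlist_head bucket = { &a.node };
	struct user *u;

	a.node.next = &b.node;		/* chain: a -> b */

	u = find_uid(1000, &bucket);
	printf("uid=%u count=%d\n", u->uid, u->count);	/* uid=1000 count=2 */
	return 0;
}
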
@@ -122,7 +119,7 @@ void free_uid(struct user_struct *up)
struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
{
- struct list_head *hashent = uidhashentry(ns, uid);
+ struct hlist_head *hashent = uidhashentry(ns, uid);
struct user_struct *up;
spin_lock_irq(&uidhash_lock);
@@ -202,6 +199,30 @@ void switch_uid(struct user_struct *new_user)
suid_keys(current);
}
+void release_uids(struct user_namespace *ns)
+{
+ int i;
+ unsigned long flags;
+ struct hlist_head *head;
+ struct hlist_node *nd;
+
+ spin_lock_irqsave(&uidhash_lock, flags);
+ /*
+ * collapse the chains so that the user_struct-s will
+ * be still alive, but not in hashes. subsequent free_uid()
+ * will free them.
+ */
+ for (i = 0; i < UIDHASH_SZ; i++) {
+ head = ns->uidhash_table + i;
+ while (!hlist_empty(head)) {
+ nd = head->first;
+ hlist_del_init(nd);
+ }
+ }
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+
+ free_uid(ns->root_user);
+}
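
release_uids() empties every chain without freeing the entries: hlist_del_init() only unlinks, so a live user_struct survives until its own refcount drops and free_uid() reclaims it. A sketch of that unlink-only sweep (same illustrative hlist shape as above; the head-based hlist_del_init() here is a simplification of the kernel's pprev-based one):

#include <stdio.h>

struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

/* Simplified hlist_del_init(): unlink the first node only;
 * the entry itself stays allocated. */
static void hlist_del_init(struct hlist_head *head, struct hlist_node *n)
{
	head->first = n->next;
	n->next = NULL;
}

#define UIDHASH_SZ 4

int main(void)
{
	struct hlist_node a = { NULL }, b = { &a };
	struct hlist_head table[UIDHASH_SZ] = { { &b } };  /* chain: b -> a */
	int i, unlinked = 0;

	/* Collapse the chains, as release_uids() does: entries leave
	 * the hash but are not freed; a later refcount drop (free_uid()
	 * in the kernel) reclaims each one. */
	for (i = 0; i < UIDHASH_SZ; i++) {
		while (table[i].first) {
			hlist_del_init(&table[i], table[i].first);
			unlinked++;
		}
	}

	printf("unlinked %d entries, all chains empty\n", unlinked);  /* 2 */
	return 0;
}
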
static int __init uid_cache_init(void)
{
@@ -211,7 +232,7 @@ static int __init uid_cache_init(void)
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
for(n = 0; n < UIDHASH_SZ; ++n)
- INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
+ INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
/* Insert the root user immediately (init already runs as root) */
spin_lock_irq(&uidhash_lock);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 85af9422ea6e..7af90fc4f0fd 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -39,7 +39,7 @@ static struct user_namespace *clone_user_ns(struct user_namespace *old_ns)
kref_init(&ns->kref);
for (n = 0; n < UIDHASH_SZ; ++n)
- INIT_LIST_HEAD(ns->uidhash_table + n);
+ INIT_HLIST_HEAD(ns->uidhash_table + n);
/* Insert new root user. */
ns->root_user = alloc_uid(ns, 0);
@@ -81,7 +81,7 @@ void free_user_ns(struct kref *kref)
struct user_namespace *ns;
ns = container_of(kref, struct user_namespace, kref);
- free_uid(ns->root_user);
+ release_uids(ns);
kfree(ns);
}
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 9d8180a0f0d8..816d7b24fa03 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -28,7 +28,9 @@ static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns)
if (!ns)
return ERR_PTR(-ENOMEM);
+ down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
+ up_read(&uts_sem);
kref_init(&ns->kref);
return ns;
}
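
The utsname fix takes uts_sem for reading around the memcpy, so a concurrent sethostname()/setdomainname() writer, which holds the semaphore for writing, can never be observed mid-update. A user-space analogue using a pthread rwlock (illustrative names, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t uts_sem = PTHREAD_RWLOCK_INITIALIZER;
static char hostname[65] = "oldname";

/* Writer side: what sethostname() does under uts_sem. */
static void set_hostname(const char *name)
{
	pthread_rwlock_wrlock(&uts_sem);
	strncpy(hostname, name, sizeof(hostname) - 1);
	pthread_rwlock_unlock(&uts_sem);
}

/* Reader side: the clone path copies the whole name atomically with
 * respect to writers, as in the patched clone_uts_ns(). */
static void clone_name(char *dst, size_t len)
{
	pthread_rwlock_rdlock(&uts_sem);
	memcpy(dst, hostname, len);
	pthread_rwlock_unlock(&uts_sem);
}

int main(void)
{
	char copy[65];

	set_hostname("newname");
	clone_name(copy, sizeof(copy));
	printf("cloned: %s\n", copy);	/* never a torn mix of old/new */
	return 0;
}
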