Diffstat (limited to 'kernel/exit.c')
 kernel/exit.c | 180 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 125 insertions(+), 55 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 073005b1cfb..d3ad54677f9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -52,6 +52,11 @@
static void exit_mm(struct task_struct * tsk);
+static inline int task_detached(struct task_struct *p)
+{
+ return p->exit_signal == -1;
+}
+
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
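
[Editor's note: a "detached" task is one with exit_signal == -1, i.e. nobody
wait()s for it and it reaps itself. For context, a hedged sketch of where that
value is assigned (copy_process() in kernel/fork.c of this era; the exact form
may differ):

	/* Threads created with CLONE_THREAD are detached: only the
	 * group leader signals the parent and is reaped by it. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 :
					(clone_flags & CSIGNAL);
]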
@@ -160,7 +165,7 @@ repeat:
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
- BUG_ON(leader->exit_signal == -1);
+ BUG_ON(task_detached(leader));
do_notify_parent(leader, leader->exit_signal);
/*
* If we were the last child thread and the leader has
@@ -170,7 +175,7 @@ repeat:
* do_notify_parent() will have marked it self-reaping in
* that case.
*/
- zap_leader = (leader->exit_signal == -1);
+ zap_leader = task_detached(leader);
}
write_unlock_irq(&tasklist_lock);
@@ -329,13 +334,11 @@ void __set_special_pids(struct pid *pid)
pid_t nr = pid_nr(pid);
if (task_session(curr) != pid) {
- detach_pid(curr, PIDTYPE_SID);
- attach_pid(curr, PIDTYPE_SID, pid);
+ change_pid(curr, PIDTYPE_SID, pid);
set_task_session(curr, nr);
}
if (task_pgrp(curr) != pid) {
- detach_pid(curr, PIDTYPE_PGID);
- attach_pid(curr, PIDTYPE_PGID, pid);
+ change_pid(curr, PIDTYPE_PGID, pid);
set_task_pgrp(curr, nr);
}
}
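
[Editor's note: change_pid() is a new helper in kernel/pid.c introduced
alongside this patch. A sketch of its intent in terms of the two primitives it
replaces here; the real helper may avoid freeing the struct pid between the
two steps:

	void change_pid(struct task_struct *task, enum pid_type type,
			struct pid *pid)
	{
		detach_pid(task, type);		/* drop the old pid link */
		attach_pid(task, type, pid);	/* hash the new one */
	}
]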
@@ -507,10 +510,9 @@ void put_files_struct(struct files_struct *files)
}
}
-EXPORT_SYMBOL(put_files_struct);
-
-void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
+void reset_files_struct(struct files_struct *files)
{
+ struct task_struct *tsk = current;
struct files_struct *old;
old = tsk->files;
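
[Editor's note: the dropped task argument reflects that every caller operates
on current, so the helper now fetches it directly. A hypothetical caller
sketch, assuming dup_fd() (the existing descriptor-table copy helper in
kernel/fork.c); error handling elided:

	int err;
	struct files_struct *copy = dup_fd(current->files, &err);

	if (copy)
		reset_files_struct(copy);	/* implicitly acts on current */
]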
@@ -519,9 +521,8 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
task_unlock(tsk);
put_files_struct(old);
}
-EXPORT_SYMBOL(reset_files_struct);
-static void __exit_files(struct task_struct *tsk)
+void exit_files(struct task_struct *tsk)
{
struct files_struct * files = tsk->files;
@@ -533,12 +534,7 @@ static void __exit_files(struct task_struct *tsk)
}
}
-void exit_files(struct task_struct *tsk)
-{
- __exit_files(tsk);
-}
-
-static void __put_fs_struct(struct fs_struct *fs)
+void put_fs_struct(struct fs_struct *fs)
{
/* No need to hold fs->lock if we are killing it */
if (atomic_dec_and_test(&fs->count)) {
@@ -550,12 +546,7 @@ static void __put_fs_struct(struct fs_struct *fs)
}
}
-void put_fs_struct(struct fs_struct *fs)
-{
- __put_fs_struct(fs);
-}
-
-static void __exit_fs(struct task_struct *tsk)
+void exit_fs(struct task_struct *tsk)
{
struct fs_struct * fs = tsk->fs;
@@ -563,16 +554,93 @@ static void __exit_fs(struct task_struct *tsk)
task_lock(tsk);
tsk->fs = NULL;
task_unlock(tsk);
- __put_fs_struct(fs);
+ put_fs_struct(fs);
}
}
-void exit_fs(struct task_struct *tsk)
+EXPORT_SYMBOL_GPL(exit_fs);
+
+#ifdef CONFIG_MM_OWNER
+/*
+ * Task p is exiting and it owned mm; let's find a new owner for it.
+ */
+static inline int
+mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
- __exit_fs(tsk);
+ /*
+ * If there are other users of the mm and the owner (us) is exiting,
+ * we need to find a new owner to take on the responsibility.
+ */
+ if (!mm)
+ return 0;
+ if (atomic_read(&mm->mm_users) <= 1)
+ return 0;
+ if (mm->owner != p)
+ return 0;
+ return 1;
}
-EXPORT_SYMBOL_GPL(exit_fs);
+void mm_update_next_owner(struct mm_struct *mm)
+{
+ struct task_struct *c, *g, *p = current;
+
+retry:
+ if (!mm_need_new_owner(mm, p))
+ return;
+
+ read_lock(&tasklist_lock);
+ /*
+ * Search in the children
+ */
+ list_for_each_entry(c, &p->children, sibling) {
+ if (c->mm == mm)
+ goto assign_new_owner;
+ }
+
+ /*
+ * Search in the siblings
+ */
+ list_for_each_entry(c, &p->parent->children, sibling) {
+ if (c->mm == mm)
+ goto assign_new_owner;
+ }
+
+ /*
+ * Search through everything else. We should not get
+ * here often
+ */
+ do_each_thread(g, c) {
+ if (c->mm == mm)
+ goto assign_new_owner;
+ } while_each_thread(g, c);
+
+ read_unlock(&tasklist_lock);
+ return;
+
+assign_new_owner:
+ BUG_ON(c == p);
+ get_task_struct(c);
+ /*
+ * The task_lock protects c->mm from changing.
+ * We always want mm->owner->mm == mm
+ */
+ task_lock(c);
+ /*
+ * Delay read_unlock() until we have the task_lock(),
+ * to ensure that c does not slip away underneath us.
+ */
+ read_unlock(&tasklist_lock);
+ if (c->mm != mm) {
+ task_unlock(c);
+ put_task_struct(c);
+ goto retry;
+ }
+ cgroup_mm_owner_callbacks(mm->owner, c);
+ mm->owner = c;
+ task_unlock(c);
+ put_task_struct(c);
+}
+#endif /* CONFIG_MM_OWNER */
/*
* Turn us into a lazy TLB process if we
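
[Editor's note: mm->owner gives consumers a way to map an mm back to a task,
and from there to a cgroup; the memory controller is the motivating user. A
hedged sketch of such a lookup (the memcg helper name is from memory of this
era; locking against concurrent owner changes is elided):

	/* Resolve the cgroup to charge for this mm via its owner task.
	 * mm_update_next_owner() above keeps mm->owner valid as long as
	 * other users of the mm remain. */
	struct task_struct *owner = mm->owner;
	struct mem_cgroup *memcg = mem_cgroup_from_task(owner);
]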
@@ -613,6 +681,7 @@ static void exit_mm(struct task_struct * tsk)
/* We don't want this task to be frozen prematurely */
clear_freeze_flag(tsk);
task_unlock(tsk);
+ mm_update_next_owner(mm);
mmput(mm);
}
@@ -627,7 +696,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
if (unlikely(traced)) {
/* Preserve ptrace links if someone else is tracing this child. */
list_del_init(&p->ptrace_list);
- if (p->parent != p->real_parent)
+ if (ptrace_reparented(p))
list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
} else {
/* If this child is being traced, then we're the one tracing it
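
[Editor's note: ptrace_reparented() is another new predicate
(include/linux/ptrace.h); to the best of my recollection it is simply the
open-coded test it replaces:

	static inline int ptrace_reparented(struct task_struct *child)
	{
		/* Tracer is someone other than the real parent. */
		return child->real_parent != child->parent;
	}
]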
@@ -651,18 +720,18 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
/* If this is a threaded reparent there is no need to
* notify anyone that anything has happened.
*/
- if (p->real_parent->group_leader == father->group_leader)
+ if (same_thread_group(p->real_parent, father))
return;
/* We don't want people slaying init. */
- if (p->exit_signal != -1)
+ if (!task_detached(p))
p->exit_signal = SIGCHLD;
/* If we'd notified the old parent about this child's death,
* also notify the new parent.
*/
if (!traced && p->exit_state == EXIT_ZOMBIE &&
- p->exit_signal != -1 && thread_group_empty(p))
+ !task_detached(p) && thread_group_empty(p))
do_notify_parent(p, p->exit_signal);
kill_orphaned_pgrp(p, father);
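
[Editor's note: same_thread_group() likewise replaces an open-coded
group-leader comparison. A sketch consistent with its definition in this era
(it compares thread-group identity, whether via tgid or the shared signal
struct depending on the exact tree):

	static inline int same_thread_group(struct task_struct *p1,
					    struct task_struct *p2)
	{
		return p1->tgid == p2->tgid;
	}
]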
@@ -715,18 +784,18 @@ static void forget_original_parent(struct task_struct *father)
} else {
/* reparent ptraced task to its real parent */
__ptrace_unlink (p);
- if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
+ if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) &&
thread_group_empty(p))
do_notify_parent(p, p->exit_signal);
}
/*
- * if the ptraced child is a zombie with exit_signal == -1
- * we must collect it before we exit, or it will remain
- * zombie forever since we prevented it from self-reap itself
- * while it was being traced by us, to be able to see it in wait4.
+ * if the ptraced child is a detached zombie we must collect
+ * it before we exit, or it will remain a zombie forever: we
+ * prevented it from reaping itself while it was being traced
+ * by us, so that we could see it in wait4.
*/
- if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
+ if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p)))
list_add(&p->ptrace_list, &ptrace_dead);
}
@@ -783,29 +852,30 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
* we have changed execution domain as these two values started
* the same after a fork.
*/
- if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
+ if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
(tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
- tsk->self_exec_id != tsk->parent_exec_id)
- && !capable(CAP_KILL))
+ tsk->self_exec_id != tsk->parent_exec_id) &&
+ !capable(CAP_KILL))
tsk->exit_signal = SIGCHLD;
-
/* If something other than our normal parent is ptracing us, then
* send it a SIGCHLD instead of honoring exit_signal. exit_signal
* only has special meaning to our real parent.
*/
- if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
- int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
+ if (!task_detached(tsk) && thread_group_empty(tsk)) {
+ int signal = ptrace_reparented(tsk) ?
+ SIGCHLD : tsk->exit_signal;
do_notify_parent(tsk, signal);
} else if (tsk->ptrace) {
do_notify_parent(tsk, SIGCHLD);
}
state = EXIT_ZOMBIE;
- if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
+ if (task_detached(tsk) && likely(!tsk->ptrace))
state = EXIT_DEAD;
tsk->exit_state = state;
+ /* mt-exec, de_thread() is waiting for us */
if (thread_group_leader(tsk) &&
tsk->signal->notify_count < 0 &&
tsk->signal->group_exit_task)
@@ -967,8 +1037,8 @@ NORET_TYPE void do_exit(long code)
if (group_dead)
acct_process();
exit_sem(tsk);
- __exit_files(tsk);
- __exit_fs(tsk);
+ exit_files(tsk);
+ exit_fs(tsk);
check_stack_usage();
exit_thread();
cgroup_exit(tsk, 1);
@@ -984,7 +1054,7 @@ NORET_TYPE void do_exit(long code)
proc_exit_connector(tsk);
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
- mpol_free(tsk->mempolicy);
+ mpol_put(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
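
[Editor's note: mpol_free() becomes mpol_put() because mempolicies are
reference-counted as of this release; the policy is freed only when the last
reference drops. A hedged sketch of the slow path (mm/mempolicy.c, from
memory; details may differ):

	void __mpol_put(struct mempolicy *pol)
	{
		if (!atomic_dec_and_test(&pol->refcnt))
			return;		/* other users remain */
		kmem_cache_free(policy_cache, pol);
	}
]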
@@ -1049,12 +1119,13 @@ asmlinkage long sys_exit(int error_code)
NORET_TYPE void
do_group_exit(int exit_code)
{
+ struct signal_struct *sig = current->signal;
+
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
- if (current->signal->flags & SIGNAL_GROUP_EXIT)
- exit_code = current->signal->group_exit_code;
+ if (signal_group_exit(sig))
+ exit_code = sig->group_exit_code;
else if (!thread_group_empty(current)) {
- struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
if (signal_group_exit(sig))
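
[Editor's note: signal_group_exit() folds the SIGNAL_GROUP_EXIT flag test
together with the group_exit_task check used by mt-exec; from memory of
include/linux/sched.h in this era:

	static inline int signal_group_exit(const struct signal_struct *sig)
	{
		return (sig->flags & SIGNAL_GROUP_EXIT) ||
		       (sig->group_exit_task != NULL);
	}

This also catches the mt-exec case, where group_exit_task is set but
SIGNAL_GROUP_EXIT is not.]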
@@ -1106,7 +1177,7 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options,
* Do not consider detached threads that are
* not ptraced:
*/
- if (p->exit_signal == -1 && !p->ptrace)
+ if (task_detached(p) && !p->ptrace)
return 0;
/* Wait for all children (clone and not) if __WALL is set;
@@ -1196,8 +1267,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
return 0;
}
- /* traced means p->ptrace, but not vice versa */
- traced = (p->real_parent != p->parent);
+ traced = ptrace_reparented(p);
if (likely(!traced)) {
struct signal_struct *psig;
@@ -1298,9 +1368,9 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
* If it's still not detached after that, don't release
* it now.
*/
- if (p->exit_signal != -1) {
+ if (!task_detached(p)) {
do_notify_parent(p, p->exit_signal);
- if (p->exit_signal != -1) {
+ if (!task_detached(p)) {
p->exit_state = EXIT_ZOMBIE;
p = NULL;
}
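
[Editor's note: the repeated task_detached() check is needed because
do_notify_parent() can itself detach the task: when the parent ignores SIGCHLD
(or set SA_NOCLDWAIT), the child is marked self-reaping. A hedged sketch of
that path in kernel/signal.c:

	/* Inside do_notify_parent(), roughly: */
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD - 1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD - 1].sa.sa_flags & SA_NOCLDWAIT)))
		tsk->exit_signal = -1;	/* task_detached() becomes true */

Here, a child that is still not detached after the notification stays a zombie
for its real parent to reap; a detached one is released in code just past this
excerpt.]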