path: root/kernel/kthread.c
author		Oleg Nesterov <oleg@redhat.com>	2016-11-29 18:50:57 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2016-12-08 14:36:18 +0100
commit		1da5c46fa965ff90f5ffc080b6ab3fae5e227bc3 (patch)
tree		907dd44b43d3cf5f49be920c46a5592364bcb1e8 /kernel/kthread.c
parent		x86/uaccess, sched/preempt: Verify access_ok() context (diff)
kthread: Make struct kthread kmalloc'ed
commit 23196f2e5f5d "kthread: Pin the stack via try_get_task_stack() /
put_task_stack() in to_live_kthread() function" is a workaround for the
fragile design of struct kthread being allocated on the task stack.

struct kthread in its current form should be removed, but this needs
cleanups outside of kthread.c.

As a first step, move struct kthread away from the task stack by making it
kmalloc'ed. This allows access to kthread.exited without the magic of
trying to pin the task stack and without the try logic in to_live_kthread().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chunming Zhou <David1.Zhou@amd.com>
Cc: Roman Pen <roman.penyaev@profitbricks.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20161129175057.GA5330@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
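Note on the caller side (not visible in this kernel/kthread.c-limited view): the kmalloc'ed struct kthread has to be freed when the task itself is freed. A minimal sketch of how the new free_kthread_struct() could be hooked into free_task() in kernel/fork.c; the exact placement and the PF_KTHREAD guard are illustrative assumptions, not part of the diff shown below:

	/* kernel/fork.c -- illustrative sketch, not part of the kthread.c diff */
	void free_task(struct task_struct *tsk)
	{
		/* ... existing teardown of the stack, seccomp filter, etc. ... */

		/*
		 * Free the kmalloc'ed struct kthread, if any. Per the comment
		 * in free_kthread_struct(), to_kthread() can return NULL for
		 * kthreads created via kernel_thread() or after a failed
		 * kmalloc() in kthread(); kfree(NULL) is a no-op, so the call
		 * is safe for those cases.
		 */
		if (tsk->flags & PF_KTHREAD)
			free_kthread_struct(tsk);

		free_task_struct(tsk);
	}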
Diffstat (limited to 'kernel/kthread.c')
-rw-r--r--	kernel/kthread.c	58
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index be2cc1f9dd57..9d64b6526d0b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -53,14 +53,38 @@ enum KTHREAD_BITS {
KTHREAD_IS_PARKED,
};
-#define __to_kthread(vfork) \
- container_of(vfork, struct kthread, exited)
+static inline void set_kthread_struct(void *kthread)
+{
+ /*
+ * We abuse ->set_child_tid to avoid the new member and because it
+ * can't be wrongly copied by copy_process(). We also rely on fact
+ * that the caller can't exec, so PF_KTHREAD can't be cleared.
+ */
+ current->set_child_tid = (__force void __user *)kthread;
+}
static inline struct kthread *to_kthread(struct task_struct *k)
{
- return __to_kthread(k->vfork_done);
+ WARN_ON(!(k->flags & PF_KTHREAD));
+ return (__force void *)k->set_child_tid;
+}
+
+void free_kthread_struct(struct task_struct *k)
+{
+ /*
+ * Can be NULL if this kthread was created by kernel_thread()
+ * or if kmalloc() in kthread() failed.
+ */
+ kfree(to_kthread(k));
}
+#define __to_kthread(vfork) \
+ container_of(vfork, struct kthread, exited)
+
+/*
+ * TODO: kill it and use to_kthread(). But we still need the users
+ * like kthread_stop() which has to sync with the exiting kthread.
+ */
static struct kthread *to_live_kthread(struct task_struct *k)
{
struct completion *vfork = ACCESS_ONCE(k->vfork_done);
@@ -181,14 +205,11 @@ static int kthread(void *_create)
int (*threadfn)(void *data) = create->threadfn;
void *data = create->data;
struct completion *done;
- struct kthread self;
+ struct kthread *self;
int ret;
- self.flags = 0;
- self.data = data;
- init_completion(&self.exited);
- init_completion(&self.parked);
- current->vfork_done = &self.exited;
+ self = kmalloc(sizeof(*self), GFP_KERNEL);
+ set_kthread_struct(self);
/* If user was SIGKILLed, I release the structure. */
done = xchg(&create->done, NULL);
@@ -196,6 +217,19 @@ static int kthread(void *_create)
kfree(create);
do_exit(-EINTR);
}
+
+ if (!self) {
+ create->result = ERR_PTR(-ENOMEM);
+ complete(done);
+ do_exit(-ENOMEM);
+ }
+
+ self->flags = 0;
+ self->data = data;
+ init_completion(&self->exited);
+ init_completion(&self->parked);
+ current->vfork_done = &self->exited;
+
/* OK, tell user we're spawned, wait for stop or wakeup */
__set_current_state(TASK_UNINTERRUPTIBLE);
create->result = current;
@@ -203,12 +237,10 @@ static int kthread(void *_create)
schedule();
ret = -EINTR;
-
- if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
- __kthread_parkme(&self);
+ if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+ __kthread_parkme(self);
ret = threadfn(data);
}
- /* we can't just return, we must preserve "self" on stack */
do_exit(ret);
}
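
The TODO above to_live_kthread() points at the follow-up this change enables: once struct kthread is kmalloc'ed and reachable through to_kthread(), callers such as kthread_stop() no longer need to pin the task stack to wait on kthread.exited. A rough sketch of what such a caller could look like after that cleanup (an assumption about later patches, not part of this one):

	/* Sketch only: a to_kthread()-based kthread_stop(), assuming later cleanups. */
	int kthread_stop(struct task_struct *k)
	{
		struct kthread *kthread;
		int ret;

		get_task_struct(k);
		kthread = to_kthread(k);	/* valid even after the thread has exited */
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		kthread_unpark(k);
		wake_up_process(k);
		/* kthread->exited lives in kmalloc'ed memory, not on the dead task stack */
		wait_for_completion(&kthread->exited);
		ret = k->exit_code;
		put_task_struct(k);
		return ret;
	}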