Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  35 ++++++++++++++++++++++++-----------
1 file changed, 24 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 800b4208dba9..ae975a7c9f69 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -371,6 +371,9 @@ static bool workqueue_freezing; /* PL: have wqs started freezing? */
/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

+/* to further constrain wq_unbound_cpumask by the cmdline parameter */
+static struct cpumask wq_cmdline_cpumask __initdata;
+
/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);
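A note on the new mask: the __initdata annotation places wq_cmdline_cpumask in init memory that the kernel frees once boot completes. That is safe here because the mask is written only by the __init setup handler added at the end of this patch and is consumed exactly once, in workqueue_init_early() below, where it is folded into the long-lived wq_unbound_cpumask.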
@@ -2304,9 +2307,8 @@ static void idle_worker_timeout(struct timer_list *t)
static void idle_cull_fn(struct work_struct *work)
{
struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
- struct list_head cull_list;
+ LIST_HEAD(cull_list);

- INIT_LIST_HEAD(&cull_list);
/*
* Grabbing wq_pool_attach_mutex here ensures an already-running worker
* cannot proceed beyond worker_detach_from_pool() in its self-destruct
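This hunk and the next replace the declare-then-INIT_LIST_HEAD() pattern with the self-initializing LIST_HEAD() macro. For reference, a minimal userspace sketch of why the two forms are equivalent; the macro and helper below are simplified copies of the include/linux/list.h definitions (the kernel's INIT_LIST_HEAD() additionally uses WRITE_ONCE()):

#include <assert.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Simplified from include/linux/list.h. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

int main(void)
{
	struct list_head a;	/* old style: declare, then init at runtime */
	LIST_HEAD(b);		/* new style: one self-initializing declaration */

	INIT_LIST_HEAD(&a);

	/* Both yield an empty list whose head points back at itself. */
	assert(a.next == &a && a.prev == &a);
	assert(b.next == &b && b.prev == &b);
	return 0;
}

Besides being shorter, LIST_HEAD() leaves no window in which the list head exists but is uninitialized.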
@@ -3872,10 +3874,8 @@ static void rcu_free_pool(struct rcu_head *rcu)
static void put_unbound_pool(struct worker_pool *pool)
{
DECLARE_COMPLETION_ONSTACK(detach_completion);
- struct list_head cull_list;
struct worker *worker;
-
- INIT_LIST_HEAD(&cull_list);
+ LIST_HEAD(cull_list);

lockdep_assert_held(&wq_pool_mutex);
@@ -6458,6 +6458,9 @@ void __init workqueue_init_early(void)
cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));

+ if (!cpumask_empty(&wq_cmdline_cpumask))
+ cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
+
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);

/* initialize CPU pools */
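The cpumask_empty() guard above matters: a cpulist_parse() failure (or an absent parameter) leaves wq_cmdline_cpumask empty, and ANDing with an empty mask would wipe out the housekeeping-derived default. A small userspace sketch of the same intersection logic, with plain unsigned long bitmasks standing in for cpumask_var_t:

#include <stdio.h>

int main(void)
{
	unsigned long housekeeping = 0xffUL; /* CPUs 0-7 allowed by default */
	unsigned long cmdline = 0x0cUL;      /* e.g. workqueue.unbound_cpus=2-3 */
	unsigned long unbound = housekeeping;

	/* Mirrors: if (!cpumask_empty(&wq_cmdline_cpumask)) cpumask_and(...); */
	if (cmdline != 0)
		unbound &= cmdline;

	printf("unbound mask: 0x%lx\n", unbound); /* 0xc: CPUs 2 and 3 */
	return 0;
}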
@@ -6612,10 +6615,20 @@ void __init workqueue_init(void)
wq_watchdog_init();
}

-/*
- * Despite the naming, this is a no-op function which is here only for avoiding
- * link error. Since compile-time warning may fail to catch, we will need to
- * emit run-time warning from __flush_workqueue().
- */
-void __warn_flushing_systemwide_wq(void) { }
+void __warn_flushing_systemwide_wq(void)
+{
+ pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
+ dump_stack();
+}
EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
+
+static int __init workqueue_unbound_cpus_setup(char *str)
+{
+ if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
+ cpumask_clear(&wq_cmdline_cpumask);
+ pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
+ }
+
+ return 1;
+}
+__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
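With this in place, unbound workqueue workers can be confined at boot, e.g. by booting with workqueue.unbound_cpus=0-3,8. cpulist_parse() accepts the usual comma-separated list of CPU ranges; on a malformed value the handler clears the mask and warns, so the default is used rather than a partially parsed constraint. A userspace sketch of the same parse-or-fall-back shape follows; parse_cpulist() here is a hypothetical stand-in, not the kernel's cpulist_parse():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for cpulist_parse(): accepts "0-3,8"-style lists. */
static int parse_cpulist(const char *str, unsigned long *mask)
{
	char buf[64], *tok, *save = NULL;
	unsigned long m = 0;

	if (strlen(str) >= sizeof(buf))
		return -1;
	strcpy(buf, str);

	for (tok = strtok_r(buf, ",", &save); tok;
	     tok = strtok_r(NULL, ",", &save)) {
		char *dash = strchr(tok, '-'), *end;
		long lo, hi;

		lo = strtol(tok, &end, 10);
		if (end == tok)
			return -1;
		hi = dash ? strtol(dash + 1, &end, 10) : lo;
		if (lo < 0 || hi < lo || hi >= (long)(8 * sizeof(m)))
			return -1;
		while (lo <= hi)
			m |= 1UL << lo++;
	}
	*mask = m;
	return 0;
}

int main(void)
{
	unsigned long mask;

	/* Same fallback shape as workqueue_unbound_cpus_setup(): reset to
	 * "no constraint" and warn instead of keeping partial state. */
	if (parse_cpulist("0-3,8", &mask) < 0) {
		mask = 0;
		fprintf(stderr, "incorrect CPU range, using default\n");
	}
	printf("mask: 0x%lx\n", mask); /* 0x10f: CPUs 0-3 and 8 */
	return 0;
}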