Diffstat (limited to 'kernel/watchdog.c'):

 kernel/watchdog.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 99afb88d2e85..8e61f21e7e33 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -57,7 +57,7 @@ int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
* Should we panic when a soft-lockup or hard-lockup occurs:
*/
unsigned int __read_mostly hardlockup_panic =
- CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+ IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
@@ -168,7 +168,7 @@ static struct cpumask watchdog_allowed_mask __read_mostly;
/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
- CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+ IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);
static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;
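
Note on the two hunks above: IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is set (=y or =m) and to 0 otherwise, so it can initialize hardlockup_panic and softlockup_panic directly and removes the need for the separate CONFIG_BOOTPARAM_*_PANIC_VALUE helper symbols (the Kconfig side of that cleanup is not part of this diff). A minimal standalone sketch of the effect follows; the stand-in macros are simplified from include/linux/kconfig.h (the real IS_ENABLED() also covers the =m case via CONFIG_FOO_MODULE), and main() exists only for the demo.

#include <stdio.h>

/* Pretend Kconfig selected the option: bool options become "#define CONFIG_FOO 1". */
#define CONFIG_BOOTPARAM_HARDLOCKUP_PANIC 1

/* Simplified stand-in for the kernel's IS_ENABLED() preprocessor trick. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ___config_enabled(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define __config_enabled(val) ___config_enabled(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) __config_enabled(option)

/* Evaluates to 1 here; to 0 if the #define above is removed. */
static unsigned int hardlockup_panic = IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);

int main(void)
{
	printf("hardlockup_panic = %u\n", hardlockup_panic);
	return 0;
}
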
@@ -537,7 +537,7 @@ int lockup_detector_offline_cpu(unsigned int cpu)
return 0;
}
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
@@ -557,6 +557,13 @@ static void lockup_detector_reconfigure(void)
__lockup_detector_cleanup();
}
+void lockup_detector_reconfigure(void)
+{
+ mutex_lock(&watchdog_mutex);
+ __lockup_detector_reconfigure();
+ mutex_unlock(&watchdog_mutex);
+}
+
/*
* Create the watchdog infrastructure and configure the detector(s).
*/
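
The hunks above split the reconfigure path: __lockup_detector_reconfigure() keeps the old behaviour and expects its caller to hold watchdog_mutex, while the new non-static lockup_detector_reconfigure() takes the mutex itself, so code outside kernel/watchdog.c can request a reconfiguration without knowing the watchdog's locking. A hypothetical external caller is sketched below; the function name is made up, and the prototype is assumed to be exported via a header such as <linux/nmi.h>, which is not part of this kernel/watchdog.c-only diff.

#include <linux/nmi.h>	/* assumed to carry the lockup_detector_reconfigure() prototype */

/* Hypothetical caller outside kernel/watchdog.c. */
int example_after_platform_event(void)
{
	/*
	 * Ask the watchdog core to re-evaluate its configuration.  The
	 * exported wrapper serializes on watchdog_mutex internally, so no
	 * extra locking is needed here.
	 */
	lockup_detector_reconfigure();
	return 0;
}
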
@@ -573,13 +580,13 @@ static __init void lockup_detector_setup(void)
return;
mutex_lock(&watchdog_mutex);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
softlockup_initialized = true;
mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
@@ -587,9 +594,13 @@ static void lockup_detector_reconfigure(void)
watchdog_nmi_start();
cpus_read_unlock();
}
+void lockup_detector_reconfigure(void)
+{
+ __lockup_detector_reconfigure();
+}
static inline void lockup_detector_setup(void)
{
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
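
With the #else branch above, both configurations expose the same lockup_detector_reconfigure() entry point: when CONFIG_SOFTLOCKUP_DETECTOR is off, the wrapper simply forwards to the reduced __lockup_detector_reconfigure(), which only stops and restarts the NMI watchdog under cpus_read_lock(). The generic shape of that config-parity pattern, with made-up names, is sketched below.

#include <linux/mutex.h>

static void __example_reconfigure(void)
{
	/* real work lives here; empty in this sketch */
}

#ifdef CONFIG_EXAMPLE_FEATURE
static DEFINE_MUTEX(example_mutex);

void example_reconfigure(void)
{
	mutex_lock(&example_mutex);
	__example_reconfigure();	/* full path, serialized here */
	mutex_unlock(&example_mutex);
}
#else
void example_reconfigure(void)
{
	__example_reconfigure();	/* reduced path, no extra state to protect */
}
#endif
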
@@ -629,7 +640,7 @@ static void proc_watchdog_update(void)
{
/* Remove impossible cpus to keep sysctl output clean. */
cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
/*
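
The proc_watchdog_update() hunk relies on its callers: the sysctl handlers in this file serialize on watchdog_mutex before updating the watchdog parameters, which is what makes the switch to the unlocked __lockup_detector_reconfigure() safe here. A heavily reduced, hypothetical sketch of that write path follows (the real handlers also parse and validate the sysctl input).

#include <linux/mutex.h>

static DEFINE_MUTEX(watchdog_mutex);

/* Stand-in stub for the proc_watchdog_update() patched above. */
static void proc_watchdog_update(void)
{
}

/* Hypothetical, reduced sysctl write path. */
int example_sysctl_write(unsigned long new_enabled, unsigned long *enabled)
{
	mutex_lock(&watchdog_mutex);
	if (new_enabled != *enabled) {
		*enabled = new_enabled;
		/* Reconfigure while still holding watchdog_mutex. */
		proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return 0;
}
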
@@ -848,7 +859,7 @@ void __init lockup_detector_init(void)
pr_info("Disabling watchdog on nohz_full cores by default\n");
cpumask_copy(&watchdog_cpumask,
- housekeeping_cpumask(HK_FLAG_TIMER));
+ housekeeping_cpumask(HK_TYPE_TIMER));
if (!watchdog_nmi_probe())
nmi_watchdog_available = true;
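
The final hunk follows the housekeeping API rename from the HK_FLAG_* bit flags to the enum hk_type constants. housekeeping_cpumask(HK_TYPE_TIMER) returns the CPUs that still perform timer housekeeping, i.e. it excludes nohz_full/isolated CPUs, so copying it into watchdog_cpumask keeps the watchdog off those cores by default, matching the pr_info() above. A minimal usage sketch with made-up example_* names:

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>
#include <linux/types.h>

static struct cpumask example_mask;

/* Restrict some per-CPU activity to the timer-housekeeping CPUs. */
void example_restrict_to_housekeeping(void)
{
	cpumask_copy(&example_mask, housekeeping_cpumask(HK_TYPE_TIMER));
}

/* Per-CPU form of the same query. */
bool example_cpu_is_housekeeping(int cpu)
{
	return housekeeping_cpu(cpu, HK_TYPE_TIMER);
}
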