author    Martin K. Petersen <martin.petersen@oracle.com>  2024-12-04 13:13:00 -0500
committer Martin K. Petersen <martin.petersen@oracle.com>  2024-12-04 13:13:00 -0500
commit    91e11129b9894ef709f3faf13a41ea60d46e7a44 (patch)
tree      cb0ea915789f360db6f0db2afefa2012fde7c080
parent    cdb03e598750e7ebc222571aa96653e9b5a59dbe (diff)
parent    be769e5cf53b8a45eedcc7354bacf939ae16f72c (diff)
Merge patch series "Untie the host lock entanglement - part 2"
Avri Altman <avri.altman@wdc.com> says:

Here is the 2nd part in the sequel, watering down the scsi host lock
usage in the ufs driver. This work is motivated by a comment made by
Bart [1] about the abuse of the scsi host lock in the ufs driver. Its
precursor [2] removed the host lock around some of the host register
accesses. This part replaces the scsi host lock with dedicated locks
serializing access to the clock gating and clock scaling members.

Changes compared to v4:
- split patch 1 into 2 parts (Bart)
- use scoped_guard() for the host_lock as well (Bart)
- remove irrelevant comment and use lockdep_assert_held instead (Bart)
- improve @lock documentation (Bart)

Changes compared to v3:
- Keep the host lock when checking ufshcd_state (Bean)

Changes compared to v2:
- Use clang-format to fix formatting (Bart)
- Use guard() in ufshcd_clkgate_enable_store (Bart)
- Elaborate commit log (Bart)

Changes compared to v1:
- use the guard() & scoped_guard() macros (Bart)
- re-order struct ufs_clk_scaling and struct ufs_clk_gating (Bart)

[1] https://lore.kernel.org/linux-scsi/0b031b8f-c07c-42ef-af93-7336439d3c37@acm.org/
[2] https://lore.kernel.org/linux-scsi/20241024075033.562562-1-avri.altman@wdc.com/

Link: https://lore.kernel.org/r/20241124070808.194860-1-avri.altman@wdc.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
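For context on the guard() and scoped_guard() macros the series adopts: they
come from <linux/cleanup.h> and release the lock automatically when the
enclosing scope (guard) or the attached block (scoped_guard) is left, which is
what lets the patches below drop the explicit spin_unlock_irqrestore() calls
on early-return paths. A minimal sketch of the pattern, with hypothetical
struct and function names that are not part of the patch:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo {
	spinlock_t lock;
	int state;
};

static int demo_read_state(struct demo *d)
{
	/* Lock is held until the function returns, on every return path. */
	guard(spinlock_irqsave)(&d->lock);
	return d->state;
}

static void demo_poke(struct demo *d)
{
	scoped_guard(spinlock_irqsave, &d->lock) {
		if (d->state)
			return;	/* the guard unlocks here as well */
		d->state = 1;
	}
	/* d->lock is no longer held past the scoped block */
}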
-rw-r--r--  drivers/ufs/core/ufshcd.c  | 253
-rw-r--r--  include/ufs/ufshcd.h       |  25
2 files changed, 140 insertions(+), 138 deletions(-)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 6a2685333076..bd2cc2fdb5ba 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -258,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+{
+ return hba->outstanding_tasks || hba->active_uic_cmd ||
+ hba->uic_async_done;
+}
+
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done);
+ return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -1447,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.active_reqs ||
+ hba->clk_scaling.is_suspended)
+ return;
+
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
}
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_suspend_device(hba->devfreq);
}
@@ -1465,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended)
+ return;
+ hba->clk_scaling.is_suspended = false;
}
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
@@ -1487,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
- unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1508,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
}
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (ufshcd_eh_in_progress(hba))
+ return 0;
- /* Skip scaling clock when clock scaling is suspended */
- if (hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- dev_warn(hba->dev, "clock scaling is suspended, skip");
- return 0;
- }
+ /* Skip scaling clock when clock scaling is suspended */
+ if (hba->clk_scaling.is_suspended) {
+ dev_warn(hba->dev, "clock scaling is suspended, skip");
+ return 0;
+ }
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
+ if (list_empty(clk_list))
+ goto out;
- /* Decide based on the target or rounded-off frequency and update */
- if (hba->use_pm_opp)
- scale_up = *freq > hba->clk_scaling.target_freq;
- else
- scale_up = *freq == clki->max_freq;
+ /* Decide based on the target or rounded-off frequency and update */
+ if (hba->use_pm_opp)
+ scale_up = *freq > hba->clk_scaling.target_freq;
+ else
+ scale_up = *freq == clki->max_freq;
- if (!hba->use_pm_opp && !scale_up)
- *freq = clki->min_freq;
+ if (!hba->use_pm_opp && !scale_up)
+ *freq = clki->min_freq;
- /* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
+ /* Update the frequency */
+ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+ ret = 0;
+ goto out; /* no state change required */
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
@@ -1569,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1577,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
memset(stat, 0, sizeof(*stat));
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
curr_t = ktime_get();
if (!scaling->window_start_t)
goto start_window;
@@ -1613,7 +1610,7 @@ start_window:
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return 0;
}
@@ -1677,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool suspend = false;
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
devfreq_suspend_device(hba->devfreq);
@@ -1697,15 +1694,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool resume = false;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
@@ -1791,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
+ spin_lock_init(&hba->clk_scaling.lock);
+
hba->clk_scaling.workq = alloc_ordered_workqueue(
"ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
@@ -1811,19 +1810,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
- unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ if (hba->clk_gating.state == CLKS_ON)
+ return;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
@@ -1858,7 +1854,7 @@ void ufshcd_hold(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
hba->clk_gating.active_reqs++;
start:
@@ -1874,11 +1870,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
}
break;
@@ -1907,17 +1903,17 @@ start:
*/
fallthrough;
case REQ_CLKS_ON:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1925,28 +1921,32 @@ static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
- unsigned long flags;
int ret;
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * In case you are here to cancel this work the gating state
- * would be marked as REQ_CLKS_ON. In this case save time by
- * skipping the gating work and exit after changing the clock
- * state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ /*
+ * In case you are here to cancel this work the gating state
+ * would be marked as REQ_CLKS_ON. In this case save time by
+ * skipping the gating work and exit after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ hba->clk_gating.state != REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ return;
+ }
- if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
- goto rel_lock;
+ if (hba->clk_gating.active_reqs)
+ return;
+ }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_is_ufs_dev_busy(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1957,7 +1957,7 @@ static void ufshcd_gate_work(struct work_struct *work)
__func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- goto out;
+ return;
}
ufshcd_set_link_hibern8(hba);
}
@@ -1977,33 +1977,34 @@ static void ufshcd_gate_work(struct work_struct *work)
* prevent from doing cancel work multiple times when there are
* new requests arriving before the current cancel work is done.
*/
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
}
-/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
+ lockdep_assert_held(&hba->clk_gating.lock);
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
- hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
- hba->active_uic_cmd || hba->uic_async_done ||
+ !hba->clk_gating.is_initialized ||
hba->clk_gating.state == CLKS_OFF)
return;
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_has_pending_tasks(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
+
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2013,11 +2014,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
void ufshcd_release(struct ufs_hba *hba)
{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
__ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -2032,11 +2030,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
@@ -2064,7 +2060,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
@@ -2072,9 +2067,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
value = !!value;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
if (value == hba->clk_gating.is_enabled)
- goto out;
+ return count;
if (value)
__ufshcd_release(hba);
@@ -2082,8 +2078,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
hba->clk_gating.active_reqs++;
hba->clk_gating.is_enabled = value;
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return count;
}
@@ -2125,6 +2120,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+ spin_lock_init(&hba->clk_gating.lock);
+
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
hba->host->host_no);
@@ -2154,19 +2151,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
- }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2182,18 +2177,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
hba->clk_scaling.active_reqs--;
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
@@ -2201,7 +2195,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -8221,7 +8214,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
/* Update RTC only when there are no requests in progress and UFSHCI is operational */
- if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ if (!ufshcd_is_ufs_dev_busy(hba) &&
+ hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+ !hba->clk_gating.active_reqs)
ufshcd_update_rtc(hba);
if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
@@ -9118,7 +9113,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
ktime_t start = ktime_get();
bool clk_state_changed = false;
@@ -9169,11 +9163,10 @@ out:
clk_disable_unprepare(clki->clk);
}
} else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+ hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
if (clk_state_changed)
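A side note on the __ufshcd_release() hunk above: the free-text "host lock
must be held before calling this variant" comment becomes a
lockdep_assert_held() on the new clk_gating.lock, so the precondition is
verified at runtime when lockdep is enabled rather than merely stated.
Sketch of the idiom, reusing the hypothetical struct demo from earlier:

static void __demo_release(struct demo *d)
{
	/* Warns (under CONFIG_LOCKDEP) if the caller does not hold d->lock. */
	lockdep_assert_held(&d->lock);
	d->state = 0;
}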
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index d7aca9e61684..ce7667b020e2 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -403,6 +403,9 @@ enum clk_gating_state {
* delay_ms
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
+ * relative to the host lock
* @state: the current clocks state
* @delay_ms: gating delay in ms
* @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -413,11 +416,14 @@ enum clk_gating_state {
* @is_initialized: Indicates whether clock gating is initialized or not
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
*/
struct ufs_clk_gating {
struct delayed_work gate_work;
struct work_struct ungate_work;
+ struct workqueue_struct *clk_gating_workq;
+
+ spinlock_t lock;
+
enum clk_gating_state state;
unsigned long delay_ms;
bool is_suspended;
@@ -426,11 +432,14 @@ struct ufs_clk_gating {
bool is_enabled;
bool is_initialized;
int active_reqs;
- struct workqueue_struct *clk_gating_workq;
};
/**
* struct ufs_clk_scaling - UFS clock scaling related data
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @lock: serialize access to some struct ufs_clk_scaling members
* @active_reqs: number of requests that are pending. If this is zero when
* devfreq ->target() function is called then schedule "suspend_work" to
* suspend devfreq.
@@ -440,9 +449,6 @@ struct ufs_clk_gating {
* @enable_attr: sysfs attribute to enable/disable clock scaling
* @saved_pwr_info: UFS power mode may also be changed during scaling and this
* one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
* @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
@@ -454,15 +460,18 @@ struct ufs_clk_gating {
* @is_suspended: tracks if devfreq is suspended or not
*/
struct ufs_clk_scaling {
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+
+ spinlock_t lock;
+
int active_reqs;
unsigned long tot_busy_t;
ktime_t window_start_t;
ktime_t busy_start_t;
struct device_attribute enable_attr;
struct ufs_pa_layer_attr saved_pwr_info;
- struct workqueue_struct *workq;
- struct work_struct suspend_work;
- struct work_struct resume_work;
unsigned long target_freq;
u32 min_gear;
bool is_enabled;
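The @lock kerneldoc above ("an outer lock relative to the host lock") encodes
an ordering rule that the __ufshcd_release() hunk already follows: take
clk_gating.lock first, then nest the SCSI host lock inside it, never the
reverse. An illustrative sketch mirroring that nesting (demo_can_gate is a
hypothetical name, not a function in the patch):

static bool demo_can_gate(struct ufs_hba *hba)
{
	/* clk_gating.lock first (outer)... */
	guard(spinlock_irqsave)(&hba->clk_gating.lock);

	/* ...then the host lock (inner); never acquire them in reverse. */
	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
		if (ufshcd_has_pending_tasks(hba) ||
		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
			return false;
	}
	return true;
}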