git.hungrycats.org Git - linux/commitdiff
cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
authorUwe Kleine-König <ukleinek@debian.org>
Sun, 6 Oct 2024 20:51:06 +0000 (22:51 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2024 10:01:08 +0000 (12:01 +0200)
commit 8b4865cd904650cbed7f2407e653934c621b8127 upstream.

notify_hwp_interrupt() is called via sysvec_thermal() ->
smp_thermal_vector() -> intel_thermal_interrupt() in hard irq context.
For this reason it must not use a simple spin_lock that sleeps with
PREEMPT_RT enabled. So convert it to a raw spinlock.

Reported-by: xiao sheng wen <atzlinux@sina.com>
Link: https://bugs.debian.org/1076483
Signed-off-by: Uwe Kleine-König <ukleinek@debian.org>
Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: xiao sheng wen <atzlinux@sina.com>
Link: https://patch.msgid.link/20240919081121.10784-2-ukleinek@debian.org
Cc: All applicable <stable@vger.kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
[ukleinek: Backport to v6.10.y]
Signed-off-by: Uwe Kleine-König <ukleinek@debian.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/cpufreq/intel_pstate.c

index c31914a9876fa116956773f90005f080c814760d..b694e474acece06cf91b03d3e9b7b302aab377ee 100644 (file)
@@ -1622,7 +1622,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
        wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 void notify_hwp_interrupt(void)
@@ -1638,7 +1638,7 @@ void notify_hwp_interrupt(void)
        if (!(value & 0x01))
                return;
 
-       spin_lock_irqsave(&hwp_notify_lock, flags);
+       raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
        if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
                goto ack_intr;
@@ -1646,13 +1646,13 @@ void notify_hwp_interrupt(void)
        schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
                              msecs_to_jiffies(10));
 
-       spin_unlock_irqrestore(&hwp_notify_lock, flags);
+       raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
        return;
 
 ack_intr:
        wrmsrl_safe(MSR_HWP_STATUS, 0);
-       spin_unlock_irqrestore(&hwp_notify_lock, flags);
+       raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1665,9 +1665,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
        /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
        wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-       spin_lock_irq(&hwp_notify_lock);
+       raw_spin_lock_irq(&hwp_notify_lock);
        cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-       spin_unlock_irq(&hwp_notify_lock);
+       raw_spin_unlock_irq(&hwp_notify_lock);
 
        if (cancel_work)
                cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1677,10 +1677,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 {
        /* Enable HWP notification interrupt for guaranteed performance change */
        if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
-               spin_lock_irq(&hwp_notify_lock);
+               raw_spin_lock_irq(&hwp_notify_lock);
                INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
                cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-               spin_unlock_irq(&hwp_notify_lock);
+               raw_spin_unlock_irq(&hwp_notify_lock);
 
                /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);