
[v3,6/6] cpufreq: powernv: Restore cpu frequency to policy->cur on unthrottling

Message ID 1430729652-14813-7-git-send-email-shilpa.bhat@linux.vnet.ibm.com (mailing list archive)
State Not Applicable

Commit Message

Shilpasri G Bhat May 4, 2015, 8:54 a.m. UTC
If the frequency is throttled due to an OCC reset, the cpus will be running
at the Psafe frequency, so restore the frequency on all cpus to
policy->cur once the OCCs are active again. If the frequency is
throttled due to Pmax capping, restore the frequency of all the cpus
in the chip on unthrottling.

Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
---
 drivers/cpufreq/powernv-cpufreq.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)
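
In short, the work function walks the chip's online cpus, resolves each
policy's current frequency (policy->cur) to the closest entry in the
frequency table, and re-programs that pstate so the cores leave the
Psafe/Pmax-capped frequency. The sketch below mirrors that flow; the
alloc_cpumask_var()/free_cpumask_var() pair for the scratch mask and the
error check on cpufreq_get_policy() are illustrative assumptions added
here, not part of the patch, and the sketch relies on struct chip and
powernv_cpufreq_target_index() as defined in this driver.

/*
 * Illustrative sketch of the restore path described above. It mirrors
 * powernv_cpufreq_work_fn() from this patch; the cpumask allocation and
 * the cpufreq_get_policy() error check are assumptions added for the
 * sketch, not taken from the patch itself.
 */
static void restore_chip_to_policy_cur(struct chip *chip)
{
	cpumask_var_t mask;
	unsigned int cpu;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	/* Consider only this chip's cpus that are currently online. */
	cpumask_copy(mask, &chip->mask);
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		unsigned int index;
		int tcpu;
		struct cpufreq_policy policy;

		if (cpufreq_get_policy(&policy, cpu))
			continue;

		/* Pick the table entry closest to the last requested freq. */
		cpufreq_frequency_table_target(&policy, policy.freq_table,
					       policy.cur,
					       CPUFREQ_RELATION_C, &index);
		powernv_cpufreq_target_index(&policy, index);

		/* The policy covers all cpus of the core; skip its siblings. */
		for_each_cpu(tcpu, policy.cpus)
			cpumask_clear_cpu(tcpu, mask);
	}

	free_cpumask_var(mask);
}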

Comments

Preeti U Murthy May 5, 2015, 9:39 a.m. UTC | #1
On 05/04/2015 02:24 PM, Shilpasri G Bhat wrote:
> If the frequency is throttled due to an OCC reset, the cpus will be running
> at the Psafe frequency, so restore the frequency on all cpus to
> policy->cur once the OCCs are active again. If the frequency is
> throttled due to Pmax capping, restore the frequency of all the cpus
> in the chip on unthrottling.
> 
> Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
> ---
>  drivers/cpufreq/powernv-cpufreq.c | 31 +++++++++++++++++++++++++++++--
>  1 file changed, 29 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
> index 0a59d5b..b2915bc 100644
> --- a/drivers/cpufreq/powernv-cpufreq.c
> +++ b/drivers/cpufreq/powernv-cpufreq.c
> @@ -51,6 +51,7 @@ static struct chip {
>  	bool throttled;
>  	cpumask_t mask;
>  	struct work_struct throttle;
> +	bool restore;
>  } *chips;
> 
>  static int nr_chips;
> @@ -418,9 +419,29 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
>  void powernv_cpufreq_work_fn(struct work_struct *work)
>  {
>  	struct chip *chip = container_of(work, struct chip, throttle);
> +	unsigned int cpu;
> +	cpumask_var_t mask;
> 
>  	smp_call_function_any(&chip->mask,
>  			      powernv_cpufreq_throttle_check, NULL, 0);
> +
> +	if (!chip->restore)
> +		return;
> +
> +	chip->restore = false;
> +	cpumask_copy(mask, &chip->mask);
> +	for_each_cpu_and(cpu, mask, cpu_online_mask) {
> +		int index, tcpu;
> +		struct cpufreq_policy policy;
> +
> +		cpufreq_get_policy(&policy, cpu);
> +		cpufreq_frequency_table_target(&policy, policy.freq_table,
> +					       policy.cur,
> +					       CPUFREQ_RELATION_C, &index);
> +		powernv_cpufreq_target_index(&policy, index);
> +		for_each_cpu(tcpu, policy.cpus)
> +			cpumask_clear_cpu(tcpu, mask);
> +	}
>  }
> 
>  static char throttle_reason[][30] = {
> @@ -473,8 +494,10 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
>  			throttled = false;
>  			pr_info("OCC: Active\n");
> 
> -			for (i = 0; i < nr_chips; i++)
> +			for (i = 0; i < nr_chips; i++) {
> +				chips[i].restore = true;
>  				schedule_work(&chips[i].throttle);
> +			}
> 
>  			return 0;
>  		}
> @@ -490,8 +513,11 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
>  			return 0;
> 
>  		for (i = 0; i < nr_chips; i++)
> -			if (chips[i].id == chip_id)
> +			if (chips[i].id == chip_id) {
> +				if (!reason)
> +					chips[i].restore = true;
>  				schedule_work(&chips[i].throttle);
> +			}
>  	}
>  	return 0;
>  }
> @@ -545,6 +571,7 @@ static int init_chip_info(void)
>  		chips[i].throttled = false;
>  		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
>  		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
> +		chips[i].restore = false;
>  	}
> 
>  	return 0;
> 

Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>

Patch

diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 0a59d5b..b2915bc 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -51,6 +51,7 @@ static struct chip {
 	bool throttled;
 	cpumask_t mask;
 	struct work_struct throttle;
+	bool restore;
 } *chips;
 
 static int nr_chips;
@@ -418,9 +419,29 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
 void powernv_cpufreq_work_fn(struct work_struct *work)
 {
 	struct chip *chip = container_of(work, struct chip, throttle);
+	unsigned int cpu;
+	cpumask_var_t mask;
 
 	smp_call_function_any(&chip->mask,
 			      powernv_cpufreq_throttle_check, NULL, 0);
+
+	if (!chip->restore)
+		return;
+
+	chip->restore = false;
+	cpumask_copy(mask, &chip->mask);
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
+		int index, tcpu;
+		struct cpufreq_policy policy;
+
+		cpufreq_get_policy(&policy, cpu);
+		cpufreq_frequency_table_target(&policy, policy.freq_table,
+					       policy.cur,
+					       CPUFREQ_RELATION_C, &index);
+		powernv_cpufreq_target_index(&policy, index);
+		for_each_cpu(tcpu, policy.cpus)
+			cpumask_clear_cpu(tcpu, mask);
+	}
 }
 
 static char throttle_reason[][30] = {
@@ -473,8 +494,10 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
 			throttled = false;
 			pr_info("OCC: Active\n");
 
-			for (i = 0; i < nr_chips; i++)
+			for (i = 0; i < nr_chips; i++) {
+				chips[i].restore = true;
 				schedule_work(&chips[i].throttle);
+			}
 
 			return 0;
 		}
@@ -490,8 +513,11 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
 			return 0;
 
 		for (i = 0; i < nr_chips; i++)
-			if (chips[i].id == chip_id)
+			if (chips[i].id == chip_id) {
+				if (!reason)
+					chips[i].restore = true;
 				schedule_work(&chips[i].throttle);
+			}
 	}
 	return 0;
 }
@@ -545,6 +571,7 @@ static int init_chip_info(void)
 		chips[i].throttled = false;
 		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
 		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+		chips[i].restore = false;
 	}
 
 	return 0;