
[v3,10/45] smp: Use get/put_online_cpus_atomic() to prevent CPU offline

Message ID 20130627195418.29830.34958.stgit@srivatsabhat.in.ibm.com (mailing list archive)
State Not Applicable

Commit Message

Srivatsa S. Bhat June 27, 2013, 7:54 p.m. UTC
Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on disabling preemption to prevent CPUs from going offline
from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going
offline while we operate on them from atomic context.
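
For reference, a minimal sketch of the caller pattern this series
converts to (reader_fn() is a hypothetical name, not part of this
patch); the return-value convention matches the get_cpu() replacement
seen in the diff below:

	/* Sketch only, assuming the semantics described in this series. */
	static void reader_fn(void)
	{
		unsigned int cpu;

		/* Pins every currently-online CPU; also disables preemption. */
		cpu = get_online_cpus_atomic();	/* this CPU's id, like get_cpu() */

		/* Safe to walk cpu_online_mask and send IPIs here. */

		put_online_cpus_atomic();	/* like put_cpu() */
	}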

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Wang YanQing <udknight@gmail.com>
Cc: Shaohua Li <shli@fusionio.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: liguang <lig.fnst@cn.fujitsu.com>
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 kernel/smp.c |   52 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 30 insertions(+), 22 deletions(-)

Comments

Michael Wang July 2, 2013, 5:32 a.m. UTC | #1
Hi, Srivatsa

On 06/28/2013 03:54 AM, Srivatsa S. Bhat wrote:
[snip]
> @@ -625,8 +632,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
>   * The function might sleep if the GFP flags indicates a non
>   * atomic allocation is allowed.
>   *
> - * Preemption is disabled to protect against CPUs going offline but not online.
> - * CPUs going online during the call will not be seen or sent an IPI.
> + * We use get/put_online_cpus_atomic() to protect against CPUs going
> + * offline but not online. CPUs going online during the call will
> + * not be seen or sent an IPI.

I was a little confused by this comment: if an offline CPU still has a
chance to come online, then there is a chance that we will pick it up
in for_each_online_cpu(), isn't there? Did I miss some point?

Regards,
Michael Wang

>   *
>   * You must not call this function with disabled interrupts or
>   * from a hardware interrupt handler or from a bottom half handler.
> @@ -641,26 +649,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
>  	might_sleep_if(gfp_flags & __GFP_WAIT);
> 
>  	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
> -		preempt_disable();
> +		get_online_cpus_atomic();
>  		for_each_online_cpu(cpu)
>  			if (cond_func(cpu, info))
>  				cpumask_set_cpu(cpu, cpus);
>  		on_each_cpu_mask(cpus, func, info, wait);
> -		preempt_enable();
> +		put_online_cpus_atomic();
>  		free_cpumask_var(cpus);
>  	} else {
>  		/*
>  		 * No free cpumask, bother. No matter, we'll
>  		 * just have to IPI them one by one.
>  		 */
> -		preempt_disable();
> +		get_online_cpus_atomic();
>  		for_each_online_cpu(cpu)
>  			if (cond_func(cpu, info)) {
>  				ret = smp_call_function_single(cpu, func,
>  								info, wait);
>  				WARN_ON_ONCE(!ret);
>  			}
> -		preempt_enable();
> +		put_online_cpus_atomic();
>  	}
>  }
>  EXPORT_SYMBOL(on_each_cpu_cond);
>
Srivatsa S. Bhat July 2, 2013, 8:25 a.m. UTC | #2
Hi Michael,

On 07/02/2013 11:02 AM, Michael Wang wrote:
> Hi, Srivatsa
> 
> On 06/28/2013 03:54 AM, Srivatsa S. Bhat wrote:
> [snip]
>> @@ -625,8 +632,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
>>   * The function might sleep if the GFP flags indicates a non
>>   * atomic allocation is allowed.
>>   *
>> - * Preemption is disabled to protect against CPUs going offline but not online.
>> - * CPUs going online during the call will not be seen or sent an IPI.
>> + * We use get/put_online_cpus_atomic() to protect against CPUs going
>> + * offline but not online. CPUs going online during the call will
>> + * not be seen or sent an IPI.
> 
> I was a little confused by this comment: if an offline CPU still has a
> chance to come online, then there is a chance that we will pick it up
> in for_each_online_cpu(), isn't there? Did I miss some point?
>

Whether or not the newly onlined CPU is observed in our for_each_online_cpu()
loop depends on timing. On top of that, there are two paths in the code:
one which uses a temporary cpumask and one which doesn't. In the former
case, if a CPU comes online _after_ we populate the temporary cpumask, then
we won't send an IPI to that CPU, since the temporary cpumask doesn't
contain it. Whereas if we observe the newly onlined CPU in the
for_each_online_cpu() loop itself (in either the former or the latter case),
then yes, we will send the IPI to that CPU.
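
To make that concrete, here is the cpumask path from the patch,
condensed, with annotations (mine, not in the patch) marking where the
window lies:

	get_online_cpus_atomic();
	for_each_online_cpu(cpu)		/* a CPU onlined before the
						 * loop reaches it IS seen */
		if (cond_func(cpu, info))
			cpumask_set_cpu(cpu, cpus);
	/* a CPU onlined from here on is absent from 'cpus'... */
	on_each_cpu_mask(cpus, func, info, wait);	/* ...so it gets no IPI */
	put_online_cpus_atomic();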

Regards,
Srivatsa S. Bhat

> 
>>   *
>>   * You must not call this function with disabled interrupts or
>>   * from a hardware interrupt handler or from a bottom half handler.
>> @@ -641,26 +649,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
>>  	might_sleep_if(gfp_flags & __GFP_WAIT);
>>
>>  	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
>> -		preempt_disable();
>> +		get_online_cpus_atomic();
>>  		for_each_online_cpu(cpu)
>>  			if (cond_func(cpu, info))
>>  				cpumask_set_cpu(cpu, cpus);
>>  		on_each_cpu_mask(cpus, func, info, wait);
>> -		preempt_enable();
>> +		put_online_cpus_atomic();
>>  		free_cpumask_var(cpus);
>>  	} else {
>>  		/*
>>  		 * No free cpumask, bother. No matter, we'll
>>  		 * just have to IPI them one by one.
>>  		 */
>> -		preempt_disable();
>> +		get_online_cpus_atomic();
>>  		for_each_online_cpu(cpu)
>>  			if (cond_func(cpu, info)) {
>>  				ret = smp_call_function_single(cpu, func,
>>  								info, wait);
>>  				WARN_ON_ONCE(!ret);
>>  			}
>> -		preempt_enable();
>> +		put_online_cpus_atomic();
>>  	}
>>  }
>>  EXPORT_SYMBOL(on_each_cpu_cond);
>>
>
Michael Wang July 2, 2013, 8:47 a.m. UTC | #3
On 07/02/2013 04:25 PM, Srivatsa S. Bhat wrote:
> Hi Michael,
> 
> On 07/02/2013 11:02 AM, Michael Wang wrote:
>> Hi, Srivatsa
>>
>> On 06/28/2013 03:54 AM, Srivatsa S. Bhat wrote:
>> [snip]
>>> @@ -625,8 +632,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
>>>   * The function might sleep if the GFP flags indicates a non
>>>   * atomic allocation is allowed.
>>>   *
>>> - * Preemption is disabled to protect against CPUs going offline but not online.
>>> - * CPUs going online during the call will not be seen or sent an IPI.
>>> + * We use get/put_online_cpus_atomic() to protect against CPUs going
>>> + * offline but not online. CPUs going online during the call will
>>> + * not be seen or sent an IPI.
>>
>> I was a little confused by this comment: if an offline CPU still has a
>> chance to come online, then there is a chance that we will pick it up
>> in for_each_online_cpu(), isn't there? Did I miss some point?
>>
> 
> Whether or not the newly onlined CPU is observed in our for_each_online_cpu()
> loop depends on timing. On top of that, there are two paths in the code:
> one which uses a temporary cpumask and one which doesn't. In the former
> case, if a CPU comes online _after_ we populate the temporary cpumask, then
> we won't send an IPI to that CPU, since the temporary cpumask doesn't
> contain it. Whereas if we observe the newly onlined CPU in the
> for_each_online_cpu() loop itself (in either the former or the latter case),
> then yes, we will send the IPI to that CPU.

So it is not 'during the call' but 'during the call of
on_each_cpu_mask()', correct?

The comment's position makes it seem to claim that during the call of
this function, a CPU coming online won't be seen or sent an IPI...

Regards,
Michael Wang

> 
> Regards,
> Srivatsa S. Bhat
> 
>>
>>>   *
>>>   * You must not call this function with disabled interrupts or
>>>   * from a hardware interrupt handler or from a bottom half handler.
>>> @@ -641,26 +649,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
>>>  	might_sleep_if(gfp_flags & __GFP_WAIT);
>>>
>>>  	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
>>> -		preempt_disable();
>>> +		get_online_cpus_atomic();
>>>  		for_each_online_cpu(cpu)
>>>  			if (cond_func(cpu, info))
>>>  				cpumask_set_cpu(cpu, cpus);
>>>  		on_each_cpu_mask(cpus, func, info, wait);
>>> -		preempt_enable();
>>> +		put_online_cpus_atomic();
>>>  		free_cpumask_var(cpus);
>>>  	} else {
>>>  		/*
>>>  		 * No free cpumask, bother. No matter, we'll
>>>  		 * just have to IPI them one by one.
>>>  		 */
>>> -		preempt_disable();
>>> +		get_online_cpus_atomic();
>>>  		for_each_online_cpu(cpu)
>>>  			if (cond_func(cpu, info)) {
>>>  				ret = smp_call_function_single(cpu, func,
>>>  								info, wait);
>>>  				WARN_ON_ONCE(!ret);
>>>  			}
>>> -		preempt_enable();
>>> +		put_online_cpus_atomic();
>>>  	}
>>>  }
>>>  EXPORT_SYMBOL(on_each_cpu_cond);
>>>
>>
> 
Srivatsa S. Bhat July 2, 2013, 9:51 a.m. UTC | #4
On 07/02/2013 02:17 PM, Michael Wang wrote:
> On 07/02/2013 04:25 PM, Srivatsa S. Bhat wrote:
>> Hi Michael,
>>
>> On 07/02/2013 11:02 AM, Michael Wang wrote:
>>> Hi, Srivatsa
>>>
>>> On 06/28/2013 03:54 AM, Srivatsa S. Bhat wrote:
>>> [snip]
>>>> @@ -625,8 +632,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
>>>>   * The function might sleep if the GFP flags indicates a non
>>>>   * atomic allocation is allowed.
>>>>   *
>>>> - * Preemption is disabled to protect against CPUs going offline but not online.
>>>> - * CPUs going online during the call will not be seen or sent an IPI.
>>>> + * We use get/put_online_cpus_atomic() to protect against CPUs going
>>>> + * offline but not online. CPUs going online during the call will
>>>> + * not be seen or sent an IPI.
>>>
>>> I was a little confused by this comment: if an offline CPU still has a
>>> chance to come online, then there is a chance that we will pick it up
>>> in for_each_online_cpu(), isn't there? Did I miss some point?
>>>
>>
>> Whether or not the newly onlined CPU is observed in our for_each_online_cpu()
>> loop depends on timing. On top of that, there are two paths in the code:
>> one which uses a temporary cpumask and one which doesn't. In the former
>> case, if a CPU comes online _after_ we populate the temporary cpumask, then
>> we won't send an IPI to that CPU, since the temporary cpumask doesn't
>> contain it. Whereas if we observe the newly onlined CPU in the
>> for_each_online_cpu() loop itself (in either the former or the latter case),
>> then yes, we will send the IPI to that CPU.
> 
> So it is not 'during the call' but 'during the call of
> on_each_cpu_mask()', correct?
> 

Well, as I said, it's timing-dependent. We might miss the newly onlined CPU
in the for_each_online_cpu() loop itself, based on when exactly the CPU was
added to the cpu_online_mask. So you can't exactly pinpoint the places where
you'll miss the CPU and where you won't. Besides, is it _that_ important? It
is, after all, unpredictable...

> The comment's position makes it seem to claim that during the call of
> this function, a CPU coming online won't be seen or sent an IPI...
>

Doesn't matter, AFAICS. The key take-away from that whole comment is: nothing is
done to prevent CPUs from coming online while the function is running, whereas
the online CPUs are guaranteed to remain online throughout the function. In
other words, it's a weaker form of get_online_cpus()/put_online_cpus(),
providing one-way synchronization (against CPU offline only).

As long as that idea is conveyed properly, the purpose of that comment is served,
IMHO.
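
In code terms, the contrast is roughly this (a sketch of the intended
semantics, not a quote of either implementation):

	/* Sleepable context: holds off all hotplug, both online and offline. */
	get_online_cpus();
	/* ... cpu_online_mask is completely stable here ... */
	put_online_cpus();

	/* Atomic context: holds off CPU offline only; CPUs may still come up. */
	get_online_cpus_atomic();
	/* ... online CPUs stay online, but new ones may appear ... */
	put_online_cpus_atomic();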

Regards,
Srivatsa S. Bhat


>>>>   *
>>>>   * You must not call this function with disabled interrupts or
>>>>   * from a hardware interrupt handler or from a bottom half handler.
>>>> @@ -641,26 +649,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
>>>>  	might_sleep_if(gfp_flags & __GFP_WAIT);
>>>>
>>>>  	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
>>>> -		preempt_disable();
>>>> +		get_online_cpus_atomic();
>>>>  		for_each_online_cpu(cpu)
>>>>  			if (cond_func(cpu, info))
>>>>  				cpumask_set_cpu(cpu, cpus);
>>>>  		on_each_cpu_mask(cpus, func, info, wait);
>>>> -		preempt_enable();
>>>> +		put_online_cpus_atomic();
>>>>  		free_cpumask_var(cpus);
>>>>  	} else {
>>>>  		/*
>>>>  		 * No free cpumask, bother. No matter, we'll
>>>>  		 * just have to IPI them one by one.
>>>>  		 */
>>>> -		preempt_disable();
>>>> +		get_online_cpus_atomic();
>>>>  		for_each_online_cpu(cpu)
>>>>  			if (cond_func(cpu, info)) {
>>>>  				ret = smp_call_function_single(cpu, func,
>>>>  								info, wait);
>>>>  				WARN_ON_ONCE(!ret);
>>>>  			}
>>>> -		preempt_enable();
>>>> +		put_online_cpus_atomic();
>>>>  	}
>>>>  }
>>>>  EXPORT_SYMBOL(on_each_cpu_cond);
>>>>
>>>
>>
Michael Wang July 2, 2013, 10:08 a.m. UTC | #5
On 07/02/2013 05:51 PM, Srivatsa S. Bhat wrote:
[snip]
> 
> Well, as I said, it's timing-dependent. We might miss the newly onlined CPU
> in the for_each_online_cpu() loop itself, based on when exactly the CPU was
> added to the cpu_online_mask. So you can't exactly pinpoint the places where
> you'll miss the CPU and where you won't. Besides, is it _that_ important? It
> is, after all, unpredictable...

Sure, it's nothing important ;-)

I just think this comment:

+ * We use get/put_online_cpus_atomic() to protect against CPUs going
+ * offline but not online. CPUs going online during the call will
+ * not be seen or sent an IPI

It tells people that a CPU coming online during the call won't get an
IPI, while it actually has a chance to get one; folks who haven't looked
inside may miss something when using it.

But that's just my own opinion, so let's drop the discussion :)

Regards,
Michael Wang

> 
>> The comment's position makes it seem to claim that during the call of
>> this function, a CPU coming online won't be seen or sent an IPI...
>>
> 
> Doesn't matter, AFAICS. The key take-away from that whole comment is: nothing is
> done to prevent CPUs from coming online while the function is running, whereas
> the online CPUs are guaranteed to remain online throughout the function. In
> other words, it's a weaker form of get_online_cpus()/put_online_cpus(),
> providing one-way synchronization (against CPU offline only).
> 
> As long as that idea is conveyed properly, the purpose of that comment is served,
> IMHO.
> 
> Regards,
> Srivatsa S. Bhat
> 
> 
>>>>>   *
>>>>>   * You must not call this function with disabled interrupts or
>>>>>   * from a hardware interrupt handler or from a bottom half handler.
>>>>> @@ -641,26 +649,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
>>>>>  	might_sleep_if(gfp_flags & __GFP_WAIT);
>>>>>
>>>>>  	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
>>>>> -		preempt_disable();
>>>>> +		get_online_cpus_atomic();
>>>>>  		for_each_online_cpu(cpu)
>>>>>  			if (cond_func(cpu, info))
>>>>>  				cpumask_set_cpu(cpu, cpus);
>>>>>  		on_each_cpu_mask(cpus, func, info, wait);
>>>>> -		preempt_enable();
>>>>> +		put_online_cpus_atomic();
>>>>>  		free_cpumask_var(cpus);
>>>>>  	} else {
>>>>>  		/*
>>>>>  		 * No free cpumask, bother. No matter, we'll
>>>>>  		 * just have to IPI them one by one.
>>>>>  		 */
>>>>> -		preempt_disable();
>>>>> +		get_online_cpus_atomic();
>>>>>  		for_each_online_cpu(cpu)
>>>>>  			if (cond_func(cpu, info)) {
>>>>>  				ret = smp_call_function_single(cpu, func,
>>>>>  								info, wait);
>>>>>  				WARN_ON_ONCE(!ret);
>>>>>  			}
>>>>> -		preempt_enable();
>>>>> +		put_online_cpus_atomic();
>>>>>  	}
>>>>>  }
>>>>>  EXPORT_SYMBOL(on_each_cpu_cond);
>>>>>
>>>>
>>>
> 

Patch

diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7..1f36d6d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -232,7 +232,7 @@  int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	 * prevent preemption and reschedule on another processor,
 	 * as well as CPU removal
 	 */
-	this_cpu = get_cpu();
+	this_cpu = get_online_cpus_atomic();
 
 	/*
 	 * Can deadlock when called with interrupts disabled.
@@ -264,7 +264,7 @@  int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 		}
 	}
 
-	put_cpu();
+	put_online_cpus_atomic();
 
 	return err;
 }
@@ -294,7 +294,7 @@  int smp_call_function_any(const struct cpumask *mask,
 	int ret;
 
 	/* Try for same CPU (cheapest) */
-	cpu = get_cpu();
+	cpu = get_online_cpus_atomic();
 	if (cpumask_test_cpu(cpu, mask))
 		goto call;
 
@@ -310,7 +310,7 @@  int smp_call_function_any(const struct cpumask *mask,
 	cpu = cpumask_any_and(mask, cpu_online_mask);
 call:
 	ret = smp_call_function_single(cpu, func, info, wait);
-	put_cpu();
+	put_online_cpus_atomic();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
@@ -331,7 +331,8 @@  void __smp_call_function_single(int cpu, struct call_single_data *csd,
 	unsigned int this_cpu;
 	unsigned long flags;
 
-	this_cpu = get_cpu();
+	this_cpu = get_online_cpus_atomic();
+
 	/*
 	 * Can deadlock when called with interrupts disabled.
 	 * We allow cpu's that are not yet online though, as no one else can
@@ -349,7 +350,8 @@  void __smp_call_function_single(int cpu, struct call_single_data *csd,
 		csd_lock(csd);
 		generic_exec_single(cpu, csd, wait);
 	}
-	put_cpu();
+
+	put_online_cpus_atomic();
 }
 
 /**
@@ -370,7 +372,9 @@  void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait)
 {
 	struct call_function_data *cfd;
-	int cpu, next_cpu, this_cpu = smp_processor_id();
+	int cpu, next_cpu, this_cpu;
+
+	this_cpu = get_online_cpus_atomic();
 
 	/*
 	 * Can deadlock when called with interrupts disabled.
@@ -388,7 +392,7 @@  void smp_call_function_many(const struct cpumask *mask,
 
 	/* No online cpus?  We're done. */
 	if (cpu >= nr_cpu_ids)
-		return;
+		goto out;
 
 	/* Do we have another CPU which isn't us? */
 	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
@@ -398,7 +402,7 @@  void smp_call_function_many(const struct cpumask *mask,
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
 		smp_call_function_single(cpu, func, info, wait);
-		return;
+		goto out;
 	}
 
 	cfd = &__get_cpu_var(cfd_data);
@@ -408,7 +412,7 @@  void smp_call_function_many(const struct cpumask *mask,
 
 	/* Some callers race with other cpus changing the passed mask */
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
-		return;
+		goto out;
 
 	/*
 	 * After we put an entry into the list, cfd->cpumask may be cleared
@@ -443,6 +447,9 @@  void smp_call_function_many(const struct cpumask *mask,
 			csd_lock_wait(csd);
 		}
 	}
+
+out:
+	put_online_cpus_atomic();
 }
 EXPORT_SYMBOL(smp_call_function_many);
 
@@ -463,9 +470,9 @@  EXPORT_SYMBOL(smp_call_function_many);
  */
 int smp_call_function(smp_call_func_t func, void *info, int wait)
 {
-	preempt_disable();
+	get_online_cpus_atomic();
 	smp_call_function_many(cpu_online_mask, func, info, wait);
-	preempt_enable();
+	put_online_cpus_atomic();
 
 	return 0;
 }
@@ -565,12 +572,12 @@  int on_each_cpu(void (*func) (void *info), void *info, int wait)
 	unsigned long flags;
 	int ret = 0;
 
-	preempt_disable();
+	get_online_cpus_atomic();
 	ret = smp_call_function(func, info, wait);
 	local_irq_save(flags);
 	func(info);
 	local_irq_restore(flags);
-	preempt_enable();
+	put_online_cpus_atomic();
 	return ret;
 }
 EXPORT_SYMBOL(on_each_cpu);
@@ -592,7 +599,7 @@  EXPORT_SYMBOL(on_each_cpu);
 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 			void *info, bool wait)
 {
-	int cpu = get_cpu();
+	unsigned int cpu = get_online_cpus_atomic();
 
 	smp_call_function_many(mask, func, info, wait);
 	if (cpumask_test_cpu(cpu, mask)) {
@@ -600,7 +607,7 @@  void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 		func(info);
 		local_irq_enable();
 	}
-	put_cpu();
+	put_online_cpus_atomic();
 }
 EXPORT_SYMBOL(on_each_cpu_mask);
 
@@ -625,8 +632,9 @@  EXPORT_SYMBOL(on_each_cpu_mask);
  * The function might sleep if the GFP flags indicates a non
  * atomic allocation is allowed.
  *
- * Preemption is disabled to protect against CPUs going offline but not online.
- * CPUs going online during the call will not be seen or sent an IPI.
+ * We use get/put_online_cpus_atomic() to protect against CPUs going
+ * offline but not online. CPUs going online during the call will
+ * not be seen or sent an IPI.
  *
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
@@ -641,26 +649,26 @@  void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 	might_sleep_if(gfp_flags & __GFP_WAIT);
 
 	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-		preempt_disable();
+		get_online_cpus_atomic();
 		for_each_online_cpu(cpu)
 			if (cond_func(cpu, info))
 				cpumask_set_cpu(cpu, cpus);
 		on_each_cpu_mask(cpus, func, info, wait);
-		preempt_enable();
+		put_online_cpus_atomic();
 		free_cpumask_var(cpus);
 	} else {
 		/*
 		 * No free cpumask, bother. No matter, we'll
 		 * just have to IPI them one by one.
 		 */
-		preempt_disable();
+		get_online_cpus_atomic();
 		for_each_online_cpu(cpu)
 			if (cond_func(cpu, info)) {
 				ret = smp_call_function_single(cpu, func,
 								info, wait);
 				WARN_ON_ONCE(!ret);
 			}
-		preempt_enable();
+		put_online_cpus_atomic();
 	}
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
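
For illustration, a hypothetical caller of on_each_cpu_cond() after this
conversion; should_flush(), do_flush() and the needs_flush per-CPU flag
are made-up names for the sketch, not part of this patch:

	#include <linux/smp.h>
	#include <linux/percpu.h>
	#include <linux/gfp.h>

	static DEFINE_PER_CPU(bool, needs_flush);	/* hypothetical flag */

	/* Runs in the caller's context; picks which CPUs get the IPI. */
	static bool should_flush(int cpu, void *info)
	{
		return per_cpu(needs_flush, cpu);
	}

	/* Runs on each selected CPU with interrupts disabled. */
	static void do_flush(void *info)
	{
		this_cpu_write(needs_flush, false);
	}

	static void flush_where_needed(void)
	{
		/*
		 * on_each_cpu_cond() now takes get_online_cpus_atomic()
		 * internally, so a CPU observed in its loop cannot go
		 * offline before do_flush() has run on it (wait == true).
		 */
		on_each_cpu_cond(should_flush, do_flush, NULL, true, GFP_KERNEL);
	}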