@@ -2107,6 +2107,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
continue;
}
+
+ get_online_cpus_atomic();
cpu = rnp->grplo;
bit = 1;
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
@@ -2114,6 +2116,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
f(per_cpu_ptr(rsp->rda, cpu)))
mask |= bit;
}
+ put_online_cpus_atomic();
+
if (mask != 0) {
/* rcu_report_qs_rnp() releases rnp->lock. */
Once stop_machine() is gone from the CPU offline path, we won't be able to depend on disabling preemption to prevent CPUs from going offline from under us. In RCU code, rcu_implicit_dynticks_qs() checks if a CPU is offline, while being protected by a spinlock. Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going offline during this check, since these APIs are safe to invoke from atomic context. Cc: Dipankar Sarma <dipankar@in.ibm.com> Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- kernel/rcutree.c | 4 ++++ 1 file changed, 4 insertions(+)