
[01/12] powerpc: qspinlock: Fix qnode->locked value interpretation

Message ID 20230508020120.218494-2-rmclure@linux.ibm.com (mailing list archive)
State Superseded
Series powerpc: KCSAN fix warnings and mark accesses

Commit Message

Rohan McLure May 8, 2023, 2:01 a.m. UTC
A comment accompanying the locked attribute of a qnode assigns a value
of 1 to mean that the lock has been acquired. The usages of this
variable, however, assume the opposite semantics. Update the usages so
that they reflect what the comment asserts.

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
 arch/powerpc/lib/qspinlock.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
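
For context, these are the qnode fields the patch touches, as a sketch
inferred from the diff below (the exact in-tree definition may differ):

	struct qnode {
		struct qnode		*next;
		struct qspinlock	*lock;
		int			cpu;
		int			yield_cpu;
		u8			locked;	/* 1 if lock acquired */
	};

The comment on the locked field is the one the commit message refers to.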

Comments

Nicholas Piggin May 9, 2023, 2:01 a.m. UTC | #1
On Mon May 8, 2023 at 12:01 PM AEST, Rohan McLure wrote:
> A comment accompanying the locked attribute of a qnode assigns a value
> of 1 to mean that the lock has been acquired. The usages of this
> variable, however, assume the opposite semantics. Update the usages so
> that they reflect what the comment asserts.

1 actually means the lock is acquired for this waiter. The
previous owner sets it to 1, which means we now have the lock.
It's slightly confusing, but that's how the generic qspinlock
calls it too.

It doesn't even really mean we have acquired the lock, though;
it means we got through the MCS queue. "Waiting" or
"released" or something like that might be a better name.

Could change the name or comment to make that a bit clearer, but
while it's the same as kernel/locking/qspinlock.c it's better to
keep the polarity the same.

Thanks,
Nick
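
For reference, the handoff polarity Nick describes looks roughly like
this in a minimal MCS lock sketch (illustrative only, written with GCC
atomic builtins; the names are invented for the example and this is not
the kernel's implementation):

	struct mcs_node {
		struct mcs_node *next;
		int locked;		/* 1 == lock handed to this waiter */
	};

	void mcs_lock(struct mcs_node **tail, struct mcs_node *node)
	{
		struct mcs_node *prev;

		node->next = NULL;
		node->locked = 0;	/* not yet handed the lock */

		/* atomically append ourselves to the queue tail */
		prev = __atomic_exchange_n(tail, node, __ATOMIC_ACQ_REL);
		if (prev) {
			__atomic_store_n(&prev->next, node, __ATOMIC_RELEASE);
			/* spin until the predecessor sets locked = 1 */
			while (!__atomic_load_n(&node->locked, __ATOMIC_ACQUIRE))
				;
		}
	}

	void mcs_unlock(struct mcs_node **tail, struct mcs_node *node)
	{
		struct mcs_node *next = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE);

		if (!next) {
			struct mcs_node *expected = node;

			/* no visible successor: try to empty the queue */
			if (__atomic_compare_exchange_n(tail, &expected, NULL, 0,
							__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
				return;
			/* a successor is mid-enqueue; wait for next to appear */
			while (!(next = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE)))
				;
		}
		/* the "locked = 1" write: hand the lock to the next waiter */
		__atomic_store_n(&next->locked, 1, __ATOMIC_RELEASE);
	}

A waiter initialises locked to 0 and spins while it is still 0; the
previous holder hands over by writing 1, which matches the polarity in
kernel/locking/qspinlock.c.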

Rohan McLure May 9, 2023, 4:26 a.m. UTC | #2
> On 9 May 2023, at 12:01 pm, Nicholas Piggin <npiggin@gmail.com> wrote:
> 
> On Mon May 8, 2023 at 12:01 PM AEST, Rohan McLure wrote:
>> A comment accompanying the locked attribute of a qnode assigns a value
>> of 1 to mean that the lock has been acquired. The usages of this
>> variable, however, assume the opposite semantics. Update the usages so
>> that they reflect what the comment asserts.
> 
> 1 actually means the lock is acquired for this waiter. The
> previous owner sets it to 1, which means we now have the lock.
> It's slightly confusing, but that's how the generic qspinlock
> calls it too.
> 
> It doesn't even really mean we have acquired the lock, though;
> it means we got through the MCS queue. "Waiting" or
> "released" or something like that might be a better name.

This makes more sense. It seemed pretty unlikely to me that swapped
polarity would have gone unnoticed, so I'm glad to have that cleared up.

> 
> Could change the name or comment to make that a bit clearer, but
> while it's the same as kernel/locking/qspinlock.c it's better to
> keep the polarity the same.

Yeah, since ‘locked’ is an MCS intrinsic, I think I’d rather keep
the name from kernel/locking/qspinlock.c.


Patch

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index e4bd145255d0..9cf93963772b 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -435,7 +435,7 @@  static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 
 	smp_rmb(); /* See __yield_to_locked_owner comment */
 
-	if (!node->locked) {
+	if (node->locked) {
 		yield_to_preempted(prev_cpu, yield_count);
 		spin_begin();
 		return preempted;
@@ -566,7 +566,7 @@  static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	node->lock = lock;
 	node->cpu = smp_processor_id();
 	node->yield_cpu = -1;
-	node->locked = 0;
+	node->locked = 1;
 
 	tail = encode_tail_cpu(node->cpu);
 
@@ -584,7 +584,7 @@  static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 
 		/* Wait for mcs node lock to be released */
 		spin_begin();
-		while (!node->locked) {
+		while (node->locked) {
 			spec_barrier();
 
 			if (yield_to_prev(lock, node, old, paravirt))
@@ -693,13 +693,13 @@  static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	 */
 	if (paravirt && pv_prod_head) {
 		int next_cpu = next->cpu;
-		WRITE_ONCE(next->locked, 1);
+		WRITE_ONCE(next->locked, 0);
 		if (_Q_SPIN_MISO)
 			asm volatile("miso" ::: "memory");
 		if (vcpu_is_preempted(next_cpu))
 			prod_cpu(next_cpu);
 	} else {
-		WRITE_ONCE(next->locked, 1);
+		WRITE_ONCE(next->locked, 0);
 		if (_Q_SPIN_MISO)
 			asm volatile("miso" ::: "memory");
 	}