
[v2,2/2] xfs: Close race between direct IO and xfs_break_layouts()

Message ID 153374942137.42241.10539674028265137668.stgit@djiang5-desk3.ch.intel.com
State Not Applicable

Commit Message

Dave Jiang Aug. 8, 2018, 5:31 p.m. UTC
This patch duplicates Ross's ext4 fix for XFS.

If the refcount of a page is lowered between the time that it is returned
by dax_busy_page() and when the refcount is again checked in
xfs_break_layouts() => ___wait_var_event(), the waiting function
xfs_wait_dax_page() will never be called.  This means that
xfs_break_layouts() will still have 'retry' set to false, so we'll stop
looping and never check the refcount of other pages in this inode.

Instead, always continue looping as long as dax_layout_busy_page() gives us
a page which it found with an elevated refcount.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
---

Sorry resend, forgot to add Jan's reviewed-by.

v2:
- Rename parameter from did_unlock to retry (Jan)

 fs/xfs/xfs_file.c |    9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
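
For context on the 'retry' handling described above, the following is a simplified sketch of the caller loop in xfs_break_layouts() as it looks in this 4.18-era code. The BREAK_WRITE/leased-layouts path, lock assertions, and the exact signature are elided, and the _sketch suffix is only there to mark it as an illustration rather than the tree's actual function. It shows why the flag must be set whenever a busy page was found: if xfs_break_dax_layouts() returns 0 without setting it, the do/while exits and any other busy pages in the inode are never re-checked.

```c
/*
 * Simplified sketch of the caller, paraphrased from the 4.18-era
 * xfs_break_layouts(); the leased-layouts/BREAK_WRITE case, lock
 * assertions, and other details are elided.
 */
static int
xfs_break_layouts_sketch(
	struct inode		*inode,
	uint			*iolock)
{
	bool			retry;
	int			error;

	do {
		retry = false;
		/*
		 * With this patch, xfs_break_dax_layouts() sets 'retry'
		 * whenever dax_layout_busy_page() returned a busy page,
		 * even if that page's refcount drops before the wait
		 * actually sleeps.
		 */
		error = xfs_break_dax_layouts(inode, *iolock, &retry);
	} while (error == 0 && retry);

	return error;
}
```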

Comments

Darrick Wong Aug. 10, 2018, 3:48 p.m. UTC | #1
On Wed, Aug 08, 2018 at 10:31:40AM -0700, Dave Jiang wrote:
> This patch is the duplicate of ross's fix for ext4 for xfs.
> 
> If the refcount of a page is lowered between the time that it is returned
> by dax_busy_page() and when the refcount is again checked in
> xfs_break_layouts() => ___wait_var_event(), the waiting function
> xfs_wait_dax_page() will never be called.  This means that
> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
> looping and never check the refcount of other pages in this inode.
> 
> Instead, always continue looping as long as dax_layout_busy_page() gives us
> a page which it found with an elevated refcount.
> 
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Reviewed-by: Jan Kara <jack@suse.cz>
> ---
> 
> Sorry resend, forgot to add Jan's reviewed-by.
> 
> v2:
> - Rename parameter from did_unlock to retry (Jan)
> 
>  fs/xfs/xfs_file.c |    9 ++++-----
>  1 file changed, 4 insertions(+), 5 deletions(-)
> 
> diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
> index a3e7767a5715..cd6f0d8c4922 100644
> --- a/fs/xfs/xfs_file.c
> +++ b/fs/xfs/xfs_file.c
> @@ -721,12 +721,10 @@ xfs_file_write_iter(
>  
>  static void
>  xfs_wait_dax_page(
> -	struct inode		*inode,
> -	bool			*did_unlock)
> +	struct inode		*inode)
>  {
>  	struct xfs_inode        *ip = XFS_I(inode);
>  
> -	*did_unlock = true;
>  	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
>  	schedule();
>  	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
> @@ -736,7 +734,7 @@ static int
>  xfs_break_dax_layouts(
>  	struct inode		*inode,
>  	uint			iolock,
> -	bool			*did_unlock)
> +	bool			*retry)

Uhhh, this hunk doesn't apply.  xfs_break_dax_layouts doesn't have an
iolock parameter anymore; was this not generated off of xfs for-next?

--D

>  {
>  	struct page		*page;
>  
> @@ -746,9 +744,10 @@ xfs_break_dax_layouts(
>  	if (!page)
>  		return 0;
>  
> +	*retry = true;
>  	return ___wait_var_event(&page->_refcount,
>  			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
> -			0, 0, xfs_wait_dax_page(inode, did_unlock));
> +			0, 0, xfs_wait_dax_page(inode));
>  }
>  
>  int
> 
Dave Jiang Aug. 10, 2018, 3:54 p.m. UTC | #2
On 08/10/2018 08:48 AM, Darrick J. Wong wrote:
> On Wed, Aug 08, 2018 at 10:31:40AM -0700, Dave Jiang wrote:
>> This patch is the duplicate of ross's fix for ext4 for xfs.
>>
>> If the refcount of a page is lowered between the time that it is returned
>> by dax_busy_page() and when the refcount is again checked in
>> xfs_break_layouts() => ___wait_var_event(), the waiting function
>> xfs_wait_dax_page() will never be called.  This means that
>> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
>> looping and never check the refcount of other pages in this inode.
>>
>> Instead, always continue looping as long as dax_layout_busy_page() gives us
>> a page which it found with an elevated refcount.
>>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> Reviewed-by: Jan Kara <jack@suse.cz>
>> ---
>>
>> Sorry resend, forgot to add Jan's reviewed-by.
>>
>> v2:
>> - Rename parameter from did_unlock to retry (Jan)
>>
>>  fs/xfs/xfs_file.c |    9 ++++-----
>>  1 file changed, 4 insertions(+), 5 deletions(-)
>>
>> diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
>> index a3e7767a5715..cd6f0d8c4922 100644
>> --- a/fs/xfs/xfs_file.c
>> +++ b/fs/xfs/xfs_file.c
>> @@ -721,12 +721,10 @@ xfs_file_write_iter(
>>  
>>  static void
>>  xfs_wait_dax_page(
>> -	struct inode		*inode,
>> -	bool			*did_unlock)
>> +	struct inode		*inode)
>>  {
>>  	struct xfs_inode        *ip = XFS_I(inode);
>>  
>> -	*did_unlock = true;
>>  	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
>>  	schedule();
>>  	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
>> @@ -736,7 +734,7 @@ static int
>>  xfs_break_dax_layouts(
>>  	struct inode		*inode,
>>  	uint			iolock,
>> -	bool			*did_unlock)
>> +	bool			*retry)
> 
> Uhhh, this hunk doesn't apply.  xfs_break_dax_layouts doesn't have an
> iolock parameter anymore; was this not generated off of xfs for-next?

Sorry. It was generated against 4.18-rc8. I'll respin the patch against
xfs for-next.

> 
> --D
> 
>>  {
>>  	struct page		*page;
>>  
>> @@ -746,9 +744,10 @@ xfs_break_dax_layouts(
>>  	if (!page)
>>  		return 0;
>>  
>> +	*retry = true;
>>  	return ___wait_var_event(&page->_refcount,
>>  			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
>> -			0, 0, xfs_wait_dax_page(inode, did_unlock));
>> +			0, 0, xfs_wait_dax_page(inode));
>>  }
>>  
>>  int
>>
Darrick Wong Aug. 10, 2018, 4:02 p.m. UTC | #3
On Fri, Aug 10, 2018 at 08:54:00AM -0700, Dave Jiang wrote:
> 
> 
> On 08/10/2018 08:48 AM, Darrick J. Wong wrote:
> > On Wed, Aug 08, 2018 at 10:31:40AM -0700, Dave Jiang wrote:
> >> This patch is the duplicate of ross's fix for ext4 for xfs.
> >>
> >> If the refcount of a page is lowered between the time that it is returned
> >> by dax_busy_page() and when the refcount is again checked in
> >> xfs_break_layouts() => ___wait_var_event(), the waiting function
> >> xfs_wait_dax_page() will never be called.  This means that
> >> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
> >> looping and never check the refcount of other pages in this inode.
> >>
> >> Instead, always continue looping as long as dax_layout_busy_page() gives us
> >> a page which it found with an elevated refcount.
> >>
> >> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> >> Reviewed-by: Jan Kara <jack@suse.cz>
> >> ---
> >>
> >> Sorry resend, forgot to add Jan's reviewed-by.
> >>
> >> v2:
> >> - Rename parameter from did_unlock to retry (Jan)
> >>
> >>  fs/xfs/xfs_file.c |    9 ++++-----
> >>  1 file changed, 4 insertions(+), 5 deletions(-)
> >>
> >> diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
> >> index a3e7767a5715..cd6f0d8c4922 100644
> >> --- a/fs/xfs/xfs_file.c
> >> +++ b/fs/xfs/xfs_file.c
> >> @@ -721,12 +721,10 @@ xfs_file_write_iter(
> >>  
> >>  static void
> >>  xfs_wait_dax_page(
> >> -	struct inode		*inode,
> >> -	bool			*did_unlock)
> >> +	struct inode		*inode)
> >>  {
> >>  	struct xfs_inode        *ip = XFS_I(inode);
> >>  
> >> -	*did_unlock = true;
> >>  	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
> >>  	schedule();
> >>  	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
> >> @@ -736,7 +734,7 @@ static int
> >>  xfs_break_dax_layouts(
> >>  	struct inode		*inode,
> >>  	uint			iolock,
> >> -	bool			*did_unlock)
> >> +	bool			*retry)
> > 
> > Uhhh, this hunk doesn't apply.  xfs_break_dax_layouts doesn't have an
> > iolock parameter anymore; was this not generated off of xfs for-next?
> 
> Sorry. It was generated against 4.18-rc8. I'll respin patch against xfs
> for-next.

I think it's just a matter of taking the old patch and changing
"did_unlock" to "retry", right?  If so, I'll just change that and be
done with this one. :)

--D

> > 
> > --D
> > 
> >>  {
> >>  	struct page		*page;
> >>  
> >> @@ -746,9 +744,10 @@ xfs_break_dax_layouts(
> >>  	if (!page)
> >>  		return 0;
> >>  
> >> +	*retry = true;
> >>  	return ___wait_var_event(&page->_refcount,
> >>  			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
> >> -			0, 0, xfs_wait_dax_page(inode, did_unlock));
> >> +			0, 0, xfs_wait_dax_page(inode));
> >>  }
> >>  
> >>  int
> >>
Dave Jiang Aug. 10, 2018, 4:05 p.m. UTC | #4
On 08/10/2018 09:02 AM, Darrick J. Wong wrote:
> On Fri, Aug 10, 2018 at 08:54:00AM -0700, Dave Jiang wrote:
>>
>>
>> On 08/10/2018 08:48 AM, Darrick J. Wong wrote:
>>> On Wed, Aug 08, 2018 at 10:31:40AM -0700, Dave Jiang wrote:
>>>> This patch is the duplicate of ross's fix for ext4 for xfs.
>>>>
>>>> If the refcount of a page is lowered between the time that it is returned
>>>> by dax_busy_page() and when the refcount is again checked in
>>>> xfs_break_layouts() => ___wait_var_event(), the waiting function
>>>> xfs_wait_dax_page() will never be called.  This means that
>>>> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
>>>> looping and never check the refcount of other pages in this inode.
>>>>
>>>> Instead, always continue looping as long as dax_layout_busy_page() gives us
>>>> a page which it found with an elevated refcount.
>>>>
>>>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>>>> Reviewed-by: Jan Kara <jack@suse.cz>
>>>> ---
>>>>
>>>> Sorry resend, forgot to add Jan's reviewed-by.
>>>>
>>>> v2:
>>>> - Rename parameter from did_unlock to retry (Jan)
>>>>
>>>>  fs/xfs/xfs_file.c |    9 ++++-----
>>>>  1 file changed, 4 insertions(+), 5 deletions(-)
>>>>
>>>> diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
>>>> index a3e7767a5715..cd6f0d8c4922 100644
>>>> --- a/fs/xfs/xfs_file.c
>>>> +++ b/fs/xfs/xfs_file.c
>>>> @@ -721,12 +721,10 @@ xfs_file_write_iter(
>>>>  
>>>>  static void
>>>>  xfs_wait_dax_page(
>>>> -	struct inode		*inode,
>>>> -	bool			*did_unlock)
>>>> +	struct inode		*inode)
>>>>  {
>>>>  	struct xfs_inode        *ip = XFS_I(inode);
>>>>  
>>>> -	*did_unlock = true;
>>>>  	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
>>>>  	schedule();
>>>>  	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
>>>> @@ -736,7 +734,7 @@ static int
>>>>  xfs_break_dax_layouts(
>>>>  	struct inode		*inode,
>>>>  	uint			iolock,
>>>> -	bool			*did_unlock)
>>>> +	bool			*retry)
>>>
>>> Uhhh, this hunk doesn't apply.  xfs_break_dax_layouts doesn't have an
>>> iolock parameter anymore; was this not generated off of xfs for-next?
>>
>> Sorry. It was generated against 4.18-rc8. I'll respin patch against xfs
>> for-next.
> 
> I think it's just a matter of taking the old patch and changing
> "did_unlock" to "retry", right?  If so, I'll just change that and be
> done with this one. :)

For the conflict part yes. Thanks! :)

> 
> --D
> 
>>>
>>> --D
>>>
>>>>  {
>>>>  	struct page		*page;
>>>>  
>>>> @@ -746,9 +744,10 @@ xfs_break_dax_layouts(
>>>>  	if (!page)
>>>>  		return 0;
>>>>  
>>>> +	*retry = true;
>>>>  	return ___wait_var_event(&page->_refcount,
>>>>  			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
>>>> -			0, 0, xfs_wait_dax_page(inode, did_unlock));
>>>> +			0, 0, xfs_wait_dax_page(inode));
>>>>  }
>>>>  
>>>>  int
>>>>
Eric Sandeen Aug. 10, 2018, 6:31 p.m. UTC | #5
On 8/8/18 12:31 PM, Dave Jiang wrote:
> This patch is the duplicate of ross's fix for ext4 for xfs.
> 
> If the refcount of a page is lowered between the time that it is returned
> by dax_busy_page() and when the refcount is again checked in
> xfs_break_layouts() => ___wait_var_event(), the waiting function
> xfs_wait_dax_page() will never be called.  This means that
> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
> looping and never check the refcount of other pages in this inode.
> 
> Instead, always continue looping as long as dax_layout_busy_page() gives us
> a page which it found with an elevated refcount.

Hi Dave, does this have a testcase?  Have you seen the issue using Ross's
xfstest generic/503 or is there some other test?  Apologies if I missed
prior discussion on a testcase or race frequency...

Thanks,
-Eric

> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> Reviewed-by: Jan Kara <jack@suse.cz>
> ---
> 
> Sorry resend, forgot to add Jan's reviewed-by.
> 
> v2:
> - Rename parameter from did_unlock to retry (Jan)
> 
>  fs/xfs/xfs_file.c |    9 ++++-----
>  1 file changed, 4 insertions(+), 5 deletions(-)
> 
> diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
> index a3e7767a5715..cd6f0d8c4922 100644
> --- a/fs/xfs/xfs_file.c
> +++ b/fs/xfs/xfs_file.c
> @@ -721,12 +721,10 @@ xfs_file_write_iter(
>  
>  static void
>  xfs_wait_dax_page(
> -	struct inode		*inode,
> -	bool			*did_unlock)
> +	struct inode		*inode)
>  {
>  	struct xfs_inode        *ip = XFS_I(inode);
>  
> -	*did_unlock = true;
>  	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
>  	schedule();
>  	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
> @@ -736,7 +734,7 @@ static int
>  xfs_break_dax_layouts(
>  	struct inode		*inode,
>  	uint			iolock,
> -	bool			*did_unlock)
> +	bool			*retry)
>  {
>  	struct page		*page;
>  
> @@ -746,9 +744,10 @@ xfs_break_dax_layouts(
>  	if (!page)
>  		return 0;
>  
> +	*retry = true;
>  	return ___wait_var_event(&page->_refcount,
>  			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
> -			0, 0, xfs_wait_dax_page(inode, did_unlock));
> +			0, 0, xfs_wait_dax_page(inode));
>  }
>  
>  int
>
Dave Jiang Aug. 10, 2018, 7:23 p.m. UTC | #6
On 08/10/2018 11:31 AM, Eric Sandeen wrote:
> On 8/8/18 12:31 PM, Dave Jiang wrote:
>> This patch is the duplicate of ross's fix for ext4 for xfs.
>>
>> If the refcount of a page is lowered between the time that it is returned
>> by dax_busy_page() and when the refcount is again checked in
>> xfs_break_layouts() => ___wait_var_event(), the waiting function
>> xfs_wait_dax_page() will never be called.  This means that
>> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
>> looping and never check the refcount of other pages in this inode.
>>
>> Instead, always continue looping as long as dax_layout_busy_page() gives us
>> a page which it found with an elevated refcount.
> 
> Hi Dave, does this have a testcase?  Have you seen the issue using Ross's
> xfstest generic/503 or is there some other test?  Apologies if I missed
> prior discussion on a testcase or race frequency...

I do not have a testcase. I know Ross replicated it on ext4, and Jan
asked me to create the same fix for XFS when he reviewed Ross's fix for ext4.

> 
> Thanks,
> -Eric
> 
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> Reviewed-by: Jan Kara <jack@suse.cz>
>> ---
>>
>> Sorry resend, forgot to add Jan's reviewed-by.
>>
>> v2:
>> - Rename parameter from did_unlock to retry (Jan)
>>
>>  fs/xfs/xfs_file.c |    9 ++++-----
>>  1 file changed, 4 insertions(+), 5 deletions(-)
>>
>> diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
>> index a3e7767a5715..cd6f0d8c4922 100644
>> --- a/fs/xfs/xfs_file.c
>> +++ b/fs/xfs/xfs_file.c
>> @@ -721,12 +721,10 @@ xfs_file_write_iter(
>>  
>>  static void
>>  xfs_wait_dax_page(
>> -	struct inode		*inode,
>> -	bool			*did_unlock)
>> +	struct inode		*inode)
>>  {
>>  	struct xfs_inode        *ip = XFS_I(inode);
>>  
>> -	*did_unlock = true;
>>  	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
>>  	schedule();
>>  	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
>> @@ -736,7 +734,7 @@ static int
>>  xfs_break_dax_layouts(
>>  	struct inode		*inode,
>>  	uint			iolock,
>> -	bool			*did_unlock)
>> +	bool			*retry)
>>  {
>>  	struct page		*page;
>>  
>> @@ -746,9 +744,10 @@ xfs_break_dax_layouts(
>>  	if (!page)
>>  		return 0;
>>  
>> +	*retry = true;
>>  	return ___wait_var_event(&page->_refcount,
>>  			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
>> -			0, 0, xfs_wait_dax_page(inode, did_unlock));
>> +			0, 0, xfs_wait_dax_page(inode));
>>  }
>>  
>>  int
>>
Ross Zwisler Aug. 10, 2018, 7:24 p.m. UTC | #7
On Fri, Aug 10, 2018 at 9:23 AM Dave Jiang <dave.jiang@intel.com> wrote:
> On 08/10/2018 11:31 AM, Eric Sandeen wrote:
> > On 8/8/18 12:31 PM, Dave Jiang wrote:
> >> This patch is the duplicate of ross's fix for ext4 for xfs.
> >>
> >> If the refcount of a page is lowered between the time that it is returned
> >> by dax_busy_page() and when the refcount is again checked in
> >> xfs_break_layouts() => ___wait_var_event(), the waiting function
> >> xfs_wait_dax_page() will never be called.  This means that
> >> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
> >> looping and never check the refcount of other pages in this inode.
> >>
> >> Instead, always continue looping as long as dax_layout_busy_page() gives us
> >> a page which it found with an elevated refcount.
> >
> > Hi Dave, does this have a testcase?  Have you seen the issue using Ross's
> > xfstest generic/503 or is there some other test?  Apologies if I missed
> > prior discussion on a testcase or race frequency...
>
> I do not have a testcase. I know Ross replicated it on ext4. And Jan
> asked to create the same fix with XFS when he reviewed Ross's fix for ext4.

In my testing I couldn't get this race to hit with XFS.  I couldn't
even get a failure with generic/503 when testing XFS before Dan's
initial patches went in which added xfs_break_layouts() et al.  I
think that Dan had to manually insert timing delays to get the warning
to hit for XFS when testing his patches.

The race we're fixing happens consistently with ext4 and through code
inspection we can see that the race exists in XFS.
Eric Sandeen Aug. 10, 2018, 7:26 p.m. UTC | #8
On 8/10/18 2:24 PM, Ross Zwisler wrote:
> On Fri, Aug 10, 2018 at 9:23 AM Dave Jiang <dave.jiang@intel.com> wrote:
>> On 08/10/2018 11:31 AM, Eric Sandeen wrote:
>>> On 8/8/18 12:31 PM, Dave Jiang wrote:
>>>> This patch is the duplicate of ross's fix for ext4 for xfs.
>>>>
>>>> If the refcount of a page is lowered between the time that it is returned
>>>> by dax_busy_page() and when the refcount is again checked in
>>>> xfs_break_layouts() => ___wait_var_event(), the waiting function
>>>> xfs_wait_dax_page() will never be called.  This means that
>>>> xfs_break_layouts() will still have 'retry' set to false, so we'll stop
>>>> looping and never check the refcount of other pages in this inode.
>>>>
>>>> Instead, always continue looping as long as dax_layout_busy_page() gives us
>>>> a page which it found with an elevated refcount.
>>>
>>> Hi Dave, does this have a testcase?  Have you seen the issue using Ross's
>>> xfstest generic/503 or is there some other test?  Apologies if I missed
>>> prior discussion on a testcase or race frequency...
>>
>> I do not have a testcase. I know Ross replicated it on ext4. And Jan
>> asked to create the same fix with XFS when he reviewed Ross's fix for ext4.
> 
> In my testing I couldn't get this race to hit with XFS.  I couldn't
> even get a failure with generic/503 when testing XFS before Dan's
> initial patches went in which added xfs_break_layouts() et al.  I
> think that Dan had to manually insert timing delays to get the warning
> to hit for XFS when testing his patches.
> 
> The race we're fixing happens consistently with ext4 and through code
> inspection we can see that the race exists in XFS.

Ok, thanks for the info Dave & Ross!

-Eric

Patch

diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a3e7767a5715..cd6f0d8c4922 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -721,12 +721,10 @@ xfs_file_write_iter(
 
 static void
 xfs_wait_dax_page(
-	struct inode		*inode,
-	bool			*did_unlock)
+	struct inode		*inode)
 {
 	struct xfs_inode        *ip = XFS_I(inode);
 
-	*did_unlock = true;
 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	schedule();
 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
@@ -736,7 +734,7 @@ static int
 xfs_break_dax_layouts(
 	struct inode		*inode,
 	uint			iolock,
-	bool			*did_unlock)
+	bool			*retry)
 {
 	struct page		*page;
 
@@ -746,9 +744,10 @@ xfs_break_dax_layouts(
 	if (!page)
 		return 0;
 
+	*retry = true;
 	return ___wait_var_event(&page->_refcount,
 			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
-			0, 0, xfs_wait_dax_page(inode, did_unlock));
+			0, 0, xfs_wait_dax_page(inode));
 }
 
 int
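
For background, the race hinges on how ___wait_var_event() behaves: the condition is re-checked before sleeping, and the 'cmd' argument (xfs_wait_dax_page(), which used to be the only place *did_unlock was set) only runs while the condition is still false. The program below is a deliberately simplified userland model of that behaviour, not the kernel macro; all helper names in it are made up for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

/* stand-in for xfs_wait_dax_page(): before this patch it was the only
 * place the did_unlock/retry flag was set */
static void wait_cmd(bool *did_unlock)
{
	*did_unlock = true;
	/* the real code unlocks MMAPLOCK, schedule()s, and relocks here */
}

/* stand-in for ___wait_var_event(): the condition is checked first and
 * the wait command only runs while it is still false */
static int wait_var_event_model(int refcount, bool *did_unlock)
{
	if (refcount == 1)
		return 0;	/* condition already true: wait_cmd never runs */
	wait_cmd(did_unlock);	/* the kernel would sleep and re-check here */
	return 0;
}

int main(void)
{
	bool retry = false;

	/*
	 * dax_layout_busy_page() saw an elevated refcount, but it drops
	 * to 1 before the re-check: the old code leaves the flag false
	 * and the caller stops looping over the inode's other pages.
	 */
	wait_var_event_model(1, &retry);
	printf("retry after wait: %d\n", retry);	/* prints 0 */
	return 0;
}
```

Setting *retry = true before the wait, as the patch does, makes the caller loop back to dax_layout_busy_page() regardless of when the refcount dropped.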