diff mbox

[RFC,1/2] bdi: Create a flag to indicate that a backing device needs stable page writes

Message ID 20121027013524.GA19591@blackbox.djwong.org
State Superseded, archived
Headers show

Commit Message

Darrick Wong Oct. 27, 2012, 1:35 a.m. UTC
This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
stable page writes.  It also plumbs in a sysfs attribute so that admins can
check the device status.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---

 block/blk-integrity.c       |    8 ++++++++
 include/linux/backing-dev.h |    6 ++++++
 mm/backing-dev.c            |   11 +++++++++++
 3 files changed, 25 insertions(+)

--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Comments

Jan Kara Oct. 29, 2012, 6:13 p.m. UTC | #1
On Fri 26-10-12 18:35:24, Darrick J. Wong wrote:
> This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> stable page writes.  It also plumbs in a sysfs attribute so that admins can
> check the device status.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
  I guess Jens Axboe <axboe@kernel.dk> would be the best target for this
patch (so that he can merge it). The patch looks OK to me. You can add:
  Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

> ---
> 
>  block/blk-integrity.c       |    8 ++++++++
>  include/linux/backing-dev.h |    6 ++++++
>  mm/backing-dev.c            |   11 +++++++++++
>  3 files changed, 25 insertions(+)
> 
> diff --git a/block/blk-integrity.c b/block/blk-integrity.c
> index da2a818..d05d7b3 100644
> --- a/block/blk-integrity.c
> +++ b/block/blk-integrity.c
> @@ -384,6 +384,7 @@ EXPORT_SYMBOL(blk_integrity_is_initialized);
>  int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
>  {
>  	struct blk_integrity *bi;
> +	struct backing_dev_info *bdi;
>  
>  	BUG_ON(disk == NULL);
>  
> @@ -420,6 +421,9 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
>  	} else
>  		bi->name = bi_unsupported_name;
>  
> +	bdi = &disk->queue->backing_dev_info;
> +	bdi->capabilities |= BDI_CAP_STABLE_WRITES;
> +
>  	return 0;
>  }
>  EXPORT_SYMBOL(blk_integrity_register);
> @@ -434,10 +438,14 @@ EXPORT_SYMBOL(blk_integrity_register);
>  void blk_integrity_unregister(struct gendisk *disk)
>  {
>  	struct blk_integrity *bi;
> +	struct backing_dev_info *bdi;
>  
>  	if (!disk || !disk->integrity)
>  		return;
>  
> +	bdi = &disk->queue->backing_dev_info;
> +	bdi->capabilities &= ~BDI_CAP_STABLE_WRITES;
> +
>  	bi = disk->integrity;
>  
>  	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
> diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
> index 2a9a9ab..b82a8bb 100644
> --- a/include/linux/backing-dev.h
> +++ b/include/linux/backing-dev.h
> @@ -253,6 +253,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
>  #define BDI_CAP_EXEC_MAP	0x00000040
>  #define BDI_CAP_NO_ACCT_WB	0x00000080
>  #define BDI_CAP_SWAP_BACKED	0x00000100
> +#define BDI_CAP_STABLE_WRITES	0x00000200
>  
>  #define BDI_CAP_VMFLAGS \
>  	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
> @@ -307,6 +308,11 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout);
>  int pdflush_proc_obsolete(struct ctl_table *table, int write,
>  		void __user *buffer, size_t *lenp, loff_t *ppos);
>  
> +static inline bool bdi_cap_stable_writes(struct backing_dev_info *bdi)
> +{
> +	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
> +}
> +
>  static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
>  {
>  	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
> diff --git a/mm/backing-dev.c b/mm/backing-dev.c
> index d3ca2b3..a2f4c08 100644
> --- a/mm/backing-dev.c
> +++ b/mm/backing-dev.c
> @@ -221,12 +221,23 @@ static ssize_t max_ratio_store(struct device *dev,
>  }
>  BDI_SHOW(max_ratio, bdi->max_ratio)
>  
> +static ssize_t stable_page_writes_show(struct device *dev,
> +	struct device_attribute *attr, char *page)
> +{
> +	struct backing_dev_info *bdi = dev_get_drvdata(dev);
> +
> +	return snprintf(page, PAGE_SIZE-1, "%d\n",
> +			bdi->capabilities & BDI_CAP_STABLE_WRITES ? 1 : 0);
> +}
> +
>  #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
> +#define __ATTR_RO(attr) __ATTR(attr, 0444, attr##_show, NULL)
>  
>  static struct device_attribute bdi_dev_attrs[] = {
>  	__ATTR_RW(read_ahead_kb),
>  	__ATTR_RW(min_ratio),
>  	__ATTR_RW(max_ratio),
> +	__ATTR_RO(stable_page_writes),
>  	__ATTR_NULL,
>  };
>  
> --
> To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jan Kara Oct. 29, 2012, 6:30 p.m. UTC | #2
On Mon 29-10-12 19:13:58, Jan Kara wrote:
> On Fri 26-10-12 18:35:24, Darrick J. Wong wrote:
> > This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> > stable page writes.  It also plumbs in a sysfs attribute so that admins can
> > check the device status.
> > 
> > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
>   I guess Jens Axboe <axboe@kernel.dk> would be the best target for this
> patch (so that he can merge it). The patch looks OK to me. You can add:
>   Reviewed-by: Jan Kara <jack@suse.cz>
  One more thing popped up in my mind: What about NFS, Ceph or md RAID5?
These could (at least theoretically) care about stable writes as well. I'm
not sure if they really started to use them but it would be good to at
least let them know.

								Honza

> > ---
> > 
> >  block/blk-integrity.c       |    8 ++++++++
> >  include/linux/backing-dev.h |    6 ++++++
> >  mm/backing-dev.c            |   11 +++++++++++
> >  3 files changed, 25 insertions(+)
> > 
> > diff --git a/block/blk-integrity.c b/block/blk-integrity.c
> > index da2a818..d05d7b3 100644
> > --- a/block/blk-integrity.c
> > +++ b/block/blk-integrity.c
> > @@ -384,6 +384,7 @@ EXPORT_SYMBOL(blk_integrity_is_initialized);
> >  int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
> >  {
> >  	struct blk_integrity *bi;
> > +	struct backing_dev_info *bdi;
> >  
> >  	BUG_ON(disk == NULL);
> >  
> > @@ -420,6 +421,9 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
> >  	} else
> >  		bi->name = bi_unsupported_name;
> >  
> > +	bdi = &disk->queue->backing_dev_info;
> > +	bdi->capabilities |= BDI_CAP_STABLE_WRITES;
> > +
> >  	return 0;
> >  }
> >  EXPORT_SYMBOL(blk_integrity_register);
> > @@ -434,10 +438,14 @@ EXPORT_SYMBOL(blk_integrity_register);
> >  void blk_integrity_unregister(struct gendisk *disk)
> >  {
> >  	struct blk_integrity *bi;
> > +	struct backing_dev_info *bdi;
> >  
> >  	if (!disk || !disk->integrity)
> >  		return;
> >  
> > +	bdi = &disk->queue->backing_dev_info;
> > +	bdi->capabilities &= ~BDI_CAP_STABLE_WRITES;
> > +
> >  	bi = disk->integrity;
> >  
> >  	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
> > diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
> > index 2a9a9ab..b82a8bb 100644
> > --- a/include/linux/backing-dev.h
> > +++ b/include/linux/backing-dev.h
> > @@ -253,6 +253,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
> >  #define BDI_CAP_EXEC_MAP	0x00000040
> >  #define BDI_CAP_NO_ACCT_WB	0x00000080
> >  #define BDI_CAP_SWAP_BACKED	0x00000100
> > +#define BDI_CAP_STABLE_WRITES	0x00000200
> >  
> >  #define BDI_CAP_VMFLAGS \
> >  	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
> > @@ -307,6 +308,11 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout);
> >  int pdflush_proc_obsolete(struct ctl_table *table, int write,
> >  		void __user *buffer, size_t *lenp, loff_t *ppos);
> >  
> > +static inline bool bdi_cap_stable_writes(struct backing_dev_info *bdi)
> > +{
> > +	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
> > +}
> > +
> >  static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
> >  {
> >  	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
> > diff --git a/mm/backing-dev.c b/mm/backing-dev.c
> > index d3ca2b3..a2f4c08 100644
> > --- a/mm/backing-dev.c
> > +++ b/mm/backing-dev.c
> > @@ -221,12 +221,23 @@ static ssize_t max_ratio_store(struct device *dev,
> >  }
> >  BDI_SHOW(max_ratio, bdi->max_ratio)
> >  
> > +static ssize_t stable_page_writes_show(struct device *dev,
> > +	struct device_attribute *attr, char *page)
> > +{
> > +	struct backing_dev_info *bdi = dev_get_drvdata(dev);
> > +
> > +	return snprintf(page, PAGE_SIZE-1, "%d\n",
> > +			bdi->capabilities & BDI_CAP_STABLE_WRITES ? 1 : 0);
> > +}
> > +
> >  #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
> > +#define __ATTR_RO(attr) __ATTR(attr, 0444, attr##_show, NULL)
> >  
> >  static struct device_attribute bdi_dev_attrs[] = {
> >  	__ATTR_RW(read_ahead_kb),
> >  	__ATTR_RW(min_ratio),
> >  	__ATTR_RW(max_ratio),
> > +	__ATTR_RO(stable_page_writes),
> >  	__ATTR_NULL,
> >  };
> >  
> > --
> > To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
> > the body of a message to majordomo@vger.kernel.org
> > More majordomo info at  http://vger.kernel.org/majordomo-info.html
> -- 
> Jan Kara <jack@suse.cz>
> SUSE Labs, CR
> --
> To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
NeilBrown Oct. 29, 2012, 11:48 p.m. UTC | #3
On Mon, 29 Oct 2012 19:30:51 +0100 Jan Kara <jack@suse.cz> wrote:

> On Mon 29-10-12 19:13:58, Jan Kara wrote:
> > On Fri 26-10-12 18:35:24, Darrick J. Wong wrote:
> > > This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> > > stable page writes.  It also plumbs in a sysfs attribute so that admins can
> > > check the device status.
> > > 
> > > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> >   I guess Jens Axboe <axboe@kernel.dk> would be the best target for this
> > patch (so that he can merge it). The patch looks OK to me. You can add:
> >   Reviewed-by: Jan Kara <jack@suse.cz>
>   One more thing popped up in my mind: What about NFS, Ceph or md RAID5?
> These could (at least theoretically) care about stable writes as well. I'm
> not sure if they really started to use them but it would be good to at
> least let them know.
> 

What exactly are the semantics of BDI_CAP_STABLE_WRITES ?

If I set it for md/RAID5, do I get a cast-iron guarantee that no byte in any
page submitted for write will ever change until after I call bio_endio()?

If so, is this true for all filesystems? - I would expect a bigger patch would
be needed for that.

If not, what exactly are the semantics?

Thanks,
NeilBrown
Jan Kara Oct. 30, 2012, 12:10 a.m. UTC | #4
On Tue 30-10-12 10:48:37, NeilBrown wrote:
> On Mon, 29 Oct 2012 19:30:51 +0100 Jan Kara <jack@suse.cz> wrote:
> 
> > On Mon 29-10-12 19:13:58, Jan Kara wrote:
> > > On Fri 26-10-12 18:35:24, Darrick J. Wong wrote:
> > > > This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> > > > stable page writes.  It also plumbs in a sysfs attribute so that admins can
> > > > check the device status.
> > > > 
> > > > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> > >   I guess Jens Axboe <axboe@kernel.dk> would be the best target for this
> > > patch (so that he can merge it). The patch looks OK to me. You can add:
> > >   Reviewed-by: Jan Kara <jack@suse.cz>
> >   One more thing popped up in my mind: What about NFS, Ceph or md RAID5?
> > These could (at least theoretically) care about stable writes as well. I'm
> > not sure if they really started to use them but it would be good to at
> > least let them know.
> > 
> 
> What exactly are the semantics of BDI_CAP_STABLE_WRITES ?
> 
> If I set it for md/RAID5, do I get a cast-iron guarantee that no byte in any
> page submitted for write will ever change until after I call bio_endio()?
  Yes.

> If so, is this true for all filesystems? - I would expect a bigger patch would
> be needed for that.
  Actually the code is in kernel for quite some time already. The problem
is it is always enabled causing unnecessary performance issues for some
workloads. So these patches try to be more selective in when the code gets
enabled.

Regarding "all filesystems" question: If we update filemap_page_mkwrite()
to call wait_on_page_writeback() then it should be for all filesystems.

								Honza
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
NeilBrown Oct. 30, 2012, 12:34 a.m. UTC | #5
On Tue, 30 Oct 2012 01:10:08 +0100 Jan Kara <jack@suse.cz> wrote:

> On Tue 30-10-12 10:48:37, NeilBrown wrote:
> > On Mon, 29 Oct 2012 19:30:51 +0100 Jan Kara <jack@suse.cz> wrote:
> > 
> > > On Mon 29-10-12 19:13:58, Jan Kara wrote:
> > > > On Fri 26-10-12 18:35:24, Darrick J. Wong wrote:
> > > > > This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> > > > > stable page writes.  It also plumbs in a sysfs attribute so that admins can
> > > > > check the device status.
> > > > > 
> > > > > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> > > >   I guess Jens Axboe <axboe@kernel.dk> would be the best target for this
> > > > patch (so that he can merge it). The patch looks OK to me. You can add:
> > > >   Reviewed-by: Jan Kara <jack@suse.cz>
> > >   One more thing popped up in my mind: What about NFS, Ceph or md RAID5?
> > > These could (at least theoretically) care about stable writes as well. I'm
> > > not sure if they really started to use them but it would be good to at
> > > least let them know.
> > > 
> > 
> > What exactly are the semantics of BDI_CAP_STABLE_WRITES ?
> > 
> > If I set it for md/RAID5, do I get a cast-iron guarantee that no byte in any
> > page submitted for write will ever change until after I call bio_endio()?
>   Yes.
> 
> > If so, is this true for all filesystems? - I would expect a bigger patch would
> > be needed for that.
>   Actually the code is in kernel for quite some time already. The problem
> is it is always enabled causing unnecessary performance issues for some
> workloads. So these patches try to be more selective in when the code gets
> enabled.
> 
> Regarding "all filesystems" question: If we update filemap_page_mkwrite()
> to call wait_on_page_writeback() then it should be for all filesystems.

Cool.  I didn't realise it had progressed that far.

I guess it is time to look at the possibility of removing the
'copy-into-cache' step for full-page, well-aligned bi_iovecs.

I assume this applies to swap-out as well ??  It has been a minor source of
frustration that when you swap-out to RAID1, you can occasionally get
different data on the two devices because memory changed between the two DMA
events.

NeilBrown
Martin K. Petersen Oct. 30, 2012, 4:10 a.m. UTC | #6
>>>>> "Darrick" == Darrick J Wong <darrick.wong@oracle.com> writes:

Darrick> This creates BDI_CAP_STABLE_WRITES, which indicates that a
Darrick> device requires stable page writes.  It also plumbs in a sysfs
Darrick> attribute so that admins can check the device status.

Might be nice to make the sysfs knob tweakable. Also, don't forget to
add a suitable blurb to Documentation/ABI/.

Feel free to add my Acked-by:
NeilBrown Oct. 30, 2012, 4:48 a.m. UTC | #7
On Tue, 30 Oct 2012 00:10:22 -0400 "Martin K. Petersen"
<martin.petersen@oracle.com> wrote:

> >>>>> "Darrick" == Darrick J Wong <darrick.wong@oracle.com> writes:
> 
> Darrick> This creates BDI_CAP_STABLE_WRITES, which indicates that a
> Darrick> device requires stable page writes.  It also plumbs in a sysfs
> Darrick> attribute so that admins can check the device status.
> 
> Might be nice to make the sysfs knob tweakable. Also, don't forget to
> add a suitable blurb to Documentation/ABI/.

 It isn't at all clear to me that having the sysfs knob 'tweakable' is a good
 idea.
 From the md/raid5 perspective, I would want to know for certain whether the
 pages in a given bio are guaranteed not to change, or if they might.
 I could set the BDI_CAP_STABLE_WRITES and believe they will never change, or
 test the BDI_CAP_STABLE_WRITES and let that tell me if they might change or
 not.  But if the bit can be changed at any moment, then it can never be
 trusted and so becomes worthless to me.

 At the very least it should only be possible to change it when there is no
 IO in flight.

NeilBrown


> 
> Feel free to add my Acked-by:
>
Martin K. Petersen Oct. 30, 2012, 12:19 p.m. UTC | #8
>>>>> "Neil" == NeilBrown  <neilb@suse.de> writes:

Neil,

>> Might be nice to make the sysfs knob tweakable. Also, don't forget to
>> add a suitable blurb to Documentation/ABI/.

Neil>  It isn't at all clear to me that having the sysfs knob
Neil>  'tweakable' is a good idea.  From the md/raid5 perspective, I
Neil>  would want to know for certain whether the pages in a given bio
Neil>  are guaranteed not to change, or if they might.  I could set the
Neil>  BDI_CAP_STABLE_WRITES and believe they will never change, or test
Neil>  the BDI_CAP_STABLE_WRITES and let that tell me if they might
Neil>  change or not.  But if the bit can be changed at any moment, then
Neil>  it can never be trusted and so becomes worthless to me.

I was mostly interested in being able to turn it on for devices that
haven't explicitly done so. I agree that turning it off can be
problematic.
Jan Kara Oct. 30, 2012, 1:38 p.m. UTC | #9
On Tue 30-10-12 11:34:41, NeilBrown wrote:
> On Tue, 30 Oct 2012 01:10:08 +0100 Jan Kara <jack@suse.cz> wrote:
> 
> > On Tue 30-10-12 10:48:37, NeilBrown wrote:
> > > On Mon, 29 Oct 2012 19:30:51 +0100 Jan Kara <jack@suse.cz> wrote:
> > > 
> > > > On Mon 29-10-12 19:13:58, Jan Kara wrote:
> > > > > On Fri 26-10-12 18:35:24, Darrick J. Wong wrote:
> > > > > > This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> > > > > > stable page writes.  It also plumbs in a sysfs attribute so that admins can
> > > > > > check the device status.
> > > > > > 
> > > > > > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> > > > >   I guess Jens Axboe <axboe@kernel.dk> would be the best target for this
> > > > > patch (so that he can merge it). The patch looks OK to me. You can add:
> > > > >   Reviewed-by: Jan Kara <jack@suse.cz>
> > > >   One more thing popped up in my mind: What about NFS, Ceph or md RAID5?
> > > > These could (at least theoretically) care about stable writes as well. I'm
> > > > not sure if they really started to use them but it would be good to at
> > > > least let them know.
> > > > 
> > > 
> > > What exactly are the semantics of BDI_CAP_STABLE_WRITES ?
> > > 
> > > If I set it for md/RAID5, do I get a cast-iron guarantee that no byte in any
> > > page submitted for write will ever change until after I call bio_endio()?
> >   Yes.
> > 
> > > If so, is this true for all filesystems? - I would expect a bigger patch would
> > > be needed for that.
> >   Actually the code is in kernel for quite some time already. The problem
> > is it is always enabled causing unnecessary performance issues for some
> > workloads. So these patches try to be more selective in when the code gets
> > enabled.
> > 
> > Regarding "all filesystems" question: If we update filemap_page_mkwrite()
> > to call wait_on_page_writeback() then it should be for all filesystems.
> 
> Cool.  I didn't realise it had progressed that far.
> 
> I guess it is time to look at the possibility of removing the
> 'copy-into-cache' step for full-page, well-aligned bi_iovecs.
> 
> I assume this applies to swap-out as well ??  It has been a minor source of
> frustration that when you swap-out to RAID1, you can occasionally get
> different data on the two devices because memory changed between the two DMA
> events.
  Really? I'm somewhat surprised. I was under the impression that when a
page is added to a swap cache it is unmapped so there should be no
modification to it possible while it is being swapped out. But maybe it
could get mapped back and modified after we unlock the page and submit the
bio. So mm/memory.c:do_swap_page() might need wait_on_page_writeback() as
well. But I'm not an expert on swap code. I guess I'll experiment with this
a bit. Thanks for a pointer.

								Honza
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Darrick Wong Oct. 30, 2012, 8:14 p.m. UTC | #10
On Tue, Oct 30, 2012 at 08:19:41AM -0400, Martin K. Petersen wrote:
> >>>>> "Neil" == NeilBrown  <neilb@suse.de> writes:
> 
> Neil,
> 
> >> Might be nice to make the sysfs knob tweakable. Also, don't forget to
> >> add a suitable blurb to Documentation/ABI/.
> 
> Neil>  It isn't at all clear to me that having the sysfs knob
> Neil>  'tweakable' is a good idea.  From the md/raid5 perspective, I
> Neil>  would want to know for certain whether the pages in a given bio
> Neil>  are guaranteed not to change, or if they might.  I could set the
> Neil>  BDI_CAP_STABLE_WRITES and believe they will never change, or test
> Neil>  the BDI_CAP_STABLE_WRITES and let that tell me if they might
> Neil>  change or not.  But if the bit can be changed at any moment, then
> Neil>  it can never be trusted and so becomes worthless to me.
> 
> I was mostly interested in being able to turn it on for devices that
> haven't explicitly done so. I agree that turning it off can be
> problematic.

I'm ok with having a tunable that can turn it on, but atm I can't really think
of a convincing reason to let people turn it /off/.  If people yell loud enough
I'll add it, but I'd rather not have to distinguish between "on because user
set it on" vs "on because hw needs it".

It'd be nice if the presence BDI_CAP_STABLE_WRITES meant that all filesystems
would wait on page writes.  Hrm, guess I'll see about adding that to the patch
set.  Though ISTR that at least the vfat and ext2 maintainers weren't
interested the last time I asked.

--D
> 
> -- 
> Martin K. Petersen	Oracle Linux Engineering
> --
> To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
NeilBrown Oct. 30, 2012, 9:49 p.m. UTC | #11
On Tue, 30 Oct 2012 14:38:25 +0100 Jan Kara <jack@suse.cz> wrote:

> On Tue 30-10-12 11:34:41, NeilBrown wrote:
> > On Tue, 30 Oct 2012 01:10:08 +0100 Jan Kara <jack@suse.cz> wrote:
> > 
> > > On Tue 30-10-12 10:48:37, NeilBrown wrote:
> > > > On Mon, 29 Oct 2012 19:30:51 +0100 Jan Kara <jack@suse.cz> wrote:
> > > > 
> > > > > On Mon 29-10-12 19:13:58, Jan Kara wrote:
> > > > > > On Fri 26-10-12 18:35:24, Darrick J. Wong wrote:
> > > > > > > This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> > > > > > > stable page writes.  It also plumbs in a sysfs attribute so that admins can
> > > > > > > check the device status.
> > > > > > > 
> > > > > > > Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> > > > > >   I guess Jens Axboe <axboe@kernel.dk> would be the best target for this
> > > > > > patch (so that he can merge it). The patch looks OK to me. You can add:
> > > > > >   Reviewed-by: Jan Kara <jack@suse.cz>
> > > > >   One more thing popped up in my mind: What about NFS, Ceph or md RAID5?
> > > > > These could (at least theoretically) care about stable writes as well. I'm
> > > > > not sure if they really started to use them but it would be good to at
> > > > > least let them know.
> > > > > 
> > > > 
> > > > What exactly are the semantics of BDI_CAP_STABLE_WRITES ?
> > > > 
> > > > If I set it for md/RAID5, do I get a cast-iron guarantee that no byte in any
> > > > page submitted for write will ever change until after I call bio_endio()?
> > >   Yes.
> > > 
> > > > If so, is this true for all filesystems? - I would expect a bigger patch would
> > > > be needed for that.
> > >   Actually the code is in kernel for quite some time already. The problem
> > > is it is always enabled causing unnecessary performance issues for some
> > > workloads. So these patches try to be more selective in when the code gets
> > > enabled.
> > > 
> > > Regarding "all filesystems" question: If we update filemap_page_mkwrite()
> > > to call wait_on_page_writeback() then it should be for all filesystems.
> > 
> > Cool.  I didn't realise it had progressed that far.
> > 
> > I guess it is time to look at the possibility of removing the
> > 'copy-into-cache' step for full-page, well-aligned bi_iovecs.
> > 
> > I assume this applies to swap-out as well ??  It has been a minor source of
> > frustration that when you swap-out to RAID1, you can occasionally get
> > different data on the two devices because memory changed between the two DMA
> > events.
>   Really? I'm somewhat surprised. I was under the impression that when a
> page is added to a swap cache it is unmapped so there should be no
> modification to it possible while it is being swapped out. But maybe it
> could get mapped back and modified after we unlock the page and submit the
> bio. So mm/memory.c:do_swap_page() might need wait_on_page_writeback() as
> well. But I'm not an expert on swap code. I guess I'll experiment with this
> a bit. Thanks for a pointer.
> 
> 								Honza

Thanks for looking into it.  I should mention that it was some years ago that
this occasional inconsistency in RAID1 was reported and that I concluded that
it was due to swap (though I don't recall how deeply I examined the code).
It could well be different now.  I never bothered pursuing it because I don't
think that behaviour is really wrong, just mildly annoying.

Thanks,
NeilBrown
NeilBrown Oct. 30, 2012, 10:14 p.m. UTC | #12
On Tue, 30 Oct 2012 13:14:24 -0700 "Darrick J. Wong"
<darrick.wong@oracle.com> wrote:

> On Tue, Oct 30, 2012 at 08:19:41AM -0400, Martin K. Petersen wrote:
> > >>>>> "Neil" == NeilBrown  <neilb@suse.de> writes:
> > 
> > Neil,
> > 
> > >> Might be nice to make the sysfs knob tweakable. Also, don't forget to
> > >> add a suitable blurb to Documentation/ABI/.
> > 
> > Neil>  It isn't at all clear to me that having the sysfs knob
> > Neil>  'tweakable' is a good idea.  From the md/raid5 perspective, I
> > Neil>  would want to know for certain whether the pages in a given bio
> > Neil>  are guaranteed not to change, or if they might.  I could set the
> > Neil>  BDI_CAP_STABLE_WRITES and believe they will never change, or test
> > Neil>  the BDI_CAP_STABLE_WRITES and let that tell me if they might
> > Neil>  change or not.  But if the bit can be changed at any moment, then
> > Neil>  it can never be trusted and so becomes worthless to me.
> > 
> > I was mostly interested in being able to turn it on for devices that
> > haven't explicitly done so. I agree that turning it off can be
> > problematic.
> 
> I'm ok with having a tunable that can turn it on, but atm I can't really think
> of a convincing reason to let people turn it /off/.  If people yell loud enough
> I'll add it, but I'd rather not have to distinguish between "on because user
> set it on" vs "on because hw needs it".
> 
> It'd be nice if the presence BDI_CAP_STABLE_WRITES meant that all filesystems
> would wait on page writes.  Hrm, guess I'll see about adding that to the patch
> set.  Though ISTR that at least the vfat and ext2 maintainers weren't
> interested the last time I asked.

I'm still a little foggy on the exact semantics and use-cases for this flag.
I'll try to piece together the bits that I know and ask you to tell me what
I've missed or what I've got wrong.

 Stable writes are valuable when the filesystem or device driver calculates
 some sort of 'redundancy' information based on the page in memory that is
 about to be written.
 This could be:
      integrity data that will be sent with the page to the storage device
      parity over a number of pages that will be written to a separate device
       (i.e. RAID5/RAID6).
      MAC or similar checksum that will be sent with the data over the network
        and will be validated by the receiving device, which can then ACK or
        NAK depending on correctness.

 These could be implemented  in the filesystem or in the device driver, so
 either should be able to request stable writes.  If neither request stable
 writes, then the cost of stable writes should be avoided.

 For the device driver (or network transport), not getting stable writes when
 requested might be a performance issue, or it might be a correctness issue.
 e.g. if an unstable write causes a MAC to be wrong, the network layer can
 simply arrange a re-transmit.  If an unstable write causes RAID5 parity to
 be wrong, that unstable write could cause data corruption.

 For the filesystem, the requirement to provide stable writes could just be a
 performance cost (a few extra waits) or it could require significant
 re-working of the code (you say vfat and ext2 aren't really comfortable with
 supporting them).

 Finally there is the VFS/VM which needs to provide support for stable
 writes.  It already does - your flag seems to just allow clients of the
 VFS/VM to indicate whether stable writes are required.

 So there seem to be several cases:

 1/ The filesystem wants to always use stable writes.  It just sets the flag,
    and the device will see stable writes whether it cares or not.  This would
    happen if the filesystem is adding integrity data.
 2/ The device would prefer stable writes if possible.  This would apply to
    iscsi (??) as it needs to add some sort of checksum before putting the
    data on the network
 3/ The device absolutely requires stable writes, or needs to provide
    stability itself by taking a copy (md/RAID5 does this).  So it needs to
    know whether each write is stable, or it cannot take advantage of stable
    writes.


 So I see a need for 2 flags here.
 The first one is set by the device or transport to say "I would prefer
 stable writes if possible".
 The second is set by the filesystem, either because it has its own needs, or
 because it sees the first flag set on the device and chooses to honour it.
 The VFS/VM would act based on this second flag, and devices like md/RAID5
 would set the first flag, and assume writes are stable if the second flag is
 also set.

 This implies that setting that second flag must be handled synchronously by
 the filesystem, so that the device doesn't see the flag set until the
 filesystem has committed to honouring it.  That seems to make a mount (or
 remount) option the safest way to set it.

 Comments?

NeilBrown
Boaz Harrosh Oct. 30, 2012, 10:40 p.m. UTC | #13
On 10/26/2012 06:35 PM, Darrick J. Wong wrote:
> This creates BDI_CAP_STABLE_WRITES, which indicates that a device requires
> stable page writes.  It also plumbs in a sysfs attribute so that admins can
> check the device status.
> 
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
> ---
<>
> @@ -434,10 +438,14 @@ EXPORT_SYMBOL(blk_integrity_register);
>  void blk_integrity_unregister(struct gendisk *disk)
>  {
>  	struct blk_integrity *bi;
> +	struct backing_dev_info *bdi;
>  
>  	if (!disk || !disk->integrity)
>  		return;
>  
> +	bdi = &disk->queue->backing_dev_info;
> +	bdi->capabilities &= ~BDI_CAP_STABLE_WRITES;
> +

Once this patchset is in we'll need to add one such code site to
iscsi in the case data_integrity is turned on.

You are welcome to add such a patch to your patchset, (I can show
you where) but it will take a little while before I have the time to
tests such a change.

Currently if data_integrity is turned on iscsi reverts to copy-full
networking, as a hackish attempt to narrow the window of pages changing.
With such a patch in place, we can remove the hack.

But please note that your changes are only half the picture. Because not
all filesystems support stable pages, so it is not like if I turn this flag
on I will magically be protected. A more complete picture is if the FS could
communicate back to iscsi if it actually will provide stable pages, for it
to start trusting stable pages again.

But since iscsi's protection is just a hack, and an admin that mounts
a non-supporting FS can always just turn off data_integrity for that
particular LUN, then I'd say it's fine. Even with this half of the
patchset iscsi should be fixed to use it.

But for the likes of future dm-raid5 that wants to be copy-less, you will
need in the future, a way for the FS to communicate back if it will actually
support stable-pages or the block device needs to revert to old tricks.

Thanks
Boaz

>  	bi = disk->integrity;
>  
>  	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Boaz Harrosh Oct. 30, 2012, 11:58 p.m. UTC | #14
On 10/30/2012 03:14 PM, NeilBrown wrote:
> On Tue, 30 Oct 2012 13:14:24 -0700 "Darrick J. Wong"
> <darrick.wong@oracle.com> wrote:
> 
>> On Tue, Oct 30, 2012 at 08:19:41AM -0400, Martin K. Petersen wrote:
>>>>>>>> "Neil" == NeilBrown  <neilb@suse.de> writes:
>>>
<>
>  So I see a need for 2 flags here.

Yes that was my thought too. We need two flags. The FS should communicate
it's capabilities as well.

>  The first one is set by the device or transport to say "I would prefer
>  stable writes if possible".
>  The second is set by the filesystem, either because it has its own needs, or
>  because it sees the first flag set on the device and chooses to honour it.
>  The VFS/VM would act based on this second flag, and devices like md/RAID5
>  would set the first flag, and assume writes are stable if the second flag is
>  also set.
> 
>  This implies that setting that second flag must be handled synchronously by
>  the filesystem, so that the device doesn't see the flag set until the
>  filesystem has committed to honouring it.  That seems to make a mount (or
>  remount) option the safest way to set it.
> 

I think I do not like any mount option or any other tuneable. With the
block device stating it's needs and the FS confirming on it's capability
then I do not see how reverting that decision by admin can be any good.
Any overrides by an admin would then just be a bug.

>  Comments?
> 
> NeilBrown
> 

Thanks
Boaz
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Darrick Wong Oct. 31, 2012, 8:56 a.m. UTC | #15
On Wed, Oct 31, 2012 at 09:14:41AM +1100, NeilBrown wrote:
> On Tue, 30 Oct 2012 13:14:24 -0700 "Darrick J. Wong"
> <darrick.wong@oracle.com> wrote:
> 
> > On Tue, Oct 30, 2012 at 08:19:41AM -0400, Martin K. Petersen wrote:
> > > >>>>> "Neil" == NeilBrown  <neilb@suse.de> writes:
> > > 
> > > Neil,
> > > 
> > > >> Might be nice to make the sysfs knob tweakable. Also, don't forget to
> > > >> add a suitable blurb to Documentation/ABI/.
> > > 
> > > Neil>  It isn't at all clear to me that having the sysfs knob
> > > Neil>  'tweakable' is a good idea.  From the md/raid5 perspective, I
> > > Neil>  would want to know for certain whether the pages in a give bio
> > > Neil>  are guaranteed not to change, or if they might.  I could set the
> > > Neil>  BDI_CAP_STABLE_WRITES and believe they will never change, or test
> > > Neil>  the BDI_CAP_STABLE_WRITES and let that tell me if they might
> > > Neil>  change or not.  But if the bit can be changed at any moment, then
> > > Neil>  it can never be trusted and so becomes worthless to me.
> > > 
> > > I was mostly interested in being able to turn it on for devices that
> > > haven't explicitly done so. I agree that turning it off can be
> > > problematic.
> > 
> > I'm ok with having a tunable that can turn it on, but atm I can't really think
> > of a convincing reason to let people turn it /off/.  If people yell loud enough
> > I'll add it, but I'd rather not have to distinguish between "on because user
> > set it on" vs "on because hw needs it".
> > 
> > It'd be nice if the presence BDI_CAP_STABLE_WRITES meant that all filesystems
> > would wait on page writes.  Hrm, guess I'll see about adding that to the patch
> > set.  Though ISTR that at least the vfat and ext2 maintainers weren't
> > interested the last time I asked.
> 
> I'm still a little foggy on the exact semantics and use-cases for this flag.
> I'll try to piece together the bits that I know and ask you to tell me what
> I've missed or what I've got wrong.
> 
>  Stable writes are valuable when the filesystem or device driver calculates
>  some sort of 'redundancy' information based on the page in memory that is
>  about to be written.
>  This could be:
>       integrity data that will be sent with the page to the storage device
>       parity over a number of pages that will be written to a separate device
>        (i.e. RAID5/RAID6).
>       MAC or similar checksum that will be sent with the data over the network
>         and will be validated by the receiving device, which can then ACK or
>         NAK depending on correctness.
> 
>  These could be implemented  in the filesystem or in the device driver, so
>  either should be able to request stable writes.  If neither request stable
>  writes, then the cost of stable writes should be avoided.

Most of the changes are in the VM; the only place where filesystem-specific
bits are needed are for things like ext4 which override page_mkwrite.  I'm not
sure what you mean by the filesystem requesting stable writes; unless I'm
missing something (it's late), it's only the storage device that has any sort
of needs.  The filesystem is free either to accommodate the need for stable
writes, or ignore that and deal with the IO errors.

>  For the device driver (or network transport), not getting stable writes when
>  requested might be a performance issue, or it might be a correctness issue.
>  e.g. if an unstable write causes a MAC to be wrong, the network layer can
>  simply arrange a re-transmit.  If an unstable write causes RAID5 parity to
>  be wrong, that unstable write could cause data corruption.
> 
>  For the filesystem, the requirement to provide stable writes could just be a
>  performance cost (a few extra waits) or it could require significant
>  re-working of the code (you say vfat and ext2 aren't really comfortable with
>  supporting them).

I vaguely recall that the reason for skipping vfat was that the maintainer
didn't like the idea of paying the wait costs even on a disk that didn't
require it.  I think we're addressing that.  As for ext2 there wasn't much
comment, though I think Ted or someone mentioned that since it was "stable"
code, it wasn't worth fixing.  In any case, patching filemap_page_mkwrite to
have a wait_on_page_writeback seems to have fixed a number of filesystems
(hfs, reiser, ext2...) with little fuss.

>  Finally there is the VFS/VM which needs to provide support for stable
>  writes.  It already does - your flag seems to just allow clients of the
>  VFS/VM to indicate whether stable writes are required.
> 
>  So there seem to be several cases:
> 
>  1/ The filesystem wants to always use stable writes.  It just sets the flag,
>     and the device will see stable writes whether it cares or not.  This would
>     happen if the filesystem is adding integrity data.
>  2/ The device would prefer stable writes if possible.  This would apply to
>     iscsi (??) as it needs to add some sort of checksum before putting the
>     data on the network
>  3/ The device absolutely requires stable writes, or needs to provide
>     stability itself by taking a copy (md/RAID5 does this).  So it needs to
>     know whether each write is stable, or it cannot take advantage of stable
>     writes.
> 
> 
>  So I see a need for 2 flags here.
>  The first one is set by the device or transport to say "I would prefer
>  stable writes if possible".

Yep, that seems to correspond with what the patch provides right now.

>  The second is set by the filesystem, either because it has its own needs, or
>  because it sees the first flag set on the device and chooses to honour it.
>  The VFS/VM would act based on this second flag, and devices like md/RAID5
>  would set the first flag, and assume writes are stable if the second flag is
>  also set.

This is a trickier piece... I guess this means that the bdi has to keep track
of how many clients are using it (it looks like multiple fses on a partitioned
disk share the same bdi) and of those clients, which ones claim to support
stable page writes.  If all clients do, then the disk can assume that all
writes headed to it will be stable.  If even one doesn't, then I guess the
device falls back to whatever strategy it used before (the raid5 copying).

I guess we also need a way to pass these flags through md/dm if appropriate.

>  This implies that setting that second flag must be handled synchronously by
>  the filesystem, so that the device doesn't see the flag set until the
>  filesystem has committed to honouring it.  That seems to make a mount (or
>  remount) option the safest way to set it.

Yeah, I was thinking that fill_super and kill_sb might be good sites for
whatever calls we need to make back to the bdi to make the stable page write
declarations.

--D

>  Comments?
> 
> NeilBrown


--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jan Kara Oct. 31, 2012, 11:56 a.m. UTC | #16
On Wed 31-10-12 01:56:04, Darrick J. Wong wrote:
> On Wed, Oct 31, 2012 at 09:14:41AM +1100, NeilBrown wrote:
> > On Tue, 30 Oct 2012 13:14:24 -0700 "Darrick J. Wong"
> > <darrick.wong@oracle.com> wrote:
> > 
> > > On Tue, Oct 30, 2012 at 08:19:41AM -0400, Martin K. Petersen wrote:
> > > > >>>>> "Neil" == NeilBrown  <neilb@suse.de> writes:
> > > > 
> > > > Neil,
> > > > 
> > > > >> Might be nice to make the sysfs knob tweakable. Also, don't forget to
> > > > >> add a suitable blurb to Documentation/ABI/.
> > > > 
> > > > Neil>  It isn't at all clear to me that having the sysfs knob
> > > > Neil>  'tweakable' is a good idea.  From the md/raid5 perspective, I
> > > > Neil>  would want to know for certain whether the pages in a give bio
> > > > Neil>  are guaranteed not to change, or if they might.  I could set the
> > > > Neil>  BDI_CAP_STABLE_WRITES and believe they will never change, or test
> > > > Neil>  the BDI_CAP_STABLE_WRITES and let that tell me if they might
> > > > Neil>  change or not.  But if the bit can be changed at any moment, then
> > > > Neil>  it can never be trusted and so becomes worthless to me.
> > > > 
> > > > I was mostly interested in being able to turn it on for devices that
> > > > haven't explicitly done so. I agree that turning it off can be
> > > > problematic.
> > > 
> > > I'm ok with having a tunable that can turn it on, but atm I can't really think
> > > of a convincing reason to let people turn it /off/.  If people yell loud enough
> > > I'll add it, but I'd rather not have to distinguish between "on because user
> > > set it on" vs "on because hw needs it".
> > > 
> > > It'd be nice if the presence BDI_CAP_STABLE_WRITES meant that all filesystems
> > > would wait on page writes.  Hrm, guess I'll see about adding that to the patch
> > > set.  Though ISTR that at least the vfat and ext2 maintainers weren't
> > > interested the last time I asked.
> > 
> > I'm still a little foggy on the exact semantics and use-cases for this flag.
> > I'll try to piece together the bits that I know and ask you to tell me what
> > I've missed or what I've got wrong.
> > 
> >  Stable writes are valuable when the filesystem or device driver calculates
> >  some sort of 'redundancy' information based on the page in memory that is
> >  about to be written.
> >  This could be:
> >       integrity data that will be sent with the page to the storage device
> >       parity over a number of pages that will be written to a separate device
> >        (i.e. RAID5/RAID6).
> >       MAC or similar checksum that will be sent with the data over the network
> >         and will be validated by the receiving device, which can then ACK or
> >         NAK depending on correctness.
> > 
> >  These could be implemented  in the filesystem or in the device driver, so
> >  either should be able to request stable writes.  If neither request stable
> >  writes, then the cost of stable writes should be avoided.
> 
> Most of the changes are in the VM; the only place where filesystem-specific
> bits are needed are for things like ext4 which override page_mkwrite.  I'm not
> sure what you mean by the filesystem requesting stable writes; unless I'm
> missing something (it's late), it's only the storage device that has any sort
> of needs.  The filesystem is free either to accomodate the need for stable
> writes, or ignore that and deal with the IO errors.
  As it was noted somewhere else in the thread, if you compute data
checksums in the filesystem, you need stable pages as well.

> >  For the device driver (or network transport), not getting stable writes when
> >  requested might be a performance issue, or it might be a correctness issue.
> >  e.g. if an unstable write causes a MAC to be wrong, the network layer can
> >  simply arrange a re-transmit.  If an unstable write causes RAID5 parity to
> >  be wrong, that unstable write could cause data corruption.
> > 
> >  For the filesystem, the requirement to provide stable writes could just be a
> >  performance cost (a few extra waits) or it could require significant
> >  re-working of the code (you say vfat and ext2 aren't really comfortable with
> >  supporting them).
> 
> I vaguely recall that the reason for skipping vfat was that the maintainer
> didn't like the idea of paying the wait costs even on a disk that didn't
> require it.  I think we're addressing that.  As for ext2 there wasn't much
> comment, though I think Ted or someone mentioned that since it was "stable"
> code, it wasn't worth fixing.  In any case, patching filemap_page_mkwrite to
> have a write_on_page_writeback seems to have fixed a number of filesystems
> (hfs, reiser, ext2...) with little fuss.
  Yup, I sent you a patch for this a while ago. If stable pages are
conditional only for case which require them (or make a good use of them),
I have no problem with making all filesystems provide them. After changing
filemap_page_mkwrite(), there are 5 filesystems (ncpfs, ceph, cifs, ubifs,
ocfs2) which won't support stable pages. I can fix these once we agree on
the generic infrastructure.
 
> >  Finally there is the VFS/VM which needs to provide support for stable
> >  writes.  It already does - your flag seems to just allow clients of the
> >  VFS/VM to indicate whether stable writes are required.
> > 
> >  So there seem to be several cases:
> > 
> >  1/ The filesystem wants to always use stable writes.  It just sets the flag,
> >     and the device will see stable writes whether it cares or not.  This would
> >     happen if the filesystem is adding integrity data.
> >  2/ The device would prefer stable writes if possible.  This would apply to
> >     iscsi (??) as it needs to add some sort of checksum before putting the
> >     data on the network
> >  3/ The device absolutely requires stable writes, or needs to provide
> >     stability itself by taking a copy (md/RAID5 does this).  So it needs to
> >     know whether each write is stable, or it cannot take advantage of stable
> >     writes.
> > 
> > 
> >  So I see a need for 2 flags here.
> >  The first one is set by the device or transport to say "I would prefer
> >  stable writes if possible".
> 
> Yep, that seems to correspond with what the patch provides right now.
> 
> >  The second is set by the filesystem, either because it has its own needs, or
> >  because it sees the first flag set on the device and chooses to honour it.
> >  The VFS/VM would act based on this second flag, and devices like md/RAID5
> >  would set the first flag, and assume writes are stable if the second flag is
> >  also set.
> 
> This is a trickier piece... I guess this means that the bdi has to keep track
> of how many clients are using it (it looks like multiple fses on a partitioned
> disk share the same bdi) and of those clients, which ones claim to support
> stable page writes.  If all clients do, then the disk can assume that all
> writes headed to it will be stable.  If even one doesn't, then I guess the
> device falls back to whatever strategy it used before (the raid5 copying).
> 
> I guess we also need a way to pass these flags through md/dm if appropriate.
  I'd like to keep things simple. It's not hard to make all filesystems
support stable pages so let's just do that to remove one variable from the
picture. Then we are in the situation where storage can request stable
pages by setting bdi bit and filesystem will obey. Also filesystem itself can
request stable pages by setting the bit and generic functions in mm will
take that into account. No need for two flags...

You are right that we need a mechanism to push the flags from the devices
through various storage layers up into the bdi filesystem sees to make
things reliable.

								Honza
Darrick Wong Oct. 31, 2012, 7:36 p.m. UTC | #17
On Wed, Oct 31, 2012 at 12:56:14PM +0100, Jan Kara wrote:
> On Wed 31-10-12 01:56:04, Darrick J. Wong wrote:
> > On Wed, Oct 31, 2012 at 09:14:41AM +1100, NeilBrown wrote:
> > > On Tue, 30 Oct 2012 13:14:24 -0700 "Darrick J. Wong"
> > > <darrick.wong@oracle.com> wrote:
> > > 
> > > > On Tue, Oct 30, 2012 at 08:19:41AM -0400, Martin K. Petersen wrote:
> > > > > >>>>> "Neil" == NeilBrown  <neilb@suse.de> writes:
> > > > > 
> > > > > Neil,
> > > > > 
> > > > > >> Might be nice to make the sysfs knob tweakable. Also, don't forget to
> > > > > >> add a suitable blurb to Documentation/ABI/.
> > > > > 
> > > > > Neil>  It isn't at all clear to me that having the sysfs knob
> > > > > Neil>  'tweakable' is a good idea.  From the md/raid5 perspective, I
> > > > > Neil>  would want to know for certain whether the pages in a give bio
> > > > > Neil>  are guaranteed not to change, or if they might.  I could set the
> > > > > Neil>  BDI_CAP_STABLE_WRITES and believe they will never change, or test
> > > > > Neil>  the BDI_CAP_STABLE_WRITES and let that tell me if they might
> > > > > Neil>  change or not.  But if the bit can be changed at any moment, then
> > > > > Neil>  it can never be trusted and so becomes worthless to me.
> > > > > 
> > > > > I was mostly interested in being able to turn it on for devices that
> > > > > haven't explicitly done so. I agree that turning it off can be
> > > > > problematic.
> > > > 
> > > > I'm ok with having a tunable that can turn it on, but atm I can't really think
> > > > of a convincing reason to let people turn it /off/.  If people yell loud enough
> > > > I'll add it, but I'd rather not have to distinguish between "on because user
> > > > set it on" vs "on because hw needs it".
> > > > 
> > > > It'd be nice if the presence BDI_CAP_STABLE_WRITES meant that all filesystems
> > > > would wait on page writes.  Hrm, guess I'll see about adding that to the patch
> > > > set.  Though ISTR that at least the vfat and ext2 maintainers weren't
> > > > interested the last time I asked.
> > > 
> > > I'm still a little foggy on the exact semantics and use-cases for this flag.
> > > I'll try to piece together the bits that I know and ask you to tell me what
> > > I've missed or what I've got wrong.
> > > 
> > >  Stable writes are valuable when the filesystem or device driver calculates
> > >  some sort of 'redundancy' information based on the page in memory that is
> > >  about to be written.
> > >  This could be:
> > >       integrity data that will be sent with the page to the storage device
> > >       parity over a number of pages that will be written to a separate device
> > >        (i.e. RAID5/RAID6).
> > >       MAC or similar checksum that will be sent with the data over the network
> > >         and will be validated by the receiving device, which can then ACK or
> > >         NAK depending on correctness.
> > > 
> > >  These could be implemented  in the filesystem or in the device driver, so
> > >  either should be able to request stable writes.  If neither request stable
> > >  writes, then the cost of stable writes should be avoided.
> > 
> > Most of the changes are in the VM; the only place where filesystem-specific
> > bits are needed are for things like ext4 which override page_mkwrite.  I'm not
> > sure what you mean by the filesystem requesting stable writes; unless I'm
> > missing something (it's late), it's only the storage device that has any sort
> > of needs.  The filesystem is free either to accomodate the need for stable
> > writes, or ignore that and deal with the IO errors.
>   As it was noted somewhere else in the thread, if you compute data
> checksums in the filesystem, you need stable pages as well.

Which filesystems other than btrfs do this?

iirc btrfs already provides its own stable page writes by virtue of its COW
nature, though I'm not sure if that holds when nodatacow is set.

> > >  For the device driver (or network transport), not getting stable writes when
> > >  requested might be a performance issue, or it might be a correctness issue.
> > >  e.g. if an unstable write causes a MAC to be wrong, the network layer can
> > >  simply arrange a re-transmit.  If an unstable write causes RAID5 parity to
> > >  be wrong, that unstable write could cause data corruption.
> > > 
> > >  For the filesystem, the requirement to provide stable writes could just be a
> > >  performance cost (a few extra waits) or it could require significant
> > >  re-working of the code (you say vfat and ext2 aren't really comfortable with
> > >  supporting them).
> > 
> > I vaguely recall that the reason for skipping vfat was that the maintainer
> > didn't like the idea of paying the wait costs even on a disk that didn't
> > require it.  I think we're addressing that.  As for ext2 there wasn't much
> > comment, though I think Ted or someone mentioned that since it was "stable"
> > code, it wasn't worth fixing.  In any case, patching filemap_page_mkwrite to
> > have a write_on_page_writeback seems to have fixed a number of filesystems
> > (hfs, reiser, ext2...) with little fuss.
>   Yup, I sent you a patch for this a while ago. If stable pages are
> conditional only for case which require them (or make a good use of them),
> I have no problem with making all filesystems provide them. After changing
> filemap_page_mkwrite(), there are 5 filesystems (ncpfs, ceph, cifs, ubifs,
> ocfs2) which won't support stable pages. I can fix these once we agree on
> the generic infrastructure.

Yesterday I added a patch to my tree that's similar to yours. :)

Actually, I created a function so that we don't have to open-code the if and
wait pieces.

It looks like ceph, cifs, and ocfs2 won't be difficult to modify.  ncpfs seems
to use the generic vm_ops and should be covered.  That leaves ubifs, which
seems to return VM_FAULT_MINOR after unlocking the page.

> > >  Finally there is the VFS/VM which needs to provide support for stable
> > >  writes.  It already does - your flag seems to just allow clients of the
> > >  VFS/VM to indicate whether stable writes are required.
> > > 
> > >  So there seem to be several cases:
> > > 
> > >  1/ The filesystem wants to always use stable writes.  It just sets the flag,
> > >     and the device will see stable writes whether it cares or not.  This would
> > >     happen if the filesystem is adding integrity data.
> > >  2/ The device would prefer stable writes if possible.  This would apply to
> > >     iscsi (??) as it needs to add some sort of checksum before putting the
> > >     data on the network
> > >  3/ The device absolutely requires stable writes, or needs to provide
> > >     stability itself by taking a copy (md/RAID5 does this).  So it needs to
> > >     know whether each write is stable, or it cannot take advantage of stable
> > >     writes.
> > > 
> > > 
> > >  So I see a need for 2 flags here.
> > >  The first one is set by the device or transport to say "I would prefer
> > >  stable writes if possible".
> > 
> > Yep, that seems to correspond with what the patch provides right now.
> > 
> > >  The second is set by the filesystem, either because it has its own needs, or
> > >  because it sees the first flag set on the device and chooses to honour it.
> > >  The VFS/VM would act based on this second flag, and devices like md/RAID5
> > >  would set the first flag, and assume writes are stable if the second flag is
> > >  also set.
> > 
> > This is a trickier piece... I guess this means that the bdi has to keep track
> > of how many clients are using it (it looks like multiple fses on a partitioned
> > disk share the same bdi) and of those clients, which ones claim to support
> > stable page writes.  If all clients do, then the disk can assume that all
> > writes headed to it will be stable.  If even one doesn't, then I guess the
> > device falls back to whatever strategy it used before (the raid5 copying).
> > 
> > I guess we also need a way to pass these flags through md/dm if appropriate.
>   I'd like to keep things simple. It's not hard to make all filesystems
> support stable pages so let's just do that to remove one variable from the
> picture. Then we are in the situation where storage can request stable
> pages by setting bdi bit and filesystem will obey. Also filesystem itself can
> request stable pages by setting the bit and generic functions in mm will
> take that into account. No need for two flags...
> 
> You are right that we need a mechanism to push the flags from the devices
> through various storage layers up into the bdi filesystem sees to make
> things reliable.

md/dm will call blk_integrity_register, so pushing the "stable page writes
required" flag through the various layers is already taken care of.  If devices
and filesystems can both indicate that they want stable page writes,  I'll have
to keep track of however many users there are.

It does seem like less work to fix all the filesystems than to dork around with
another flag.

--D
> 
> 								Honza
> -- 
> Jan Kara <jack@suse.cz>
> SUSE Labs, CR
> --
> To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Boaz Harrosh Oct. 31, 2012, 11:12 p.m. UTC | #18
On 10/31/2012 12:36 PM, Darrick J. Wong wrote:
> On Wed, Oct 31, 2012 at 12:56:14PM +0100, Jan Kara wrote:
<snip>
>> You are right that we need a mechanism to push the flags from the devices
>> through various storage layers up into the bdi filesystem sees to make
>> things reliable.
> 
> md/dm will call blk_integrity_register, so pushing the "stable page writes
> required" flag through the various layers is already taken care of.  If devices
> and filesystems can both indicate that they want stable page writes,  I'll have
> to keep track of however many users there are.
> 

Please note again the iscsi case. Say the admin defined half of an md iscsi devices
with data_integrity and half without.

For me I would like an OR. If any underline device needs "stable pages" they all
get them.

Please also provide - or how easy is to make an API - for the like of iscsi that
given a request_queue, set the BDI's "stable pages" on. Something like:

/* stable_pages can only be turned on never off */
blk_set_stable_pages(struct request_queue); 

> It does seem like less work to fix all the filesystems than to dork around with
> another flag.

Sure if that is possible, that will be perfect, then I do not need to keep
the old "unstable pages" code around at all.

Thanks for working on this
Boaz
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Darrick Wong Nov. 1, 2012, 5:51 a.m. UTC | #19
On Wed, Oct 31, 2012 at 04:12:53PM -0700, Boaz Harrosh wrote:
> On 10/31/2012 12:36 PM, Darrick J. Wong wrote:
> > On Wed, Oct 31, 2012 at 12:56:14PM +0100, Jan Kara wrote:
> <snip>
> >> You are right that we need a mechanism to push the flags from the devices
> >> through various storage layers up into the bdi filesystem sees to make
> >> things reliable.
> > 
> > md/dm will call blk_integrity_register, so pushing the "stable page writes
> > required" flag through the various layers is already taken care of.  If devices
> > and filesystems can both indicate that they want stable page writes,  I'll have
> > to keep track of however many users there are.
> > 
> 
> Please note again the iscsi case. Say the admin defined half of an md iscsi devices
> with data_integrity and half without.
> 
> For me I would like an OR. If any underline device needs "stable pages" they all
> get them.
> 
> Please also provide - or how easy is to make an API - for the like of iscsi that
> given a request_queue, set the BDI's "stable pages" on. Something like:
> 
> /* stable_pages can only be turned on never off */
> blk_set_stable_pages(struct request_queue); 

I'll have to test this further, but I think at least DM requires that a given
dm device's sub-devices all have the same integrity profile, which atm seems to
imply that stable writes will be turned on for everything or not at all.

Of course, all that goes out the door with iscsi since it doesn't necessarily
care about DIF/DIX but wants stable pages anyway.  It'd be useful to be able to
play around with iscsi too.  Could you point me to where in the iscsi code it
does the copy-and-checksum behavior?

--D

> > It does seem like less work to fix all the filesystems than to dork around with
> > another flag.
> 
> Sure if that is possible, that will be perfect, then I do not need to keep
> the old "unstable pages" code around at all.
> 
> Thanks for working on this
> Boaz
> --
> To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Boaz Harrosh Nov. 1, 2012, 6:25 a.m. UTC | #20
On 10/31/2012 10:51 PM, Darrick J. Wong wrote:
> 
> Of course, all that goes out the door with iscsi since it doesn't necessarily
> care about DIF/DIX but wants stable pages anyway.  It'd be useful to be able to
> play around with iscsi too.  Could you point me to where in the iscsi code it
> does the copy-and-checksum behavior?
> 

I'll send you an untested proof of concept patch tomorrow. So at least you can
see where the code is. But no guaranty that it actually works.

You will need to dig into iscsi docs to enable it in config files, look for
data_digest= or something like that.

header digest is when only the headers are check-summed and data-digest is when
all the data is check-summed.

> --D
> 

Cheers
Boaz
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jan Kara Nov. 1, 2012, 8:59 a.m. UTC | #21
On Wed 31-10-12 12:36:52, Darrick J. Wong wrote:
> On Wed, Oct 31, 2012 at 12:56:14PM +0100, Jan Kara wrote:
> >   I'd like to keep things simple. It's not hard to make all filesystems
> > support stable pages so let's just do that to remove one variable from the
> > picture. Then we are in the situation where storage can request stable
> > pages by setting bdi bit and filesystem will obey. Also filesystem itself can
> > request stable pages by setting the bit and generic functions in mm will
> > take that into account. No need for two flags...
> > 
> > You are right that we need a mechanism to push the flags from the devices
> > through various storage layers up into the bdi filesystem sees to make
> > things reliable.
> 
> md/dm will call blk_integrity_register, so pushing the "stable page writes
> required" flag through the various layers is already taken care of.  If devices
> and filesystems can both indicate that they want stable page writes,  I'll have
> to keep track of however many users there are.
  Oh, right. When the device is partitioned and more filesystems are used,
it would be reasonable to clear the flag when there are no stable-page
users. Actually, thinking more about this case, two flags would make things
more transparent. One flag will be in bdi and one in superblock. When
device needs stable pages, it sets bdi flag. If filesystem needs stable pages,
it sets the sb flag. This way we don't have to track any users.

I was thinking about transferring the flag from bdi to sb on mount so that
we can check only sb flag but that actually won't work because of writes
directly to the block device (all block device inodes share one
superblock).

Thoughts?

									Honza
Boaz Harrosh Nov. 1, 2012, 5:24 p.m. UTC | #22
On 11/01/2012 01:59 AM, Jan Kara wrote:
<>
> (all block device inodes share one superblock).
> 

Really? that is not so good is it, for other obvious reasons. 
Why is it not one superblock per BDI? That would be more obvious
to me.

> Thoughts?

It's a really bad design. I think it is worth fixing. For the above
problem, as well as a much better fit with our current thread-per-bdi,
and the rest of the Kernel model. No?

> 
> 									Honza
> 

Thanks
Boaz
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jan Kara Nov. 1, 2012, 10:42 p.m. UTC | #23
On Thu 01-11-12 10:24:48, Boaz Harrosh wrote:
> On 11/01/2012 01:59 AM, Jan Kara wrote:
> <>
> > (all block device inodes share one superblock).
> > 
> 
> Really? that is not so good is it, for other obvious reasons. 
> Why is it not one superblock per BDI? That would be more obvious
> to me.
> 
> > Thoughts?
> 
> It's a really bad design. I think it is worth fixing. For the above
> problem, as well as a much better fit with our current thread-per-bdi,
> and the rest of the Kernel model. No?
  So the fact that there is one superblock of virtual filesystem containing
all block device inodes is inconvenient at times (that's why we have to
have inode_to_bdi() function in fs/fs-writeback.c) but OTOH you cannot
really attach these virtual block device inodes (note that these are
different from an inode for an object say /dev/sda in the filesystem)
anywhere else and having one sb per block device would really be an
overkill.

								Honza
diff mbox

Patch

diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index da2a818..d05d7b3 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -384,6 +384,7 @@  EXPORT_SYMBOL(blk_integrity_is_initialized);
 int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 {
 	struct blk_integrity *bi;
+	struct backing_dev_info *bdi;
 
 	BUG_ON(disk == NULL);
 
@@ -420,6 +421,9 @@  int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 	} else
 		bi->name = bi_unsupported_name;
 
+	bdi = &disk->queue->backing_dev_info;
+	bdi->capabilities |= BDI_CAP_STABLE_WRITES;
+
 	return 0;
 }
 EXPORT_SYMBOL(blk_integrity_register);
@@ -434,10 +438,14 @@  EXPORT_SYMBOL(blk_integrity_register);
 void blk_integrity_unregister(struct gendisk *disk)
 {
 	struct blk_integrity *bi;
+	struct backing_dev_info *bdi;
 
 	if (!disk || !disk->integrity)
 		return;
 
+	bdi = &disk->queue->backing_dev_info;
+	bdi->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
 	bi = disk->integrity;
 
 	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 2a9a9ab..b82a8bb 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -253,6 +253,7 @@  int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 #define BDI_CAP_EXEC_MAP	0x00000040
 #define BDI_CAP_NO_ACCT_WB	0x00000080
 #define BDI_CAP_SWAP_BACKED	0x00000100
+#define BDI_CAP_STABLE_WRITES	0x00000200
 
 #define BDI_CAP_VMFLAGS \
 	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
@@ -307,6 +308,11 @@  long wait_iff_congested(struct zone *zone, int sync, long timeout);
 int pdflush_proc_obsolete(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
 
+static inline bool bdi_cap_stable_writes(struct backing_dev_info *bdi)
+{
+	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
+}
+
 static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
 {
 	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d3ca2b3..a2f4c08 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -221,12 +221,23 @@  static ssize_t max_ratio_store(struct device *dev,
 }
 BDI_SHOW(max_ratio, bdi->max_ratio)
 
+static ssize_t stable_page_writes_show(struct device *dev,
+	struct device_attribute *attr, char *page)
+{
+	struct backing_dev_info *bdi = dev_get_drvdata(dev);
+
+	return snprintf(page, PAGE_SIZE-1, "%d\n",
+			bdi->capabilities & BDI_CAP_STABLE_WRITES ? 1 : 0);
+}
+
 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
+#define __ATTR_RO(attr) __ATTR(attr, 0444, attr##_show, NULL)
 
 static struct device_attribute bdi_dev_attrs[] = {
 	__ATTR_RW(read_ahead_kb),
 	__ATTR_RW(min_ratio),
 	__ATTR_RW(max_ratio),
+	__ATTR_RO(stable_page_writes),
 	__ATTR_NULL,
 };