
[v5,3/9] iomap: Rename page_done handler to put_folio

Message ID 20221231150919.659533-4-agruenba@redhat.com
State Not Applicable
Series Turn iomap_page_ops into iomap_folio_ops

Commit Message

Andreas Gruenbacher Dec. 31, 2022, 3:09 p.m. UTC
The ->page_done() handler in struct iomap_page_ops is now somewhat
misnamed in that it mainly deals with unlocking and putting a folio, so
rename it to ->put_folio().

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/gfs2/bmap.c         |  4 ++--
 fs/iomap/buffered-io.c |  4 ++--
 include/linux/iomap.h  | 10 +++++-----
 3 files changed, 9 insertions(+), 9 deletions(-)
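
For reference, a minimal sketch of what a filesystem's wiring looks like with the renamed hook, modeled on the gfs2 and include/linux/iomap.h hunks below; the myfs_* names are placeholders rather than code from the patch, and a real handler (like gfs2's) would typically wrap its own journaling work around the unlock/put.

static int myfs_iomap_page_prepare(struct inode *inode, loff_t pos,
				   unsigned len)
{
	/* Filesystem-specific setup, e.g. starting a transaction. */
	return 0;
}

static void myfs_iomap_put_folio(struct inode *inode, loff_t pos,
				 unsigned copied, struct folio *folio)
{
	/*
	 * Per the include/linux/iomap.h comment: @folio may be NULL if
	 * ->page_prepare() succeeded but the folio could not be obtained;
	 * when it is non-NULL, this hook must unlock and put it.
	 */
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
	/* Filesystem-specific cleanup, e.g. ending the transaction. */
}

static const struct iomap_page_ops myfs_iomap_page_ops = {
	.page_prepare	= myfs_iomap_page_prepare,
	.put_folio	= myfs_iomap_put_folio,	/* was .page_done */
};

A filesystem that has no extra per-write cleanup can simply leave ->put_folio unset: the fs/iomap/buffered-io.c hunk below shows iomap falling back to the same folio_unlock()/folio_put() pair in that case.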

Comments

Darrick J. Wong Jan. 4, 2023, 5:37 p.m. UTC | #1
On Sat, Dec 31, 2022 at 04:09:13PM +0100, Andreas Gruenbacher wrote:
> The ->page_done() handler in struct iomap_page_ops is now somewhat
> misnamed in that it mainly deals with unlocking and putting a folio, so
> rename it to ->put_folio().
> 
> Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
> ---
>  fs/gfs2/bmap.c         |  4 ++--
>  fs/iomap/buffered-io.c |  4 ++--
>  include/linux/iomap.h  | 10 +++++-----
>  3 files changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
> index 46206286ad42..0c041459677b 100644
> --- a/fs/gfs2/bmap.c
> +++ b/fs/gfs2/bmap.c
> @@ -967,7 +967,7 @@ static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
>  	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
>  }
>  
> -static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
> +static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
>  				 unsigned copied, struct folio *folio)
>  {
>  	struct gfs2_trans *tr = current->journal_info;
> @@ -994,7 +994,7 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
>  
>  static const struct iomap_page_ops gfs2_iomap_page_ops = {
>  	.page_prepare = gfs2_iomap_page_prepare,
> -	.page_done = gfs2_iomap_page_done,
> +	.put_folio = gfs2_iomap_put_folio,
>  };
>  
>  static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index e13d5694e299..2a9bab4f3c79 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -580,8 +580,8 @@ static void iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
>  {
>  	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
>  
> -	if (page_ops && page_ops->page_done) {
> -		page_ops->page_done(iter->inode, pos, ret, folio);
> +	if (page_ops && page_ops->put_folio) {
> +		page_ops->put_folio(iter->inode, pos, ret, folio);
>  	} else if (folio) {
>  		folio_unlock(folio);
>  		folio_put(folio);
> diff --git a/include/linux/iomap.h b/include/linux/iomap.h
> index 743e2a909162..10ec36f373f4 100644
> --- a/include/linux/iomap.h
> +++ b/include/linux/iomap.h
> @@ -126,18 +126,18 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
>  
>  /*
>   * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
> - * and page_done will be called for each page written to.  This only applies to
> + * and put_folio will be called for each page written to.  This only applies to

"...for each folio written to."

With that fixed,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D


>   * buffered writes as unbuffered writes will not typically have pages
>   * associated with them.
>   *
> - * When page_prepare succeeds, page_done will always be called to do any
> - * cleanup work necessary.  In that page_done call, @folio will be NULL if the
> - * associated folio could not be obtained.  When folio is not NULL, page_done
> + * When page_prepare succeeds, put_folio will always be called to do any
> + * cleanup work necessary.  In that put_folio call, @folio will be NULL if the
> + * associated folio could not be obtained.  When folio is not NULL, put_folio
>   * is responsible for unlocking and putting the folio.
>   */
>  struct iomap_page_ops {
>  	int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
> -	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
> +	void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
>  			struct folio *folio);
>  
>  	/*
> -- 
> 2.38.1
>
Andreas Grünbacher Jan. 4, 2023, 6:51 p.m. UTC | #2
On Wed, Jan 4, 2023 at 6:47 PM Darrick J. Wong <djwong@kernel.org> wrote:
>
> On Sat, Dec 31, 2022 at 04:09:13PM +0100, Andreas Gruenbacher wrote:
> > The ->page_done() handler in struct iomap_page_ops is now somewhat
> > misnamed in that it mainly deals with unlocking and putting a folio, so
> > rename it to ->put_folio().
> >
> > Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
> > ---
> >  fs/gfs2/bmap.c         |  4 ++--
> >  fs/iomap/buffered-io.c |  4 ++--
> >  include/linux/iomap.h  | 10 +++++-----
> >  3 files changed, 9 insertions(+), 9 deletions(-)
> >
> > diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
> > index 46206286ad42..0c041459677b 100644
> > --- a/fs/gfs2/bmap.c
> > +++ b/fs/gfs2/bmap.c
> > @@ -967,7 +967,7 @@ static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
> >       return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
> >  }
> >
> > -static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
> > +static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
> >                                unsigned copied, struct folio *folio)
> >  {
> >       struct gfs2_trans *tr = current->journal_info;
> > @@ -994,7 +994,7 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
> >
> >  static const struct iomap_page_ops gfs2_iomap_page_ops = {
> >       .page_prepare = gfs2_iomap_page_prepare,
> > -     .page_done = gfs2_iomap_page_done,
> > +     .put_folio = gfs2_iomap_put_folio,
> >  };
> >
> >  static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
> > diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> > index e13d5694e299..2a9bab4f3c79 100644
> > --- a/fs/iomap/buffered-io.c
> > +++ b/fs/iomap/buffered-io.c
> > @@ -580,8 +580,8 @@ static void iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
> >  {
> >       const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
> >
> > -     if (page_ops && page_ops->page_done) {
> > -             page_ops->page_done(iter->inode, pos, ret, folio);
> > +     if (page_ops && page_ops->put_folio) {
> > +             page_ops->put_folio(iter->inode, pos, ret, folio);
> >       } else if (folio) {
> >               folio_unlock(folio);
> >               folio_put(folio);
> > diff --git a/include/linux/iomap.h b/include/linux/iomap.h
> > index 743e2a909162..10ec36f373f4 100644
> > --- a/include/linux/iomap.h
> > +++ b/include/linux/iomap.h
> > @@ -126,18 +126,18 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
> >
> >  /*
> >   * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
> > - * and page_done will be called for each page written to.  This only applies to
> > + * and put_folio will be called for each page written to.  This only applies to
>
> "...for each folio written to."

Ah, yes.

> With that fixed,
> Reviewed-by: Darrick J. Wong <djwong@kernel.org>
>
> --D
>
>
> >   * buffered writes as unbuffered writes will not typically have pages

And here, it should be "folios" as well I'd say.

Thanks,
Andreas

> >   * associated with them.
> >   *
> > - * When page_prepare succeeds, page_done will always be called to do any
> > - * cleanup work necessary.  In that page_done call, @folio will be NULL if the
> > - * associated folio could not be obtained.  When folio is not NULL, page_done
> > + * When page_prepare succeeds, put_folio will always be called to do any
> > + * cleanup work necessary.  In that put_folio call, @folio will be NULL if the
> > + * associated folio could not be obtained.  When folio is not NULL, put_folio
> >   * is responsible for unlocking and putting the folio.
> >   */
> >  struct iomap_page_ops {
> >       int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
> > -     void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
> > +     void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
> >                       struct folio *folio);
> >
> >       /*
> > --
> > 2.38.1
> >
Christoph Hellwig Jan. 8, 2023, 5:26 p.m. UTC | #3
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 46206286ad42..0c041459677b 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -967,7 +967,7 @@  static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
 	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
 }
 
-static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
+static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
 				 unsigned copied, struct folio *folio)
 {
 	struct gfs2_trans *tr = current->journal_info;
@@ -994,7 +994,7 @@  static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
 
 static const struct iomap_page_ops gfs2_iomap_page_ops = {
 	.page_prepare = gfs2_iomap_page_prepare,
-	.page_done = gfs2_iomap_page_done,
+	.put_folio = gfs2_iomap_put_folio,
 };
 
 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e13d5694e299..2a9bab4f3c79 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -580,8 +580,8 @@  static void iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
 {
 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 
-	if (page_ops && page_ops->page_done) {
-		page_ops->page_done(iter->inode, pos, ret, folio);
+	if (page_ops && page_ops->put_folio) {
+		page_ops->put_folio(iter->inode, pos, ret, folio);
 	} else if (folio) {
 		folio_unlock(folio);
 		folio_put(folio);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 743e2a909162..10ec36f373f4 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -126,18 +126,18 @@  static inline bool iomap_inline_data_valid(const struct iomap *iomap)
 
 /*
  * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
- * and page_done will be called for each page written to.  This only applies to
+ * and put_folio will be called for each page written to.  This only applies to
  * buffered writes as unbuffered writes will not typically have pages
  * associated with them.
  *
- * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary.  In that page_done call, @folio will be NULL if the
- * associated folio could not be obtained.  When folio is not NULL, page_done
+ * When page_prepare succeeds, put_folio will always be called to do any
+ * cleanup work necessary.  In that put_folio call, @folio will be NULL if the
+ * associated folio could not be obtained.  When folio is not NULL, put_folio
  * is responsible for unlocking and putting the folio.
  */
 struct iomap_page_ops {
 	int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
-	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+	void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
 			struct folio *folio);
 
 	/*