
[25/31] ext4: Convert ext4_block_write_begin() to take a folio

Message ID: 20230126202415.1682629-26-willy@infradead.org
State: Changes Requested
Series: Convert most of ext4 to folios

Commit Message

Matthew Wilcox (Oracle) Jan. 26, 2023, 8:24 p.m. UTC
All the callers now have a folio, so pass that in and operate on folios.
Removes four calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/ext4/inode.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

Comments

Ritesh Harjani (IBM) March 6, 2023, 6:51 a.m. UTC | #1
"Matthew Wilcox (Oracle)" <willy@infradead.org> writes:

> All the callers now have a folio, so pass that in and operate on folios.
> Removes four calls to compound_head().

Why do you say four? Isn't it 3 calls of PageUptodate(page) which
remove calls to compound_head()? Which one did I miss?

>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/ext4/inode.c | 41 +++++++++++++++++++++--------------------
>  1 file changed, 21 insertions(+), 20 deletions(-)
>
> diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
> index dbfc0670de75..507c7f88d737 100644
> --- a/fs/ext4/inode.c
> +++ b/fs/ext4/inode.c
> @@ -1055,12 +1055,12 @@ int do_journal_get_write_access(handle_t *handle, struct inode *inode,
>  }
>
>  #ifdef CONFIG_FS_ENCRYPTION
> -static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
> +static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
>  				  get_block_t *get_block)
>  {
>  	unsigned from = pos & (PAGE_SIZE - 1);
>  	unsigned to = from + len;
> -	struct inode *inode = page->mapping->host;
> +	struct inode *inode = folio->mapping->host;
>  	unsigned block_start, block_end;
>  	sector_t block;
>  	int err = 0;
> @@ -1070,22 +1070,24 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
>  	int nr_wait = 0;
>  	int i;
>
> -	BUG_ON(!PageLocked(page));
> +	BUG_ON(!folio_test_locked(folio));
>  	BUG_ON(from > PAGE_SIZE);
>  	BUG_ON(to > PAGE_SIZE);
>  	BUG_ON(from > to);
>
> -	if (!page_has_buffers(page))
> -		create_empty_buffers(page, blocksize, 0);
> -	head = page_buffers(page);
> +	head = folio_buffers(folio);
> +	if (!head) {
> +		create_empty_buffers(&folio->page, blocksize, 0);
> +		head = folio_buffers(folio);
> +	}
>  	bbits = ilog2(blocksize);
> -	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
> +	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
>
>  	for (bh = head, block_start = 0; bh != head || !block_start;
>  	    block++, block_start = block_end, bh = bh->b_this_page) {
>  		block_end = block_start + blocksize;
>  		if (block_end <= from || block_start >= to) {
> -			if (PageUptodate(page)) {
> +			if (folio_test_uptodate(folio)) {
>  				set_buffer_uptodate(bh);
>  			}
>  			continue;
> @@ -1098,19 +1100,20 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
>  			if (err)
>  				break;
>  			if (buffer_new(bh)) {
> -				if (PageUptodate(page)) {
> +				if (folio_test_uptodate(folio)) {
>  					clear_buffer_new(bh);
>  					set_buffer_uptodate(bh);
>  					mark_buffer_dirty(bh);
>  					continue;
>  				}
>  				if (block_end > to || block_start < from)
> -					zero_user_segments(page, to, block_end,
> -							   block_start, from);
> +					folio_zero_segments(folio, to,
> +							    block_end,
> +							    block_start, from);
>  				continue;
>  			}
>  		}
> -		if (PageUptodate(page)) {
> +		if (folio_test_uptodate(folio)) {
>  			set_buffer_uptodate(bh);
>  			continue;
>  		}
> @@ -1130,13 +1133,13 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
>  			err = -EIO;
>  	}
>  	if (unlikely(err)) {
> -		page_zero_new_buffers(page, from, to);
> +		page_zero_new_buffers(&folio->page, from, to);
>  	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
>  		for (i = 0; i < nr_wait; i++) {
>  			int err2;
>
> -			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
> -								bh_offset(wait[i]));
> +			err2 = fscrypt_decrypt_pagecache_blocks(&folio->page,
> +						blocksize, bh_offset(wait[i]));

fscrypt_decrypt_pagecache_blocks() takes a folio as its argument now.

Other than that it looks good to me. Please feel free to add -
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
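
In other words, with fscrypt_decrypt_pagecache_blocks() taking the folio
directly, the respin can presumably turn this call site into something
like the following (a sketch, not the final patch):

-			err2 = fscrypt_decrypt_pagecache_blocks(&folio->page,
-						blocksize, bh_offset(wait[i]));
+			err2 = fscrypt_decrypt_pagecache_blocks(folio, blocksize,
+								bh_offset(wait[i]));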
Matthew Wilcox (Oracle) March 6, 2023, 8:27 a.m. UTC | #2
On Mon, Mar 06, 2023 at 12:21:48PM +0530, Ritesh Harjani wrote:
> "Matthew Wilcox (Oracle)" <willy@infradead.org> writes:
> 
> > All the callers now have a folio, so pass that in and operate on folios.
> > Removes four calls to compound_head().
> 
> Why do you say four? Isn't it 3 calls of PageUptodate(page) which
> remove calls to compound_head()? Which one did I miss?
>
> > -	BUG_ON(!PageLocked(page));
> > +	BUG_ON(!folio_test_locked(folio));

That one ;-)

> >  	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
> >  		for (i = 0; i < nr_wait; i++) {
> >  			int err2;
> >
> > -			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
> > -								bh_offset(wait[i]));
> > +			err2 = fscrypt_decrypt_pagecache_blocks(&folio->page,
> > +						blocksize, bh_offset(wait[i]));
> 
> fscrypt_decrypt_pagecache_blocks() takes a folio as its argument now.
> 
> Other than that it looks good to me. Please feel free to add -
> Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>

Thanks.  I'll refresh this patchset next week.
Ritesh Harjani (IBM) March 6, 2023, 3:21 p.m. UTC | #3
Matthew Wilcox <willy@infradead.org> writes:

> On Mon, Mar 06, 2023 at 12:21:48PM +0530, Ritesh Harjani wrote:
>> "Matthew Wilcox (Oracle)" <willy@infradead.org> writes:
>>
>> > All the callers now have a folio, so pass that in and operate on folios.
>> > Removes four calls to compound_head().
>>
>> Why do you say four? Isn't it 3 calls of PageUptodate(page) which
>> remove calls to compound_head()? Which one did I miss?
>>
>> > -	BUG_ON(!PageLocked(page));
>> > +	BUG_ON(!folio_test_locked(folio));
>
> That one ;-)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

How? PageLocked(page) doesn't do any compound_head() calls, no?

-ritesh

>
>> >  	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
>> >  		for (i = 0; i < nr_wait; i++) {
>> >  			int err2;
>> >
>> > -			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
>> > -								bh_offset(wait[i]));
>> > +			err2 = fscrypt_decrypt_pagecache_blocks(&folio->page,
>> > +						blocksize, bh_offset(wait[i]));
>>
>> fscrypt_decrypt_pagecache_blocks() takes a folio as its argument now.
>>
>> Other than that it looks good to me. Please feel free to add -
>> Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
>
> Thanks.  I'll refresh this patchset next week.

Sure. Thanks!
Matthew Wilcox (Oracle) March 15, 2023, 4:40 a.m. UTC | #4
On Mon, Mar 06, 2023 at 08:51:45PM +0530, Ritesh Harjani wrote:
> Matthew Wilcox <willy@infradead.org> writes:
> 
> > On Mon, Mar 06, 2023 at 12:21:48PM +0530, Ritesh Harjani wrote:
> >> "Matthew Wilcox (Oracle)" <willy@infradead.org> writes:
> >>
> >> > All the callers now have a folio, so pass that in and operate on folios.
> >> > Removes four calls to compound_head().
> >>
> >> Why do you say four? Isn't it 3 calls of PageUptodate(page) which
> >> remove calls to compound_head()? Which one did I miss?
> >>
> >> > -	BUG_ON(!PageLocked(page));
> >> > +	BUG_ON(!folio_test_locked(folio));
> >
> > That one ;-)
> 
> __PAGEFLAG(Locked, locked, PF_NO_TAIL)
> 
> #define __PAGEFLAG(uname, lname, policy)				\
> 	TESTPAGEFLAG(uname, lname, policy)				\
> 	__SETPAGEFLAG(uname, lname, policy)				\
> 	__CLEARPAGEFLAG(uname, lname, policy)
> 
> #define TESTPAGEFLAG(uname, lname, policy)				\
> static __always_inline bool folio_test_##lname(struct folio *folio)	\
> { return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
> static __always_inline int Page##uname(struct page *page)		\
> { return test_bit(PG_##lname, &policy(page, 0)->flags); }
> 
> How? PageLocked(page) doesn't do any compount_head() calls no?

You missed one piece of the definition ...

#define PF_NO_TAIL(page, enforce) ({                                    \
                VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
                PF_POISONED_CHECK(compound_head(page)); })
Ritesh Harjani (IBM) March 15, 2023, 2:57 p.m. UTC | #5
Matthew Wilcox <willy@infradead.org> writes:

> On Mon, Mar 06, 2023 at 08:51:45PM +0530, Ritesh Harjani wrote:
>> Matthew Wilcox <willy@infradead.org> writes:
>> 
>> > On Mon, Mar 06, 2023 at 12:21:48PM +0530, Ritesh Harjani wrote:
>> >> "Matthew Wilcox (Oracle)" <willy@infradead.org> writes:
>> >>
>> >> > All the callers now have a folio, so pass that in and operate on folios.
>> >> > Removes four calls to compound_head().
>> >>
>> >> Why do you say four? Isn't it 3 calls of PageUptodate(page) which
>> >> remove calls to compound_head()? Which one did I miss?
>> >>
>> >> > -	BUG_ON(!PageLocked(page));
>> >> > +	BUG_ON(!folio_test_locked(folio));
>> >
>> > That one ;-)
>> 
>> __PAGEFLAG(Locked, locked, PF_NO_TAIL)
>> 
>> #define __PAGEFLAG(uname, lname, policy)				\
>> 	TESTPAGEFLAG(uname, lname, policy)				\
>> 	__SETPAGEFLAG(uname, lname, policy)				\
>> 	__CLEARPAGEFLAG(uname, lname, policy)
>> 
>> #define TESTPAGEFLAG(uname, lname, policy)				\
>> static __always_inline bool folio_test_##lname(struct folio *folio)	\
>> { return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
>> static __always_inline int Page##uname(struct page *page)		\
>> { return test_bit(PG_##lname, &policy(page, 0)->flags); }
>> 
>> How? PageLocked(page) doesn't do any compound_head() calls, no?
>
> You missed one piece of the definition ...
>
> #define PF_NO_TAIL(page, enforce) ({                                    \
>                 VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
>                 PF_POISONED_CHECK(compound_head(page)); })

aah yes, right. Thanks for pointing it out.

-ritesh
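
To spell out the accounting from the thread: hand-expanding the macros
quoted above (an illustrative expansion, not exact preprocessor output)
shows where the hidden compound_head() call lives:

/* PageLocked(page): the PF_NO_TAIL policy routes the flag test through
 * compound_head(), so every call pays for the head-page lookup.
 */
static __always_inline int PageLocked(struct page *page)
{
	return test_bit(PG_locked, &PF_NO_TAIL(page, 0)->flags);
	/* ... which becomes roughly:
	 * return test_bit(PG_locked, &compound_head(page)->flags);
	 */
}

/* folio_test_locked(folio): a folio is never a tail page, so
 * folio_flags() reads the flags word directly -- no compound_head().
 */
static __always_inline bool folio_test_locked(struct folio *folio)
{
	return test_bit(PG_locked, folio_flags(folio, FOLIO_PF_NO_TAIL));
}

Together with the three PageUptodate() -> folio_test_uptodate()
conversions, that accounts for the four compound_head() calls removed by
this patch.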

Patch

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dbfc0670de75..507c7f88d737 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1055,12 +1055,12 @@ int do_journal_get_write_access(handle_t *handle, struct inode *inode,
 }
 
 #ifdef CONFIG_FS_ENCRYPTION
-static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
+static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
 				  get_block_t *get_block)
 {
 	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned block_start, block_end;
 	sector_t block;
 	int err = 0;
@@ -1070,22 +1070,24 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 	int nr_wait = 0;
 	int i;
 
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 	BUG_ON(from > PAGE_SIZE);
 	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, blocksize, 0);
-	head = page_buffers(page);
+	head = folio_buffers(folio);
+	if (!head) {
+		create_empty_buffers(&folio->page, blocksize, 0);
+		head = folio_buffers(folio);
+	}
 	bbits = ilog2(blocksize);
-	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
 
 	for (bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start = block_end, bh = bh->b_this_page) {
 		block_end = block_start + blocksize;
 		if (block_end <= from || block_start >= to) {
-			if (PageUptodate(page)) {
+			if (folio_test_uptodate(folio)) {
 				set_buffer_uptodate(bh);
 			}
 			continue;
@@ -1098,19 +1100,20 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 			if (err)
 				break;
 			if (buffer_new(bh)) {
-				if (PageUptodate(page)) {
+				if (folio_test_uptodate(folio)) {
 					clear_buffer_new(bh);
 					set_buffer_uptodate(bh);
 					mark_buffer_dirty(bh);
 					continue;
 				}
 				if (block_end > to || block_start < from)
-					zero_user_segments(page, to, block_end,
-							   block_start, from);
+					folio_zero_segments(folio, to,
+							    block_end,
+							    block_start, from);
 				continue;
 			}
 		}
-		if (PageUptodate(page)) {
+		if (folio_test_uptodate(folio)) {
 			set_buffer_uptodate(bh);
 			continue;
 		}
@@ -1130,13 +1133,13 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 			err = -EIO;
 	}
 	if (unlikely(err)) {
-		page_zero_new_buffers(page, from, to);
+		page_zero_new_buffers(&folio->page, from, to);
 	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
 		for (i = 0; i < nr_wait; i++) {
 			int err2;
 
-			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
-								bh_offset(wait[i]));
+			err2 = fscrypt_decrypt_pagecache_blocks(&folio->page,
+						blocksize, bh_offset(wait[i]));
 			if (err2) {
 				clear_buffer_uptodate(wait[i]);
 				err = err2;
@@ -1223,11 +1226,10 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
 
 #ifdef CONFIG_FS_ENCRYPTION
 	if (ext4_should_dioread_nolock(inode))
-		ret = ext4_block_write_begin(&folio->page, pos, len,
+		ret = ext4_block_write_begin(folio, pos, len,
 					     ext4_get_block_unwritten);
 	else
-		ret = ext4_block_write_begin(&folio->page, pos, len,
-					     ext4_get_block);
+		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
 #else
 	if (ext4_should_dioread_nolock(inode))
 		ret = __block_write_begin(&folio->page, pos, len,
@@ -3082,8 +3084,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 	folio_wait_stable(folio);
 
 #ifdef CONFIG_FS_ENCRYPTION
-	ret = ext4_block_write_begin(&folio->page, pos, len,
-				     ext4_da_get_block_prep);
+	ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
 #else
 	ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
 #endif