
[2/4] ubifs: Convert ubifs_writepage to use a folio

Message ID 20230605165029.2908304-3-willy@infradead.org
State Not Applicable
Delegated to: Richard Weinberger
Series ubifs: Convert writeback to use folios

Commit Message

Matthew Wilcox (Oracle) June 5, 2023, 4:50 p.m. UTC
We still pass the page down to do_writepage(), but ubifs_writepage()
itself is now large folio safe.  It also contains far fewer hidden calls
to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/ubifs/file.c | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)
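
The "hidden calls to compound_head()" mentioned above are the head-page
lookups baked into page-based helpers such as unlock_page() and
redirty_page_for_writepage(); their folio counterparts can skip that step
because a folio is never a tail page. A toy userspace model of the
difference (illustrative only, not kernel code):

#include <stdio.h>

/* Toy model: a "page" may be the tail of a compound page, so a page helper
 * must normalise to the head first; a "folio" is never a tail, so the folio
 * helper tests the flag directly. */
struct page { struct page *head; unsigned long flags; }; /* head == NULL for a head page */
struct folio { unsigned long flags; };

static struct page *compound_head(struct page *page)
{
	return page->head ? page->head : page;
}

static int page_test_flag(struct page *page)
{
	return compound_head(page)->flags & 1;	/* the hidden head lookup */
}

static int folio_test_flag(struct folio *folio)
{
	return folio->flags & 1;		/* direct test, no lookup */
}

int main(void)
{
	struct page head = { NULL, 1 }, tail = { &head, 0 };
	struct folio folio = { 1 };

	printf("%d %d %d\n", page_test_flag(&head), page_test_flag(&tail),
	       folio_test_flag(&folio));
	return 0;
}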

Comments

Zhihao Cheng June 7, 2023, 2:48 p.m. UTC | #1
On 2023/6/6 0:50, Matthew Wilcox (Oracle) wrote:
> We still pass the page down to do_writepage(), but ubifs_writepage()
> itself is now large folio safe.  It also contains far fewer hidden calls
> to compound_head().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   fs/ubifs/file.c | 39 +++++++++++++++++----------------------
>   1 file changed, 17 insertions(+), 22 deletions(-)
> 
> diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
> index 8bb4cb9d528f..1c7a99c36906 100644
> --- a/fs/ubifs/file.c
> +++ b/fs/ubifs/file.c
> @@ -1006,21 +1006,18 @@ static int do_writepage(struct page *page, int len)
>   static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
>   		void *data)
>   {
> -	struct page *page = &folio->page;
> -	struct inode *inode = page->mapping->host;
> +	struct inode *inode = folio->mapping->host;
>   	struct ubifs_info *c = inode->i_sb->s_fs_info;
>   	struct ubifs_inode *ui = ubifs_inode(inode);
>   	loff_t i_size =  i_size_read(inode), synced_i_size;
> -	pgoff_t end_index = i_size >> PAGE_SHIFT;
> -	int err, len = i_size & (PAGE_SIZE - 1);
> -	void *kaddr;
> +	int err, len = folio_size(folio);
>   
>   	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
> -		inode->i_ino, page->index, page->flags);
> -	ubifs_assert(c, PagePrivate(page));
> +		inode->i_ino, folio->index, folio->flags);
> +	ubifs_assert(c, folio->private != NULL);
>   
> -	/* Is the page fully outside @i_size? (truncate in progress) */
> -	if (page->index > end_index || (page->index == end_index && !len)) {
> +	/* Is the folio fully outside @i_size? (truncate in progress) */
> +	if (folio_pos(folio) >= i_size) {
>   		err = 0;
>   		goto out_unlock;
>   	}
> @@ -1029,9 +1026,9 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
>   	synced_i_size = ui->synced_i_size;
>   	spin_unlock(&ui->ui_lock);
>   
> -	/* Is the page fully inside @i_size? */
> -	if (page->index < end_index) {
> -		if (page->index >= synced_i_size >> PAGE_SHIFT) {
> +	/* Is the folio fully inside i_size? */
> +	if (folio_pos(folio) + len < i_size) {

if (folio_pos(folio) + len <= i_size) ? A folio that ends exactly at @i_size
is still fully inside the file; with '<' it falls through to the straddling
path below, where offset_in_folio(folio, i_size) evaluates to 0 and the
whole folio gets zeroed before writeout.
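
A minimal userspace sketch of that boundary case (assumed numbers, not from
the patch: one 4096-byte folio at file offset 0, i_size exactly 4096):

#include <stdio.h>

int main(void)
{
	long long pos = 0, len = 4096, i_size = 4096; /* folio ends at EOF */

	if (pos + len < i_size) {
		printf("fully inside i_size\n");
	} else {
		/* Straddling path: offset_in_folio(folio, i_size) is
		 * i_size modulo the folio size, 0 here, so
		 * folio_zero_segment() would wipe the whole folio. */
		printf("straddling path, zeroing from offset %lld\n",
		       i_size % len);
	}
	return 0;
}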

> +		if (folio_pos(folio) >= synced_i_size) {
>   			err = inode->i_sb->s_op->write_inode(inode, NULL);
>   			if (err)
>   				goto out_redirty;
> @@ -1044,20 +1041,18 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
>   			 * with this.
>   			 */
>   		}
> -		return do_writepage(page, PAGE_SIZE);
> +		return do_writepage(&folio->page, len);
>   	}
>   
>   	/*
> -	 * The page straddles @i_size. It must be zeroed out on each and every
> +	 * The folio straddles @i_size. It must be zeroed out on each and every
>   	 * writepage invocation because it may be mmapped. "A file is mapped
>   	 * in multiples of the page size. For a file that is not a multiple of
>   	 * the page size, the remaining memory is zeroed when mapped, and
>   	 * writes to that region are not written out to the file."
>   	 */
> -	kaddr = kmap_atomic(page);
> -	memset(kaddr + len, 0, PAGE_SIZE - len);
> -	flush_dcache_page(page);
> -	kunmap_atomic(kaddr);
> +	folio_zero_segment(folio, offset_in_folio(folio, i_size), len);
> +	len = offset_in_folio(folio, i_size);
>   
>   	if (i_size > synced_i_size) {
>   		err = inode->i_sb->s_op->write_inode(inode, NULL);
> @@ -1065,16 +1060,16 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
>   			goto out_redirty;
>   	}
>   
> -	return do_writepage(page, len);
> +	return do_writepage(&folio->page, len);
>   out_redirty:
>   	/*
> -	 * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
> +	 * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
>   	 * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
>   	 * there is no need to do space budget for dirty inode.
>   	 */
> -	redirty_page_for_writepage(wbc, page);
> +	folio_redirty_for_writepage(wbc, folio);
>   out_unlock:
> -	unlock_page(page);
> +	folio_unlock(folio);
>   	return err;
>   }
>   
>
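
One subtlety in the straddling hunk worth noting: the third argument of
folio_zero_segment() is an exclusive end offset within the folio (xend), not
a length, which is why the patch can pass the old len (still equal to
folio_size(folio) at that point) before shrinking len to
offset_in_folio(folio, i_size). A userspace stand-in for the helper
(illustrative only):

#include <stdio.h>
#include <string.h>

/* Mirrors folio_zero_segment(folio, start, xend): zero bytes [start, xend)
 * within the buffer; the end is exclusive, not a length. */
static void zero_segment(char *buf, size_t start, size_t xend)
{
	memset(buf + start, 0, xend - start);
}

int main(void)
{
	char folio[16];
	size_t i;

	memset(folio, 'x', sizeof(folio));
	/* Assumed numbers: i_size falls at offset 10 of this 16-byte
	 * "folio", so bytes 10..15 are zeroed before writeout. */
	zero_segment(folio, 10, sizeof(folio));
	for (i = 0; i < sizeof(folio); i++)
		putchar(folio[i] ? folio[i] : '0');
	putchar('\n');	/* prints xxxxxxxxxx000000 */
	return 0;
}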

Patch

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 8bb4cb9d528f..1c7a99c36906 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1006,21 +1006,18 @@ static int do_writepage(struct page *page, int len)
 static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
 		void *data)
 {
-	struct page *page = &folio->page;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	loff_t i_size =  i_size_read(inode), synced_i_size;
-	pgoff_t end_index = i_size >> PAGE_SHIFT;
-	int err, len = i_size & (PAGE_SIZE - 1);
-	void *kaddr;
+	int err, len = folio_size(folio);
 
 	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
-		inode->i_ino, page->index, page->flags);
-	ubifs_assert(c, PagePrivate(page));
+		inode->i_ino, folio->index, folio->flags);
+	ubifs_assert(c, folio->private != NULL);
 
-	/* Is the page fully outside @i_size? (truncate in progress) */
-	if (page->index > end_index || (page->index == end_index && !len)) {
+	/* Is the folio fully outside @i_size? (truncate in progress) */
+	if (folio_pos(folio) >= i_size) {
 		err = 0;
 		goto out_unlock;
 	}
@@ -1029,9 +1026,9 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
 	synced_i_size = ui->synced_i_size;
 	spin_unlock(&ui->ui_lock);
 
-	/* Is the page fully inside @i_size? */
-	if (page->index < end_index) {
-		if (page->index >= synced_i_size >> PAGE_SHIFT) {
+	/* Is the folio fully inside i_size? */
+	if (folio_pos(folio) + len < i_size) {
+		if (folio_pos(folio) >= synced_i_size) {
 			err = inode->i_sb->s_op->write_inode(inode, NULL);
 			if (err)
 				goto out_redirty;
@@ -1044,20 +1041,18 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
 			 * with this.
 			 */
 		}
-		return do_writepage(page, PAGE_SIZE);
+		return do_writepage(&folio->page, len);
 	}
 
 	/*
-	 * The page straddles @i_size. It must be zeroed out on each and every
+	 * The folio straddles @i_size. It must be zeroed out on each and every
 	 * writepage invocation because it may be mmapped. "A file is mapped
 	 * in multiples of the page size. For a file that is not a multiple of
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap_atomic(page);
-	memset(kaddr + len, 0, PAGE_SIZE - len);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr);
+	folio_zero_segment(folio, offset_in_folio(folio, i_size), len);
+	len = offset_in_folio(folio, i_size);
 
 	if (i_size > synced_i_size) {
 		err = inode->i_sb->s_op->write_inode(inode, NULL);
@@ -1065,16 +1060,16 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
 			goto out_redirty;
 	}
 
-	return do_writepage(page, len);
+	return do_writepage(&folio->page, len);
 out_redirty:
 	/*
-	 * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
+	 * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
 	 * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
 	 * there is no need to do space budget for dirty inode.
 	 */
-	redirty_page_for_writepage(wbc, page);
+	folio_redirty_for_writepage(wbc, folio);
 out_unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 	return err;
 }
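
As a sanity check on the reworked "fully outside @i_size" test (a sketch
under the assumption of order-0, i.e. single-page, folios, where
folio_pos(folio) == index * PAGE_SIZE): the new condition is exactly the old
index/end_index test. A brute-force userspace verification:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long i_size, index;

	for (i_size = 0; i_size <= 3 * PAGE_SIZE; i_size++) {
		for (index = 0; index < 4; index++) {
			unsigned long long end_index = i_size >> PAGE_SHIFT;
			unsigned long long len = i_size & (PAGE_SIZE - 1);
			/* Old page-based check. */
			int old_check = index > end_index ||
					(index == end_index && !len);
			/* New folio-based check. */
			int new_check = index * PAGE_SIZE >= i_size;

			assert(old_check == new_check);
		}
	}
	printf("old and new EOF checks agree for order-0 folios\n");
	return 0;
}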