[06/15] ubifs: Convert ubifs_vm_page_mkwrite() to use a folio

Message ID 20240120230824.2619716-7-willy@infradead.org
State Superseded
Series ubifs folio conversion

Commit Message

Matthew Wilcox Jan. 20, 2024, 11:08 p.m. UTC
Replace six implicit calls to compound_head() with one.
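
Each of the legacy page helpers re-derives the folio internally, e.g.
(simplified from include/linux/pagemap.h and mm/folio-compat.c; the
exact bodies vary by kernel version):

	static inline void lock_page(struct page *page)
	{
		/* page_folio() boils down to compound_head() */
		folio_lock(page_folio(page));
	}

	void unlock_page(struct page *page)
	{
		folio_unlock(page_folio(page));
	}

Converting vmf->page to a folio once at the top of
ubifs_vm_page_mkwrite() and calling the folio APIs directly makes the
remaining calls conversion-free.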

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 Documentation/mm/page_cache.rst | 10 +++++++++
 fs/ubifs/file.c                 | 36 ++++++++++++++++-----------------
 2 files changed, 28 insertions(+), 18 deletions(-)
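
For reference, the page-to-folio conversions this patch applies (taken
from the hunks below; note that PagePrivate() becomes a direct test of
folio->private):

	lock_page(page)                  -> folio_lock(folio)
	unlock_page(page)                -> folio_unlock(folio)
	page_offset(page)                -> folio_pos(folio)
	PagePrivate(page)                -> folio->private
	PageChecked(page)                -> folio_test_checked(folio)
	attach_page_private(page, ...)   -> folio_attach_private(folio, ...)
	__set_page_dirty_nobuffers(page) -> filemap_dirty_folio(folio->mapping, folio)
	wait_for_stable_page(page)       -> folio_wait_stable(folio)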

Comments

Zhihao Cheng Jan. 22, 2024, 11:38 a.m. UTC | #1
On 2024/1/21 7:08, Matthew Wilcox (Oracle) wrote:
> Replace six implicit calls to compound_head() with one.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   Documentation/mm/page_cache.rst | 10 +++++++++
>   fs/ubifs/file.c                 | 36 ++++++++++++++++-----------------
>   2 files changed, 28 insertions(+), 18 deletions(-)

Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>

Patch

diff --git a/Documentation/mm/page_cache.rst b/Documentation/mm/page_cache.rst
index 75eba7c431b2..138d61f869df 100644
--- a/Documentation/mm/page_cache.rst
+++ b/Documentation/mm/page_cache.rst
@@ -3,3 +3,13 @@ 
 ==========
 Page Cache
 ==========
+
+The page cache is the primary way that the user and the rest of the kernel
+interact with filesystems.  It can be bypassed (e.g. with O_DIRECT),
+but normal reads, writes and mmaps go through the page cache.
+
+Folios
+======
+
+The folio is the unit of memory management within the page cache.
+Operations 
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index e755d0a11146..9dfedd91b576 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1519,14 +1519,14 @@  static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
  */
 static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct timespec64 now = current_time(inode);
 	struct ubifs_budget_req req = { .new_page = 1 };
 	int err, update_time;
 
-	dbg_gen("ino %lu, pg %lu, i_size %lld",	inode->i_ino, page->index,
+	dbg_gen("ino %lu, pg %lu, i_size %lld",	inode->i_ino, folio->index,
 		i_size_read(inode));
 	ubifs_assert(c, !c->ro_media && !c->ro_mount);
 
@@ -1534,17 +1534,17 @@  static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS; /* -EROFS */
 
 	/*
-	 * We have not locked @page so far so we may budget for changing the
-	 * page. Note, we cannot do this after we locked the page, because
+	 * We have not locked @folio so far so we may budget for changing the
+	 * folio. Note, we cannot do this after we locked the folio, because
 	 * budgeting may cause write-back which would cause deadlock.
 	 *
-	 * At the moment we do not know whether the page is dirty or not, so we
-	 * assume that it is not and budget for a new page. We could look at
+	 * At the moment we do not know whether the folio is dirty or not, so we
+	 * assume that it is not and budget for a new folio. We could look at
 	 * the @PG_private flag and figure this out, but we may race with write
-	 * back and the page state may change by the time we lock it, so this
+	 * back and the folio state may change by the time we lock it, so this
 	 * would need additional care. We do not bother with this at the
 	 * moment, although it might be good idea to do. Instead, we allocate
-	 * budget for a new page and amend it later on if the page was in fact
+	 * budget for a new folio and amend it later on if the folio was in fact
 	 * dirty.
 	 *
 	 * The budgeting-related logic of this function is similar to what we
@@ -1567,21 +1567,21 @@  static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	lock_page(page);
-	if (unlikely(page->mapping != inode->i_mapping ||
-		     page_offset(page) > i_size_read(inode))) {
-		/* Page got truncated out from underneath us */
+	folio_lock(folio);
+	if (unlikely(folio->mapping != inode->i_mapping ||
+		     folio_pos(folio) >= i_size_read(inode))) {
+		/* Folio got truncated out from underneath us */
 		goto sigbus;
 	}
 
-	if (PagePrivate(page))
+	if (folio->private)
 		release_new_page_budget(c);
 	else {
-		if (!PageChecked(page))
+		if (!folio_test_checked(folio))
 			ubifs_convert_page_budget(c);
-		attach_page_private(page, (void *)1);
+		folio_attach_private(folio, (void *)1);
 		atomic_long_inc(&c->dirty_pg_cnt);
-		__set_page_dirty_nobuffers(page);
+		filemap_dirty_folio(folio->mapping, folio);
 	}
 
 	if (update_time) {
@@ -1597,11 +1597,11 @@  static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 			ubifs_release_dirty_inode_budget(c, ui);
 	}
 
-	wait_for_stable_page(page);
+	folio_wait_stable(folio);
 	return VM_FAULT_LOCKED;
 
 sigbus:
-	unlock_page(page);
+	folio_unlock(folio);
 	ubifs_release_budget(c, &req);
 	return VM_FAULT_SIGBUS;
 }
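
One behavioural note beyond the mechanical conversion: the truncation
check tightens from > to >=. File data occupies bytes 0 through
i_size - 1, so a folio whose first byte sits exactly at i_size is
already entirely past EOF; the old strict comparison let that case
through. Side by side (extracted from the hunk above):

	/* before: a page starting exactly at i_size passed the check */
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode)))
		goto sigbus;

	/* after: a first byte at or past EOF means fully truncated */
	if (unlikely(folio->mapping != inode->i_mapping ||
		     folio_pos(folio) >= i_size_read(inode)))
		goto sigbus;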