
[2/2] ext4: Do not create EA inode under buffer lock

Message ID 20240321162657.27420-2-jack@suse.cz
State New

Commit Message

Jan Kara March 21, 2024, 4:26 p.m. UTC
ext4_xattr_set_entry() creates new EA inodes while holding the buffer lock
on the external xattr block. This is problematic as it nests all the
allocation locking (which acquires locks on other buffers) under the
buffer lock. This can even deadlock when the filesystem is corrupted and
e.g. the quota file is set up to contain an xattr block as a data block.
Move the allocation of the EA inode out of ext4_xattr_set_entry() into the
callers.

Reported-by: syzbot+a43d4f48b8397d0e41a9@syzkaller.appspotmail.com
Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/ext4/xattr.c | 113 +++++++++++++++++++++++-------------------------
 1 file changed, 53 insertions(+), 60 deletions(-)
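
To make the reordering concrete, here is a stripped-down sketch of what
ext4_xattr_block_set() looks like after the patch (illustrative only:
error handling, refcount/quota cleanup and the surrounding logic are
omitted, and the wrapper function name is made up):

/*
 * Sketch, not the literal patch: the EA inode is now created before
 * lock_buffer(), so no inode/block allocation happens with the xattr
 * block's buffer lock held.
 */
static int xattr_block_set_sketch(handle_t *handle, struct inode *inode,
				  struct ext4_xattr_info *i,
				  struct ext4_xattr_block_find *bs)
{
	struct inode *ea_inode = NULL;
	int error;

	/* Allocate the EA inode first, with no buffer lock held. */
	if (i->value && i->in_inode) {
		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
						i->value, i->value_len);
		if (IS_ERR(ea_inode))
			return PTR_ERR(ea_inode);
	}

	lock_buffer(bs->bh);
	/* Only buffer modifications happen under the lock now. */
	error = ext4_xattr_set_entry(i, &bs->s, handle, inode, ea_inode,
				     true /* is_block */);
	unlock_buffer(bs->bh);

	/* On failure the caller-side cleanup drops the EA inode again. */
	return error;
}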

Comments

Andreas Dilger March 22, 2024, 6:06 p.m. UTC | #1
On Mar 21, 2024, at 10:26 AM, Jan Kara <jack@suse.cz> wrote:
> 
> ext4_xattr_set_entry() creates new EA inodes while holding buffer lock
> on the external xattr block. This is problematic as it nests all the
> allocation locking (which acquires locks on other buffers) under the
> buffer lock. This can even deadlock when the filesystem is corrupted and
> e.g. quota file is setup to contain xattr block as data block. Move the
> allocation of EA inode out of ext4_xattr_set_entry() into the callers.

This looks like it will allocate a new inode for every setxattr call,
even if the xattr is small and would likely fit inside the inode itself?
That would seem to add a lot of extra overhead for the 99% of cases where
an external inode is not needed.

The ext4_xattr_inode_lookup_create() call is not just doing an inode
bitmap lookup/allocation; it is also looking up the xattr hash in a hash
table, allocating and initializing an ext4 and VFS inode, charging quota,
writing the new inode to the journal and using up journal credits (which
then need to be reserved in larger numbers), etc., so it is by no means
lightweight compared to a memcpy() into the inode buffer for most small
xattrs.

It would be better to preallocate the inode only when the xattr value is
large (say value_len > blocksize/2 or > blocksize * 3/4), so there is a
decent chance it will actually be needed.  Otherwise, do not preallocate
the xattr inode before calling ext4_xattr_set_entry(); instead have it
return -EAGAIN if there wasn't enough room in the inode or in the
external xattr block to hold the value, and let the caller jump back to
allocate the xattr inode and try again (once only), something like:


@@ -1929,9 +1901,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,

+	/* If we need EA inode, prepare it before locking the buffer */
+	if (i->value && i->in_inode && i->value_len > i_blocksize(inode)/2) {
+alloc_inode:
+		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+					i->value, i->value_len);
+		if (IS_ERR(ea_inode)) {
+			error = PTR_ERR(ea_inode);
+			ea_inode = NULL;
+			goto cleanup;
+		}
+	}
+
	if (s->base) {
		int offset = (char *)s->here - bs->bh->b_data;

@@ -1966,7 +1951,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
			}
			ea_bdebug(bs->bh, "modifying in-place");
			error = ext4_xattr_set_entry(i, s, handle, inode,
-						     true /* is_block */);
+					     ea_inode, true /* is_block */);
			ext4_xattr_block_csum_set(inode, bs->bh);
			unlock_buffer(bs->bh);
+			if (error == -EAGAIN && !ea_inode)
+				goto alloc_inode;
			if (error == -EFSCORRUPTED)

Cheers, Andreas

> Reported-by: syzbot+a43d4f48b8397d0e41a9@syzkaller.appspotmail.com
> Signed-off-by: Jan Kara <jack@suse.cz>
> ---
> fs/ext4/xattr.c | 113 +++++++++++++++++++++++-------------------------
> 1 file changed, 53 insertions(+), 60 deletions(-)
> 
> diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
> index 146690c10c73..04f90df8dbae 100644
> --- a/fs/ext4/xattr.c
> +++ b/fs/ext4/xattr.c
> @@ -1619,6 +1619,7 @@ static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
> static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
> 				struct ext4_xattr_search *s,
> 				handle_t *handle, struct inode *inode,
> +				struct inode *new_ea_inode,
> 				bool is_block)
> {
> 	struct ext4_xattr_entry *last, *next;
> @@ -1626,7 +1627,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
> 	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
> 	int in_inode = i->in_inode;
> 	struct inode *old_ea_inode = NULL;
> -	struct inode *new_ea_inode = NULL;
> 	size_t old_size, new_size;
> 	int ret;
> 
> @@ -1711,38 +1711,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
> 			old_ea_inode = NULL;
> 			goto out;
> 		}
> -	}
> -	if (i->value && in_inode) {
> -		WARN_ON_ONCE(!i->value_len);
> -
> -		new_ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
> -					i->value, i->value_len);
> -		if (IS_ERR(new_ea_inode)) {
> -			ret = PTR_ERR(new_ea_inode);
> -			new_ea_inode = NULL;
> -			goto out;
> -		}
> -	}
> 
> -	if (old_ea_inode) {
> 		/* We are ready to release ref count on the old_ea_inode. */
> 		ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
> -		if (ret) {
> -			/* Release newly required ref count on new_ea_inode. */
> -			if (new_ea_inode) {
> -				int err;
> -
> -				err = ext4_xattr_inode_dec_ref(handle,
> -							       new_ea_inode);
> -				if (err)
> -					ext4_warning_inode(new_ea_inode,
> -						  "dec ref new_ea_inode err=%d",
> -						  err);
> -				ext4_xattr_inode_free_quota(inode, new_ea_inode,
> -							    i->value_len);
> -			}
> +		if (ret)
> 			goto out;
> -		}
> 
> 		ext4_xattr_inode_free_quota(inode, old_ea_inode,
> 					    le32_to_cpu(here->e_value_size));
> @@ -1866,7 +1839,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
> 	ret = 0;
> out:
> 	iput(old_ea_inode);
> -	iput(new_ea_inode);
> 	return ret;
> }
> 
> @@ -1929,9 +1901,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
> 	size_t old_ea_inode_quota = 0;
> 	unsigned int ea_ino;
> 
> -
> #define header(x) ((struct ext4_xattr_header *)(x))
> 
> +	/* If we need EA inode, prepare it before locking the buffer */
> +	if (i->value && i->in_inode) {
> +		WARN_ON_ONCE(!i->value_len);
> +
> +		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
> +					i->value, i->value_len);
> +		if (IS_ERR(ea_inode)) {
> +			error = PTR_ERR(ea_inode);
> +			ea_inode = NULL;
> +			goto cleanup;
> +		}
> +	}
> +
> 	if (s->base) {
> 		int offset = (char *)s->here - bs->bh->b_data;
> 
> @@ -1940,6 +1924,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
> 						      EXT4_JTR_NONE);
> 		if (error)
> 			goto cleanup;
> +
> 		lock_buffer(bs->bh);
> 
> 		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
> @@ -1966,7 +1951,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
> 			}
> 			ea_bdebug(bs->bh, "modifying in-place");
> 			error = ext4_xattr_set_entry(i, s, handle, inode,
> -						     true /* is_block */);
> +					     ea_inode, true /* is_block */);
> 			ext4_xattr_block_csum_set(inode, bs->bh);
> 			unlock_buffer(bs->bh);
> 			if (error == -EFSCORRUPTED)
> @@ -2034,29 +2019,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
> 		s->end = s->base + sb->s_blocksize;
> 	}
> 
> -	error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
> +	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
> +				     true /* is_block */);
> 	if (error == -EFSCORRUPTED)
> 		goto bad_block;
> 	if (error)
> 		goto cleanup;
> 
> -	if (i->value && s->here->e_value_inum) {
> -		/*
> -		 * A ref count on ea_inode has been taken as part of the call to
> -		 * ext4_xattr_set_entry() above. We would like to drop this
> -		 * extra ref but we have to wait until the xattr block is
> -		 * initialized and has its own ref count on the ea_inode.
> -		 */
> -		ea_ino = le32_to_cpu(s->here->e_value_inum);
> -		error = ext4_xattr_inode_iget(inode, ea_ino,
> -					      le32_to_cpu(s->here->e_hash),
> -					      &ea_inode);
> -		if (error) {
> -			ea_inode = NULL;
> -			goto cleanup;
> -		}
> -	}
> -
> inserted:
> 	if (!IS_LAST_ENTRY(s->first)) {
> 		new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
> @@ -2209,17 +2178,16 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
> 
> cleanup:
> 	if (ea_inode) {
> -		int error2;
> -
> -		error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
> -		if (error2)
> -			ext4_warning_inode(ea_inode, "dec ref error=%d",
> -					   error2);
> +		if (error) {
> +			int error2;
> 
> -		/* If there was an error, revert the quota charge. */
> -		if (error)
> +			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
> +			if (error2)
> +				ext4_warning_inode(ea_inode, "dec ref error=%d",
> +						   error2);
> 			ext4_xattr_inode_free_quota(inode, ea_inode,
> 						    i_size_read(ea_inode));
> +		}
> 		iput(ea_inode);
> 	}
> 	if (ce)
> @@ -2277,14 +2245,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
> {
> 	struct ext4_xattr_ibody_header *header;
> 	struct ext4_xattr_search *s = &is->s;
> +	struct inode *ea_inode = NULL;
> 	int error;
> 
> 	if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
> 		return -ENOSPC;
> 
> -	error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
> -	if (error)
> +	/* If we need EA inode, prepare it before locking the buffer */
> +	if (i->value && i->in_inode) {
> +		WARN_ON_ONCE(!i->value_len);
> +
> +		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
> +					i->value, i->value_len);
> +		if (IS_ERR(ea_inode))
> +			return PTR_ERR(ea_inode);
> +	}
> +	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
> +				     false /* is_block */);
> +	if (error) {
> +		if (ea_inode) {
> +			int error2;
> +
> +			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
> +			if (error2)
> +				ext4_warning_inode(ea_inode, "dec ref error=%d",
> +						   error2);
> +
> +			ext4_xattr_inode_free_quota(inode, ea_inode,
> +						    i_size_read(ea_inode));
> +			iput(ea_inode);
> +		}
> 		return error;
> +	}
> 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
> 	if (!IS_LAST_ENTRY(s->first)) {
> 		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
> @@ -2293,6 +2285,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
> 		header->h_magic = cpu_to_le32(0);
> 		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
> 	}
> +	iput(ea_inode);
> 	return 0;
> }
> 
> --
> 2.35.3
> 
> 


Jan Kara March 25, 2024, 7:03 p.m. UTC | #2
On Fri 22-03-24 12:06:16, Andreas Dilger wrote:
> On Mar 21, 2024, at 10:26 AM, Jan Kara <jack@suse.cz> wrote:
> > 
> > ext4_xattr_set_entry() creates new EA inodes while holding buffer lock
> > on the external xattr block. This is problematic as it nests all the
> > allocation locking (which acquires locks on other buffers) under the
> > buffer lock. This can even deadlock when the filesystem is corrupted and
> > e.g. quota file is setup to contain xattr block as data block. Move the
> > allocation of EA inode out of ext4_xattr_set_entry() into the callers.
> 
> This looks like it will allocate a new inode for every setxattr called,
> even if the xattr is small and will likely fit inside the inode itself?
> This would seem to add a lot of extra overhead for the 99% of cases when
> an external inode is not needed.

This is not the case AFAICT. We call ext4_xattr_inode_lookup_create() only
in:

       if (i->value && i->in_inode) {

so that means we've already decided we need to put the xattr value into an
EA inode. Note that for smaller xattr values ext4_xattr_set_handle() first
calls ext4_xattr_block_set() with i.in_inode == 0, and only if that fails
with ENOSPC does it set i.in_inode = 1 and try again.
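
In pseudo-code, that retry looks roughly like this (a heavily simplified
sketch of ext4_xattr_set_handle(); the real code also handles moving
values between the inode body and the xattr block):

retry_inode:
	error = ext4_xattr_ibody_set(handle, inode, &i, &is);
	if (error == -ENOSPC) {
		error = ext4_xattr_block_set(handle, inode, &i, &bs);
		if (error == -ENOSPC &&
		    ext4_has_feature_ea_inode(inode->i_sb) && !i.in_inode) {
			/* Neither the inode nor the xattr block can hold
			 * the value -> retry, storing it in an EA inode. */
			i.in_inode = 1;
			goto retry_inode;
		}
	}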

So I think everything is fine.

								Honza

Patch

diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 146690c10c73..04f90df8dbae 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1619,6 +1619,7 @@  static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
 static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 				struct ext4_xattr_search *s,
 				handle_t *handle, struct inode *inode,
+				struct inode *new_ea_inode,
 				bool is_block)
 {
 	struct ext4_xattr_entry *last, *next;
@@ -1626,7 +1627,6 @@  static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
 	int in_inode = i->in_inode;
 	struct inode *old_ea_inode = NULL;
-	struct inode *new_ea_inode = NULL;
 	size_t old_size, new_size;
 	int ret;
 
@@ -1711,38 +1711,11 @@  static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 			old_ea_inode = NULL;
 			goto out;
 		}
-	}
-	if (i->value && in_inode) {
-		WARN_ON_ONCE(!i->value_len);
-
-		new_ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
-					i->value, i->value_len);
-		if (IS_ERR(new_ea_inode)) {
-			ret = PTR_ERR(new_ea_inode);
-			new_ea_inode = NULL;
-			goto out;
-		}
-	}
 
-	if (old_ea_inode) {
 		/* We are ready to release ref count on the old_ea_inode. */
 		ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
-		if (ret) {
-			/* Release newly required ref count on new_ea_inode. */
-			if (new_ea_inode) {
-				int err;
-
-				err = ext4_xattr_inode_dec_ref(handle,
-							       new_ea_inode);
-				if (err)
-					ext4_warning_inode(new_ea_inode,
-						  "dec ref new_ea_inode err=%d",
-						  err);
-				ext4_xattr_inode_free_quota(inode, new_ea_inode,
-							    i->value_len);
-			}
+		if (ret)
 			goto out;
-		}
 
 		ext4_xattr_inode_free_quota(inode, old_ea_inode,
 					    le32_to_cpu(here->e_value_size));
@@ -1866,7 +1839,6 @@  static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 	ret = 0;
 out:
 	iput(old_ea_inode);
-	iput(new_ea_inode);
 	return ret;
 }
 
@@ -1929,9 +1901,21 @@  ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 	size_t old_ea_inode_quota = 0;
 	unsigned int ea_ino;
 
-
 #define header(x) ((struct ext4_xattr_header *)(x))
 
+	/* If we need EA inode, prepare it before locking the buffer */
+	if (i->value && i->in_inode) {
+		WARN_ON_ONCE(!i->value_len);
+
+		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+					i->value, i->value_len);
+		if (IS_ERR(ea_inode)) {
+			error = PTR_ERR(ea_inode);
+			ea_inode = NULL;
+			goto cleanup;
+		}
+	}
+
 	if (s->base) {
 		int offset = (char *)s->here - bs->bh->b_data;
 
@@ -1940,6 +1924,7 @@  ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 						      EXT4_JTR_NONE);
 		if (error)
 			goto cleanup;
+
 		lock_buffer(bs->bh);
 
 		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
@@ -1966,7 +1951,7 @@  ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			}
 			ea_bdebug(bs->bh, "modifying in-place");
 			error = ext4_xattr_set_entry(i, s, handle, inode,
-						     true /* is_block */);
+					     ea_inode, true /* is_block */);
 			ext4_xattr_block_csum_set(inode, bs->bh);
 			unlock_buffer(bs->bh);
 			if (error == -EFSCORRUPTED)
@@ -2034,29 +2019,13 @@  ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 		s->end = s->base + sb->s_blocksize;
 	}
 
-	error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
+	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+				     true /* is_block */);
 	if (error == -EFSCORRUPTED)
 		goto bad_block;
 	if (error)
 		goto cleanup;
 
-	if (i->value && s->here->e_value_inum) {
-		/*
-		 * A ref count on ea_inode has been taken as part of the call to
-		 * ext4_xattr_set_entry() above. We would like to drop this
-		 * extra ref but we have to wait until the xattr block is
-		 * initialized and has its own ref count on the ea_inode.
-		 */
-		ea_ino = le32_to_cpu(s->here->e_value_inum);
-		error = ext4_xattr_inode_iget(inode, ea_ino,
-					      le32_to_cpu(s->here->e_hash),
-					      &ea_inode);
-		if (error) {
-			ea_inode = NULL;
-			goto cleanup;
-		}
-	}
-
 inserted:
 	if (!IS_LAST_ENTRY(s->first)) {
 		new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
@@ -2209,17 +2178,16 @@  ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 
 cleanup:
 	if (ea_inode) {
-		int error2;
-
-		error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
-		if (error2)
-			ext4_warning_inode(ea_inode, "dec ref error=%d",
-					   error2);
+		if (error) {
+			int error2;
 
-		/* If there was an error, revert the quota charge. */
-		if (error)
+			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+			if (error2)
+				ext4_warning_inode(ea_inode, "dec ref error=%d",
+						   error2);
 			ext4_xattr_inode_free_quota(inode, ea_inode,
 						    i_size_read(ea_inode));
+		}
 		iput(ea_inode);
 	}
 	if (ce)
@@ -2277,14 +2245,38 @@  int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
 {
 	struct ext4_xattr_ibody_header *header;
 	struct ext4_xattr_search *s = &is->s;
+	struct inode *ea_inode = NULL;
 	int error;
 
 	if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
 		return -ENOSPC;
 
-	error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
-	if (error)
+	/* If we need EA inode, prepare it before locking the buffer */
+	if (i->value && i->in_inode) {
+		WARN_ON_ONCE(!i->value_len);
+
+		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+					i->value, i->value_len);
+		if (IS_ERR(ea_inode))
+			return PTR_ERR(ea_inode);
+	}
+	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+				     false /* is_block */);
+	if (error) {
+		if (ea_inode) {
+			int error2;
+
+			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+			if (error2)
+				ext4_warning_inode(ea_inode, "dec ref error=%d",
+						   error2);
+
+			ext4_xattr_inode_free_quota(inode, ea_inode,
+						    i_size_read(ea_inode));
+			iput(ea_inode);
+		}
 		return error;
+	}
 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
 	if (!IS_LAST_ENTRY(s->first)) {
 		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2293,6 +2285,7 @@  int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
 		header->h_magic = cpu_to_le32(0);
 		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
 	}
+	iput(ea_inode);
 	return 0;
 }