@@ -1687,8 +1687,9 @@ static int __block_write_full_page(struc
if (buffer_new(bh)) {
/* blockdev mappings never come here */
clear_buffer_new(bh);
- unmap_underlying_metadata(bh->b_bdev,
- bh->b_blocknr);
+ if (buffer_mapped(bh))
+ unmap_underlying_metadata(bh->b_bdev,
+ bh->b_blocknr);
}
}
bh = bh->b_this_page;
@@ -1873,7 +1874,8 @@ static int __block_prepare_write(struct
if (err)
break;
if (buffer_new(bh)) {
- unmap_underlying_metadata(bh->b_bdev,
+ if (buffer_mapped(bh))
+ unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
clear_buffer_new(bh);
@@ -2592,7 +2594,7 @@ int nobh_write_begin_newtrunc(struct fil
goto failed;
if (!buffer_mapped(bh))
is_mapped_to_disk = 0;
- if (buffer_new(bh))
+ if (buffer_new(bh) && buffer_mapped(bh))
unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
if (PageUptodate(page)) {
set_buffer_uptodate(bh);
@@ -52,6 +52,23 @@ static int ext3_release_file (struct ino
return 0;
}
+static const struct vm_operations_struct ext3_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = ext3_page_mkwrite,
+};
+
+static int ext3_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct address_space *mapping = file->f_mapping;
+
+ if (!mapping->a_ops->readpage)
+ return -ENOEXEC;
+ file_accessed(file);
+ vma->vm_ops = &ext3_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+ return 0;
+}
+
const struct file_operations ext3_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
@@ -62,7 +79,7 @@ const struct file_operations ext3_file_o
#ifdef CONFIG_COMPAT
.compat_ioctl = ext3_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ext3_file_mmap,
.open = dquot_file_open,
.release = ext3_release_file,
.fsync = ext3_sync_file,
@@ -38,6 +38,7 @@
#include <linux/bio.h>
#include <linux/fiemap.h>
#include <linux/namei.h>
+#include <linux/mount.h>
#include "xattr.h"
#include "acl.h"
@@ -562,10 +563,17 @@ static int ext3_alloc_blocks(handle_t *h
count--;
}
- if (count > 0)
+ if (index == indirect_blks)
break;
}
+ if (blks == 0) {
+ /* blks == 0 when allocating only indirect blocks */
+ new_blocks[index] = 0;
+ *err = 0;
+ return 0;
+ }
+
/* save the new block number for the first direct block */
new_blocks[index] = current_block;
@@ -676,7 +684,9 @@ failed:
for (i = 0; i <indirect_blks; i++)
ext3_free_blocks(handle, inode, new_blocks[i], 1);
- ext3_free_blocks(handle, inode, new_blocks[i], num);
+ if (num > 0)
+ /* num == 0 when allocating only indirect blocks */
+ ext3_free_blocks(handle, inode, new_blocks[i], num);
return err;
}
@@ -735,7 +745,8 @@ static int ext3_splice_branch(handle_t *
* in i_block_alloc_info, to assist find the proper goal block for next
* allocation
*/
- if (block_i) {
+ if (block_i && blks > 0) {
+ /* blks == 0 when allocating only indirect blocks */
block_i->last_alloc_logical_block = block + blks - 1;
block_i->last_alloc_physical_block =
le32_to_cpu(where[num].key) + blks - 1;
@@ -778,7 +789,9 @@ err_out:
ext3_journal_forget(handle, where[i].bh);
ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
}
- ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+ if (blks > 0)
+ /* blks == 0 when allocating only indirect blocks */
+ ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
return err;
}
@@ -905,6 +918,11 @@ int ext3_get_blocks_handle(handle_t *han
/* the number of blocks need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1;
+ if (indirect_blks + maxblocks == 0) {
+ /* maxblocks == 0 when allocating only indirect blocks */
+ mutex_unlock(&ei->truncate_mutex);
+ goto cleanup;
+ }
/*
* Next look up the indirect map to count the totoal number of
@@ -929,7 +947,8 @@ int ext3_get_blocks_handle(handle_t *han
err = ext3_splice_branch(handle, inode, iblock,
partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex);
- if (err)
+ if (err || count == 0)
+ /* count == 0 when allocating only indirect blocks */
goto cleanup;
set_buffer_new(bh_result);
@@ -981,6 +1000,9 @@ static int ext3_get_block(struct inode *
started = 1;
}
+ if (create < 0)
+ /* create < 0 when allocating only indirect blocks */
+ max_blocks = 0;
ret = ext3_get_blocks_handle(handle, inode, iblock,
max_blocks, bh_result, create);
if (ret > 0) {
@@ -1827,6 +1849,43 @@ out:
}
/*
+ * Reserve space for block writes instead of allocating blocks immediately.
+ * Called only on buffer heads attached to a page (and thus for 1 block).
+ */
+static int ext3_da_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create)
+{
+ int ret;
+
+	/* Does the buffer already have blocks reserved? */
+ if (buffer_delay(bh))
+ return 0;
+
+	/* pass create == -1 to allocate only indirect blocks */
+ ret = ext3_get_block(inode, iblock, bh, -1);
+ if (ret < 0)
+ return ret;
+ if (ret > 0 || !create)
+ return 0;
+ set_buffer_delay(bh);
+ set_buffer_new(bh);
+ return 0;
+}
+
+int ext3_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int retry = 0;
+ int ret;
+ struct super_block *sb = vma->vm_file->f_path.mnt->mnt_sb;
+
+ do {
+ ret = block_page_mkwrite(vma, vmf, ext3_da_get_block);
+ } while (ret == VM_FAULT_SIGBUS &&
+ ext3_should_retry_alloc(sb, &retry));
+ return ret;
+}
+
+/*
* Pages can be marked dirty completely asynchronously from ext3's journalling
* activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
* much here because ->set_page_dirty is called under VFS locks. The page is
@@ -909,6 +909,7 @@ extern void ext3_get_inode_flags(struct
extern void ext3_set_aops(struct inode *inode);
extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
+extern int ext3_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
/* ioctl.c */
extern long ext3_ioctl(struct file *, unsigned int, unsigned long);