author	Josef Bacik	2013-06-17 23:14:39 +0200
committer	Josef Bacik	2013-07-01 14:52:33 +0200
commit	a71754fc68f740b7ed46bb83123c63fbbc130611 (patch)
tree	c39655c68a713d4707cb476ac28702bb4e302ed2 /fs/btrfs/extent_io.c
parent	Btrfs: optimize reada_for_balance (diff)
Btrfs: move btrfs_truncate_page to btrfs_cont_expand instead of btrfs_truncate
This has plagued us forever and I'm so over working around it.

When we truncate down to a non-page-aligned offset we call btrfs_truncate_page to zero out the end of the page and write it back to disk; this keeps us from exposing stale data if we later truncate back up past that point. The problem is that this requires data space, and people don't really expect to get ENOSPC from truncate() for this sort of thing. It also tends to bite the orphan cleanup code, which keeps people from mounting.

To get around this we can just move the zeroing into btrfs_cont_expand(): if we are truncating up from a non-page-aligned i_size we zero out the rest of that page so we don't expose stale data. With this you get ENOSPC if you try to truncate() up or write past the end of i_size, which is much more reasonable.

This fixes xfstests generic/083 failing to mount because of the orphan cleanup failing. Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
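For context, the other half of this change lands in btrfs_cont_expand() in fs/btrfs/inode.c, which is outside the diffstat shown below. The following is only a minimal sketch of that side, assuming the 3.10-era helper btrfs_truncate_page(inode, from, len, front); it is not a verbatim hunk from this commit.

int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	int err;

	/*
	 * Sketch: if the old i_size ends in the middle of a page, zero the
	 * rest of that page before growing i_size so stale data is never
	 * exposed.  Doing the zeroing here instead of in btrfs_truncate
	 * means truncating down needs no data space; only growing the file
	 * (or writing past i_size) can return ENOSPC.
	 */
	err = btrfs_truncate_page(inode, oldsize, 0, 0);
	if (err)
		return err;

	/* ... existing code: fill the hole from oldsize up to size ... */
	return 0;
}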
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 03ca3ab95898..a83d7019ede9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2477,11 +2477,12 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		struct extent_state *cached = NULL;
 		struct extent_state *state;
 		struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+		struct inode *inode = page->mapping->host;
 
 		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
 			 "mirror=%lu\n", (u64)bio->bi_sector, err,
 			 io_bio->mirror_num);
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
+		tree = &BTRFS_I(inode)->io_tree;
 
 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
@@ -2555,6 +2556,14 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
 		if (uptodate) {
+			loff_t i_size = i_size_read(inode);
+			pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+			unsigned offset;
+
+			/* Zero out the end if this page straddles i_size */
+			offset = i_size & (PAGE_CACHE_SIZE-1);
+			if (page->index == end_index && offset)
+				zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);
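As a quick check of the straddle arithmetic in the hunk above, here is a small standalone userspace sketch, assuming 4 KiB pages in place of the kernel's PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT macros:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12			/* assume 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	unsigned long long i_size = 10 * 1024;	/* 10 KiB file */
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

	/* Only the page whose index equals end_index straddles i_size,
	 * and only when i_size is not page aligned (offset != 0).
	 * That page has bytes [offset, PAGE_CACHE_SIZE) zeroed on read. */
	printf("end_index=%lu offset=%u -> zero bytes %u..%lu of page %lu\n",
	       end_index, offset, offset, PAGE_CACHE_SIZE - 1, end_index);
	return 0;
}

With a 10 KiB file and 4 KiB pages this prints end_index=2 offset=2048, i.e. the last 2 KiB of page 2 are zeroed, matching the zero_user_segment() call added above.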