path: root/fs/f2fs/node.c
author    Jaegeuk Kim  2013-02-26 05:10:46 +0100
committer Jaegeuk Kim  2013-03-18 13:00:33 +0100
commit    266e97a81cf73d1a0dac5f68391da382630a80b7 (patch)
tree      e5c335396c512c72d5c324f789a8ddae93340b0c /fs/f2fs/node.c
parent    f2fs: read with READ_SYNC when getting dnode page (diff)
download  kernel-qcow2-linux-266e97a81cf73d1a0dac5f68391da382630a80b7.tar.gz
          kernel-qcow2-linux-266e97a81cf73d1a0dac5f68391da382630a80b7.tar.xz
          kernel-qcow2-linux-266e97a81cf73d1a0dac5f68391da382630a80b7.zip
f2fs: introduce readahead mode of node pages
Previously, f2fs reads several node pages ahead when get_dnode_of_data is
called with the RDONLY_NODE flag.
And, this flag is set by the following functions.
 - get_data_block_ro
 - get_lock_data_page
 - do_write_data_page
 - truncate_blocks
 - truncate_hole

However, this readahead mechanism is initially introduced for the use of
get_data_block_ro to enhance the sequential read performance.

So, let's clarify all the cases with the additional modes as follows.

enum {
	ALLOC_NODE,	/* allocate a new node page if needed */
	LOOKUP_NODE,	/* look up a node without readahead */
	LOOKUP_NODE_RA,	/*
			 * look up a node with readahead called
			 * by get_datablock_ro.
			 */
}

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
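For illustration only, a minimal sketch of how call sites would pick one of the new modes. The actual conversions live in the companion changes to the callers, not in this diff; only the get_data_block_ro mapping is stated in the commit message, the other mode choices below and the variable names (dn, pgofs, index) are assumptions.

	/* sequential read path: look up the dnode and readahead sibling node pages */
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);	/* e.g. get_data_block_ro() */

	/* plain lookup with no readahead (assumed for truncate_hole() and similar callers) */
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);

	/* write/allocation path: create missing node pages while walking down */
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);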
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--  fs/f2fs/node.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index efcada7becd5..65ec2eabb392 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -384,7 +384,7 @@ got:
/*
* Caller should call f2fs_put_dnode(dn).
*/
-int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int ro)
+int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
struct page *npage[4];
@@ -411,7 +411,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int ro)
for (i = 1; i <= level; i++) {
bool done = false;
- if (!nids[i] && !ro) {
+ if (!nids[i] && mode == ALLOC_NODE) {
mutex_lock_op(sbi, NODE_NEW);
/* alloc new node */
@@ -434,7 +434,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int ro)
alloc_nid_done(sbi, nids[i]);
mutex_unlock_op(sbi, NODE_NEW);
done = true;
- } else if (ro && i == level && level > 1) {
+ } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
npage[i] = get_node_page_ra(parent, offset[i - 1]);
if (IS_ERR(npage[i])) {
err = PTR_ERR(npage[i]);
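As a usage note, not part of this patch: the comment above get_dnode_of_data() says the caller should call f2fs_put_dnode(dn) afterwards. A hedged sketch of that pairing follows, assuming the set_new_dnode() helper and the data_blkaddr field of struct dnode_of_data from f2fs.h, with inode and pgofs as placeholder variables.

	struct dnode_of_data dn;
	int err;

	/* initialize the container; node and inode pages are looked up internally */
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	/* readahead mode: the last-level node page is fetched via get_node_page_ra() */
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err)
		return err;

	/* ... use dn.data_blkaddr, dn.node_page, etc. ... */

	f2fs_put_dnode(&dn);	/* release the node pages taken by the lookup */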