author		Josef Bacik	2011-06-30 20:42:28 +0200
committer	Chris Mason	2011-07-27 18:46:25 +0200
commit		bab39bf998133510f2dad08158006197ec0dabea (patch)
tree		0ea50b2b07a9f75988829de6c42b6936e2355545 /fs/btrfs/ctree.h
parent		Btrfs: fix how we merge extent states and deal with cached states (diff)
Btrfs: use a worker thread to do caching
A user reported a deadlock when copying a bunch of files. This is because they were low on memory and kthreadd got hung up trying to migrate pages for an allocation when starting the caching kthread. The page was locked by the person starting the caching kthread. To fix this we just need to use the async thread stuff so that the threads are already created and we don't have to worry about deadlocks. Thanks,

Reported-by: Roman Mamedov <rm@romanrm.ru>
Signed-off-by: Josef Bacik <josef@redhat.com>
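For context, a minimal sketch of how a caching job can be handed to the pre-created caching_workers pool instead of spawning a kthread. The extent-tree.c side is not part of this diff, so caching_thread() and queue_caching_work() are illustrative names; the sketch assumes the btrfs async-thread API of this era (a func callback on struct btrfs_work plus btrfs_queue_worker()):

	/*
	 * Sketch only -- not taken from this diff.  Queue block-group caching
	 * on the pre-created caching_workers pool; since the worker threads
	 * already exist, no allocation happens at queueing time and the
	 * kthreadd page-migration deadlock is avoided.
	 */
	#include "ctree.h"		/* btrfs_fs_info, btrfs_caching_control */
	#include "async-thread.h"	/* btrfs_work, btrfs_queue_worker() */

	static noinline void caching_thread(struct btrfs_work *work)
	{
		struct btrfs_caching_control *caching_ctl;

		caching_ctl = container_of(work, struct btrfs_caching_control,
					   work);
		/* walk the extent tree and fill the free space cache here */
	}

	static void queue_caching_work(struct btrfs_fs_info *fs_info,
				       struct btrfs_caching_control *caching_ctl)
	{
		caching_ctl->work.func = caching_thread;
		btrfs_queue_worker(&fs_info->caching_workers,
				   &caching_ctl->work);
	}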
Diffstat (limited to 'fs/btrfs/ctree.h')
-rw-r--r--  fs/btrfs/ctree.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 406c33876605..9f6f342900c9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -767,7 +767,6 @@ struct btrfs_space_info {
 	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
 	spinlock_t lock;
 	struct rw_semaphore groups_sem;
-	atomic_t caching_threads;
 	wait_queue_head_t wait;
 };
@@ -828,6 +827,7 @@ struct btrfs_caching_control {
 	struct list_head list;
 	struct mutex mutex;
 	wait_queue_head_t wait;
+	struct btrfs_work work;
 	struct btrfs_block_group_cache *block_group;
 	u64 progress;
 	atomic_t count;
@@ -1036,6 +1036,8 @@ struct btrfs_fs_info {
 	struct btrfs_workers endio_write_workers;
 	struct btrfs_workers endio_freespace_worker;
 	struct btrfs_workers submit_workers;
+	struct btrfs_workers caching_workers;
+
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
 	 * the cow mechanism and make them safe to write. It happens