Diffstat (limited to 'drivers/md/bcache/sysfs.c')
-rw-r--r--  drivers/md/bcache/sysfs.c  |  155
1 file changed, 83 insertions(+), 72 deletions(-)
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index d8458d477a12..b3ff57d61dde 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -54,7 +54,6 @@ sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
-sysfs_time_stats_attribute(try_harder, ms, us);
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
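Note: this hunk retires the try_harder time statistics; the matching sysfs_print_time_stats() call and attribute-list entry are removed in later hunks below, so no orphaned sysfs file is left behind. For readers unfamiliar with the pattern, here is a standalone userspace sketch of the token-pasting such declaration macros rely on (hypothetical names only; the real macros live in drivers/md/bcache/sysfs.h and differ in detail):

	#include <stdio.h>

	/* Hypothetical stand-in: the real read_attribute() declares a
	 * struct attribute; here we declare a name string for the demo. */
	#define read_attribute(name)  static const char *attr_##name = #name

	#define time_stats_attribute(name, freq_units, dur_units)	\
		read_attribute(name##_average_frequency_##freq_units);	\
		read_attribute(name##_average_duration_##dur_units)

	time_stats_attribute(btree_gc, sec, ms);

	int main(void)
	{
		/* one sysfs file name per statistic */
		printf("%s\n", attr_btree_gc_average_frequency_sec);
		printf("%s\n", attr_btree_gc_average_duration_ms);
		return 0;
	}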
@@ -406,7 +405,7 @@ struct bset_stats_op {
struct bset_stats stats;
};
-static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
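The rename to bch_btree_bset_stats() matches bcache's usual bch_ namespace prefix. The container_of() in the body is the standard way such callbacks recover their per-operation state from the embedded btree_op; a self-contained userspace illustration (the macro re-implemented here only for the demo):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct btree_op { int flags; };

	struct bset_stats_op {
		struct btree_op op;	/* embedded member, as above */
		int nodes;
	};

	int main(void)
	{
		struct bset_stats_op outer = { .nodes = 42 };
		struct btree_op *b_op = &outer.op;  /* what the callback sees */

		/* recover the enclosing struct from the member pointer */
		struct bset_stats_op *op =
			container_of(b_op, struct bset_stats_op, op);
		printf("%d\n", op->nodes);	/* prints 42 */
		return 0;
	}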
@@ -424,7 +423,7 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
memset(&op, 0, sizeof(op));
bch_btree_op_init(&op.op, -1);
- ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+ ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
if (ret < 0)
return ret;
@@ -442,81 +441,81 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
op.stats.floats, op.stats.failed);
}
-SHOW(__bch_cache_set)
+static unsigned bch_root_usage(struct cache_set *c)
{
- unsigned root_usage(struct cache_set *c)
- {
- unsigned bytes = 0;
- struct bkey *k;
- struct btree *b;
- struct btree_iter iter;
+ unsigned bytes = 0;
+ struct bkey *k;
+ struct btree *b;
+ struct btree_iter iter;
- goto lock_root;
+ goto lock_root;
- do {
- rw_unlock(false, b);
+ do {
+ rw_unlock(false, b);
lock_root:
- b = c->root;
- rw_lock(false, b, b->level);
- } while (b != c->root);
+ b = c->root;
+ rw_lock(false, b, b->level);
+ } while (b != c->root);
- for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
- bytes += bkey_bytes(k);
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ bytes += bkey_bytes(k);
- rw_unlock(false, b);
+ rw_unlock(false, b);
- return (bytes * 100) / btree_bytes(c);
- }
+ return (bytes * 100) / btree_bytes(c);
+}
- size_t cache_size(struct cache_set *c)
- {
- size_t ret = 0;
- struct btree *b;
+static size_t bch_cache_size(struct cache_set *c)
+{
+ size_t ret = 0;
+ struct btree *b;
- mutex_lock(&c->bucket_lock);
- list_for_each_entry(b, &c->btree_cache, list)
- ret += 1 << (b->keys.page_order + PAGE_SHIFT);
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry(b, &c->btree_cache, list)
+ ret += 1 << (b->keys.page_order + PAGE_SHIFT);
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
-
- unsigned cache_max_chain(struct cache_set *c)
- {
- unsigned ret = 0;
- struct hlist_head *h;
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
- mutex_lock(&c->bucket_lock);
+static unsigned bch_cache_max_chain(struct cache_set *c)
+{
+ unsigned ret = 0;
+ struct hlist_head *h;
- for (h = c->bucket_hash;
- h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
- h++) {
- unsigned i = 0;
- struct hlist_node *p;
+ mutex_lock(&c->bucket_lock);
- hlist_for_each(p, h)
- i++;
+ for (h = c->bucket_hash;
+ h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+ h++) {
+ unsigned i = 0;
+ struct hlist_node *p;
- ret = max(ret, i);
- }
+ hlist_for_each(p, h)
+ i++;
- mutex_unlock(&c->bucket_lock);
- return ret;
+ ret = max(ret, i);
}
- unsigned btree_used(struct cache_set *c)
- {
- return div64_u64(c->gc_stats.key_bytes * 100,
- (c->gc_stats.nodes ?: 1) * btree_bytes(c));
- }
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
- unsigned average_key_size(struct cache_set *c)
- {
- return c->gc_stats.nkeys
- ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
- : 0;
- }
+static unsigned bch_btree_used(struct cache_set *c)
+{
+ return div64_u64(c->gc_stats.key_bytes * 100,
+ (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+}
+static unsigned bch_average_key_size(struct cache_set *c)
+{
+ return c->gc_stats.nkeys
+ ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+ : 0;
+}
+
+SHOW(__bch_cache_set)
+{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
sysfs_print(synchronous, CACHE_SYNC(&c->sb));
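Most of the hunk above hoists GCC nested functions out of SHOW(__bch_cache_set) into ordinary bch_-prefixed static helpers without changing their logic. The subtle one is bch_root_usage(): the btree root can be replaced while the reader sleeps on its lock, so the goto-into-loop retries until the pointer read and the locked node agree. The idiom in isolation, as a userspace sketch with pthread rwlocks standing in for bcache's rw_lock():

	#include <pthread.h>

	struct node { pthread_rwlock_t lock; };
	struct tree { struct node *root; };  /* may be swapped concurrently */

	static struct node *read_lock_stable_root(struct tree *t)
	{
		struct node *b;

		goto lock_root;
		do {
			/* locked a node that is no longer the root; retry */
			pthread_rwlock_unlock(&b->lock);
	lock_root:
			b = t->root;
			pthread_rwlock_rdlock(&b->lock);
		} while (b != t->root);

		return b;	/* caller must unlock when done */
	}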
@@ -524,21 +523,20 @@ lock_root:
sysfs_hprint(bucket_size, bucket_bytes(c));
sysfs_hprint(block_size, block_bytes(c));
sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, root_usage(c));
+ sysfs_print(root_usage_percent, bch_root_usage(c));
- sysfs_hprint(btree_cache_size, cache_size(c));
- sysfs_print(btree_cache_max_chain, cache_max_chain(c));
+ sysfs_hprint(btree_cache_size, bch_cache_size(c));
+ sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
- sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us);
- sysfs_print(btree_used_percent, btree_used(c));
+ sysfs_print(btree_used_percent, bch_btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
- sysfs_hprint(average_key_size, average_key_size(c));
+ sysfs_hprint(average_key_size, bch_average_key_size(c));
sysfs_print(cache_read_races,
atomic_long_read(&c->cache_read_races));
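SHOW() bodies are a sequence of sysfs_print()/sysfs_hprint() macro calls, each of which only emits output when `attr` names its file; the _h variant scales the value into a human-readable unit. A hypothetical sketch of that scaling step (the real bcache helper rounds and formats differently):

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical: shift a byte count into k/M/G/... units, truncating. */
	static int hprint(char *buf, size_t len, int64_t v)
	{
		static const char units[] = "\0kMGTPEZY";
		unsigned u = 0;

		while ((v >= 1024 || v <= -1024) && u < 8) {
			v /= 1024;
			u++;
		}
		return u ? snprintf(buf, len, "%lld%c", (long long)v, units[u])
			 : snprintf(buf, len, "%lld", (long long)v);
	}

	int main(void)
	{
		char buf[32];

		hprint(buf, sizeof(buf), 9 << 20);	/* 9 MiB in bytes */
		puts(buf);				/* "9M" */
		return 0;
	}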
@@ -709,7 +707,6 @@ static struct attribute *bch_cache_set_internal_files[] = {
sysfs_time_stats_attribute_list(btree_split, sec, us)
sysfs_time_stats_attribute_list(btree_sort, ms, us)
sysfs_time_stats_attribute_list(btree_read, ms, us)
- sysfs_time_stats_attribute_list(try_harder, ms, us)
&sysfs_btree_nodes,
&sysfs_btree_used_percent,
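The _list entries above expand to pointers into the surrounding NULL-terminated attribute array, which the kobject layer walks when the sysfs directory is populated, so dropping try_harder's list entry here completes the removal. The array's shape, sketched with stand-in types (not the real <linux/sysfs.h> definitions):

	/* Illustrative shape only; stand-in struct, entries abbreviated. */
	struct attribute { const char *name; };

	static struct attribute sysfs_btree_nodes = { "btree_nodes" };
	static struct attribute sysfs_btree_used_percent = { "btree_used_percent" };

	static struct attribute *bch_cache_set_internal_files[] = {
		&sysfs_btree_nodes,
		&sysfs_btree_used_percent,
		/* ... */
		NULL	/* terminator the sysfs core stops at */
	};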
@@ -761,7 +758,9 @@ SHOW(__bch_cache)
int cmp(const void *l, const void *r)
{ return *((uint16_t *) r) - *((uint16_t *) l); }
- size_t n = ca->sb.nbuckets, i, unused, btree;
+ struct bucket *b;
+ size_t n = ca->sb.nbuckets, i;
+ size_t unused = 0, available = 0, dirty = 0, meta = 0;
uint64_t sum = 0;
/* Compute 31 quantiles */
uint16_t q[31], *p, *cached;
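The local cmp() sorts descending by subtracting in reverse order, which is safe here because uint16_t values promote to int before the subtraction, so it cannot overflow. A standalone check of the same comparator with qsort():

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static int cmp(const void *l, const void *r)
	{ return *((uint16_t *) r) - *((uint16_t *) l); }

	int main(void)
	{
		uint16_t p[] = { 3, 65535, 0, 7 };
		size_t i;

		qsort(p, sizeof(p) / sizeof(p[0]), sizeof(p[0]), cmp);
		for (i = 0; i < sizeof(p) / sizeof(p[0]); i++)
			printf("%u ", p[i]);	/* 65535 7 3 0 */
		putchar('\n');
		return 0;
	}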
@@ -772,6 +771,17 @@ SHOW(__bch_cache)
return -ENOMEM;
mutex_lock(&ca->set->bucket_lock);
+ for_each_bucket(b, ca) {
+ if (!GC_SECTORS_USED(b))
+ unused++;
+ if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
+ available++;
+ if (GC_MARK(b) == GC_MARK_DIRTY)
+ dirty++;
+ if (GC_MARK(b) == GC_MARK_METADATA)
+ meta++;
+ }
+
for (i = ca->sb.first_bucket; i < n; i++)
p[i] = ca->buckets[i].prio;
mutex_unlock(&ca->set->bucket_lock);
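The new loop classifies every bucket in one pass while bucket_lock is held: a bucket carries exactly one GC mark, so available/dirty/meta partition the marked buckets, while `unused` (no sectors in use) is counted independently and can overlap them. The same single-pass shape, as a standalone sketch with stand-in types for GC_MARK()/GC_SECTORS_USED():

	#include <stdio.h>

	enum gc_mark { MARK_RECLAIMABLE, MARK_DIRTY, MARK_METADATA };
	struct bucket { enum gc_mark mark; unsigned sectors_used; };

	int main(void)
	{
		struct bucket buckets[] = {
			{ MARK_RECLAIMABLE, 0 }, { MARK_DIRTY, 8 },
			{ MARK_METADATA, 4 },    { MARK_RECLAIMABLE, 2 },
		};
		size_t n = sizeof(buckets) / sizeof(buckets[0]);
		size_t unused = 0, available = 0, dirty = 0, meta = 0, i;

		for (i = 0; i < n; i++) {
			if (!buckets[i].sectors_used)
				unused++;	/* may overlap the marks */
			switch (buckets[i].mark) {
			case MARK_RECLAIMABLE:	available++; break;
			case MARK_DIRTY:	dirty++; break;
			case MARK_METADATA:	meta++; break;
			}
		}
		printf("unused %zu available %zu dirty %zu meta %zu\n",
		       unused, available, dirty, meta);
		return 0;
	}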
@@ -786,10 +796,7 @@ SHOW(__bch_cache)
while (cached < p + n &&
*cached == BTREE_PRIO)
- cached++;
-
- btree = cached - p;
- n -= btree;
+ cached++, n--;
for (i = 0; i < n; i++)
sum += INITIAL_PRIO - cached[i];
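Before this change the btree bucket count was recovered from the pointer distance after the skip loop (`btree = cached - p`); since metadata buckets are now counted directly by the GC-mark pass above, the loop can simply drop them from `n` as it skips. Each remaining bucket then contributes INITIAL_PRIO - prio, a rough "time since last access", and the code between these hunks presumably averages `sum` over `n` before the Average line is printed. A toy illustration of that arithmetic (INITIAL_PRIO value assumed; the kernel defines it in bcache.h):

	#include <stdio.h>
	#include <stdint.h>

	#define INITIAL_PRIO 32768	/* assumed for the demo */

	int main(void)
	{
		/* descending prios after the sort; fresher buckets first */
		uint16_t cached[] = { 32760, 32000, 20000 };
		size_t n = 3, i;
		uint64_t sum = 0;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];	/* bucket "age" */

		printf("average age: %llu\n",
		       (unsigned long long)(sum / n));
		return 0;
	}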
@@ -805,12 +812,16 @@ SHOW(__bch_cache)
ret = scnprintf(buf, PAGE_SIZE,
"Unused: %zu%%\n"
+ "Clean: %zu%%\n"
+ "Dirty: %zu%%\n"
"Metadata: %zu%%\n"
"Average: %llu\n"
"Sectors per Q: %zu\n"
"Quantiles: [",
unused * 100 / (size_t) ca->sb.nbuckets,
- btree * 100 / (size_t) ca->sb.nbuckets, sum,
+ available * 100 / (size_t) ca->sb.nbuckets,
+ dirty * 100 / (size_t) ca->sb.nbuckets,
+ meta * 100 / (size_t) ca->sb.nbuckets, sum,
n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
for (i = 0; i < ARRAY_SIZE(q); i++)
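This final loop fills q[] by sampling the sorted priorities at 31 evenly spaced ranks, matching the "Compute 31 quantiles" comment and the "Sectors per Q" divisor of ARRAY_SIZE(q) + 1. A standalone sketch of that sampling (the index arithmetic is inferred from the divisor; the real loop may transform the sampled values further):

	#include <stdio.h>
	#include <stdint.h>

	#define NQ 31	/* matches ARRAY_SIZE(q) in the show function */

	int main(void)
	{
		uint16_t cached[1000], q[NQ];
		size_t n = sizeof(cached) / sizeof(cached[0]), i;

		for (i = 0; i < n; i++)
			cached[i] = (uint16_t)(n - i);	/* fake sorted, descending */

		/* sample rank i+1 of NQ+1 evenly spaced cut points */
		for (i = 0; i < NQ; i++)
			q[i] = cached[n * (i + 1) / (NQ + 1)];

		for (i = 0; i < NQ; i++)
			printf("%u%s", q[i], i + 1 < NQ ? " " : "\n");
		return 0;
	}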