author	Linus Torvalds	2019-07-15 01:17:18 +0200
committer	Linus Torvalds	2019-07-15 01:17:18 +0200
commit	a1240cf74e8228f7c80d44af17914c0ffc5633fb (patch)
tree	ab8c5841940f9d20e6ffb6c91301a4146b63fdb8 /lib
parent	Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent	percpu_ref: release percpu memory early without PERCPU_REF_ALLOW_REINIT (diff)
Merge branch 'for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu
Pull percpu updates from Dennis Zhou:

 "This includes changes to let percpu_ref release the backing percpu
  memory earlier after it has been switched to atomic in cases where
  the percpu ref is not revived. This will help recycle percpu memory
  earlier in cases where the refcounts are pinned for prolonged
  periods of time"

* 'for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
  percpu_ref: release percpu memory early without PERCPU_REF_ALLOW_REINIT
  md: initialize percpu refcounters using PERCU_REF_ALLOW_REINIT
  io_uring: initialize percpu refcounters using PERCU_REF_ALLOW_REINIT
  percpu_ref: introduce PERCPU_REF_ALLOW_REINIT flag
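
As a quick usage sketch (not part of this commit; struct obj, obj_release() and obj_init() below are hypothetical names), a caller that never revives its ref can omit the new flag and have the backing percpu counters freed at the switch to atomic, while a caller that later uses percpu_ref_reinit() must opt in with PERCPU_REF_ALLOW_REINIT:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

/* hypothetical object embedding a percpu_ref */
struct obj {
	struct percpu_ref ref;
};

static void obj_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

static int obj_init(struct obj *o, bool will_reinit)
{
	/*
	 * Without PERCPU_REF_ALLOW_REINIT the percpu counters may be
	 * released as soon as the ref switches to atomic mode; callers
	 * that intend to percpu_ref_reinit() later must pass the flag.
	 */
	return percpu_ref_init(&o->ref, obj_release,
			       will_reinit ? PERCPU_REF_ALLOW_REINIT : 0,
			       GFP_KERNEL);
}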
Diffstat (limited to 'lib')
 lib/percpu-refcount.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
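
In the diff below, percpu_ref_init() records the new allow_reinit flag (refs that start atomic or dead keep it set so they can still switch to percpu mode later), percpu_ref_call_confirm_rcu() frees the percpu counters via percpu_ref_exit() as soon as the switch to atomic completes when reinit is not allowed, and __percpu_ref_switch_to_percpu() warns and bails out if such a ref is asked to go back to percpu mode. A rough sketch of the resulting lifecycle, reusing the hypothetical obj example above:

/* o->ref was initialized without PERCPU_REF_ALLOW_REINIT */
percpu_ref_kill(&o->ref);	/* drop the initial ref, begin the switch to atomic */

/*
 * Once the RCU grace period ends, percpu_ref_call_confirm_rcu() now
 * calls percpu_ref_exit() and frees the percpu counters immediately,
 * instead of pinning them until the object is released.
 */

percpu_ref_put(&o->ref);	/* last put invokes obj_release() */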
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 071a76c7bac0..4f6c6ebbbbde 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -70,11 +70,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 		return -ENOMEM;
 
 	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+	ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
 
-	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
 		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-	else
+		ref->allow_reinit = true;
+	} else {
 		start_count += PERCPU_COUNT_BIAS;
+	}
 
 	if (flags & PERCPU_REF_INIT_DEAD)
 		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -120,6 +123,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
 	ref->confirm_switch = NULL;
 	wake_up_all(&percpu_ref_switch_waitq);
 
+	if (!ref->allow_reinit)
+		percpu_ref_exit(ref);
+
 	/* drop ref from percpu_ref_switch_to_atomic() */
 	percpu_ref_put(ref);
 }
@@ -195,6 +201,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
 		return;
 
+	if (WARN_ON_ONCE(!ref->allow_reinit))
+		return;
+
 	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
 	/*