Diffstat (limited to 'mm/frontswap.c')
-rw-r--r--  mm/frontswap.c | 23 ++++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 0547a35f798b..2890e67d6026 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -44,6 +44,13 @@ EXPORT_SYMBOL(frontswap_enabled);
  */
 static bool frontswap_writethrough_enabled __read_mostly;
 
+/*
+ * If enabled, the underlying tmem implementation is capable of doing
+ * exclusive gets, so frontswap_load, on a successful tmem_get must
+ * mark the page as no longer in frontswap AND mark it dirty.
+ */
+static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_FS
 /*
  * Counters available via /sys/kernel/debug/frontswap (if debugfs is
@@ -97,6 +104,15 @@ void frontswap_writethrough(bool enable)
 EXPORT_SYMBOL(frontswap_writethrough);
 
 /*
+ * Enable/disable frontswap exclusive gets (see above).
+ */
+void frontswap_tmem_exclusive_gets(bool enable)
+{
+	frontswap_tmem_exclusive_gets_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
+
+/*
  * Called when a swap device is swapon'd.
  */
 void __frontswap_init(unsigned type)
@@ -174,8 +190,13 @@ int __frontswap_load(struct page *page)
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset))
 		ret = frontswap_ops.load(type, offset, page);
-	if (ret == 0)
+	if (ret == 0) {
 		inc_frontswap_loads();
+		if (frontswap_tmem_exclusive_gets_enabled) {
+			SetPageDirty(page);
+			frontswap_clear(sis, offset);
+		}
+	}
 	return ret;
 }
EXPORT_SYMBOL(__frontswap_load);
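
For context, here is a minimal, illustrative sketch (not part of this commit) of how a tmem backend might opt in to the behaviour added above. Only frontswap_tmem_exclusive_gets() comes from this patch; the backend name, module parameter and init hook below are hypothetical, and the sketch assumes the new function's declaration is visible via <linux/frontswap.h>, which the full commit adds outside this file.

/*
 * Illustrative sketch only: a hypothetical backend enabling exclusive
 * gets.  frontswap_tmem_exclusive_gets() is the export added above;
 * everything else (names, module parameter, init hook) is assumed.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/frontswap.h>

/* Hypothetical knob: does this backend's get consume the stored copy? */
static bool exclusive_gets = true;
module_param(exclusive_gets, bool, 0444);

static int __init example_tmem_backend_init(void)
{
	/*
	 * Tell the frontswap core that a successful load removes the copy
	 * from the backend, so __frontswap_load() must re-dirty the page
	 * and clear its bit in frontswap_map (see the last hunk above).
	 */
	frontswap_tmem_exclusive_gets(exclusive_gets);
	return 0;
}
module_init(example_tmem_backend_init);

MODULE_LICENSE("GPL");

With exclusive gets enabled, a page brought back by __frontswap_load() is marked dirty because the backend no longer holds a copy; the page must later be written out again (to the swap device or back into frontswap) rather than simply reclaimed.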