path: root/mm/memcontrol.c
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	1813
1 file changed, 1527 insertions(+), 286 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 954032b80bed..20a8193a7af8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6,6 +6,10 @@
* Copyright 2007 OpenVZ SWsoft Inc
* Author: Pavel Emelianov <xemul@openvz.org>
*
+ * Memory thresholds
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Kirill A. Shutemov
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -21,6 +25,7 @@
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
+#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
@@ -32,7 +37,10 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
+#include <linux/swapops.h>
#include <linux/spinlock.h>
+#include <linux/eventfd.h>
+#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
@@ -55,7 +63,15 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
#define do_swap_account (0)
#endif
-#define SOFTLIMIT_EVENTS_THRESH (1000)
+/*
+ * The per-memcg event counter is incremented at every pagein/pageout. This
+ * counter is used to trigger periodic events; it is simpler and better than
+ * using jiffies etc. to handle them.
+ *
+ * These values are used as !((event) & ((1 << (thresh)) - 1))
+ */
+#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
+#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
/*
* Statistics for memory cgroup.
@@ -69,62 +85,16 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
- MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
+ MEM_CGROUP_EVENTS, /* incremented at every pagein/pageout */
MEM_CGROUP_STAT_NSTATS,
};
struct mem_cgroup_stat_cpu {
s64 count[MEM_CGROUP_STAT_NSTATS];
-} ____cacheline_aligned_in_smp;
-
-struct mem_cgroup_stat {
- struct mem_cgroup_stat_cpu cpustat[0];
};
-static inline void
-__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
- enum mem_cgroup_stat_index idx)
-{
- stat->count[idx] = 0;
-}
-
-static inline s64
-__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
- enum mem_cgroup_stat_index idx)
-{
- return stat->count[idx];
-}
-
-/*
- * For accounting under irq disable, no need for increment preempt count.
- */
-static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
- enum mem_cgroup_stat_index idx, int val)
-{
- stat->count[idx] += val;
-}
-
-static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
- enum mem_cgroup_stat_index idx)
-{
- int cpu;
- s64 ret = 0;
- for_each_possible_cpu(cpu)
- ret += stat->cpustat[cpu].count[idx];
- return ret;
-}
-
-static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
-{
- s64 ret;
-
- ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
- ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
- return ret;
-}
-
/*
* per-zone information in memory controller.
*/
@@ -174,6 +144,41 @@ struct mem_cgroup_tree {
static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+struct mem_cgroup_threshold {
+ struct eventfd_ctx *eventfd;
+ u64 threshold;
+};
+
+/* For threshold */
+struct mem_cgroup_threshold_ary {
+ /* An array index points to threshold just below usage. */
+ int current_threshold;
+ /* Size of entries[] */
+ unsigned int size;
+ /* Array of thresholds */
+ struct mem_cgroup_threshold entries[0];
+};
+
+struct mem_cgroup_thresholds {
+ /* Primary thresholds array */
+ struct mem_cgroup_threshold_ary *primary;
+ /*
+ * Spare threshold array.
+ * This is needed to make mem_cgroup_unregister_event() "never fail".
+ * It must be able to store at least primary->size - 1 entries.
+ */
+ struct mem_cgroup_threshold_ary *spare;
+};
+
+/* for OOM */
+struct mem_cgroup_eventfd_list {
+ struct list_head list;
+ struct eventfd_ctx *eventfd;
+};
+
+static void mem_cgroup_threshold(struct mem_cgroup *mem);
+static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
+
/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
@@ -217,20 +222,75 @@ struct mem_cgroup {
* Should the accounting and control be hierarchical, per subtree?
*/
bool use_hierarchy;
- unsigned long last_oom_jiffies;
+ atomic_t oom_lock;
atomic_t refcnt;
unsigned int swappiness;
+ /* OOM-Killer disable */
+ int oom_kill_disable;
/* set when res.limit == memsw.limit */
bool memsw_is_minimum;
+ /* protect arrays of thresholds */
+ struct mutex thresholds_lock;
+
+ /* thresholds for memory usage. RCU-protected */
+ struct mem_cgroup_thresholds thresholds;
+
+ /* thresholds for mem+swap usage. RCU-protected */
+ struct mem_cgroup_thresholds memsw_thresholds;
+
+ /* For oom notifier event fd */
+ struct list_head oom_notify;
+
+ /*
+ * Should we move charges of a task when a task is moved into this
+ * mem_cgroup ? And what type of charges should we move ?
+ */
+ unsigned long move_charge_at_immigrate;
/*
- * statistics. This must be placed at the end of memcg.
+ * percpu counter.
*/
- struct mem_cgroup_stat stat;
+ struct mem_cgroup_stat_cpu *stat;
+};
+
+/* Stuff for moving charges at task migration. */
+/*
+ * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
+ * left-shifted bitmap of these types.
+ */
+enum move_type {
+ MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
+ MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
+ NR_MOVE_TYPE,
};
+/* "mc" and its members are protected by cgroup_mutex */
+static struct move_charge_struct {
+ struct mem_cgroup *from;
+ struct mem_cgroup *to;
+ unsigned long precharge;
+ unsigned long moved_charge;
+ unsigned long moved_swap;
+ struct task_struct *moving_task; /* a task moving charges */
+ wait_queue_head_t waitq; /* a waitq for other context */
+} mc = {
+ .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
+};
+
+static bool move_anon(void)
+{
+ return test_bit(MOVE_CHARGE_TYPE_ANON,
+ &mc.to->move_charge_at_immigrate);
+}
+
+static bool move_file(void)
+{
+ return test_bit(MOVE_CHARGE_TYPE_FILE,
+ &mc.to->move_charge_at_immigrate);
+}
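As noted in the comment above, move_charge_at_immigrate is interpreted as a left-shifted bitmap of move_type values. A minimal illustrative sketch (not part of the patch) of how the bits compose:

/* Illustrative only: composing the move_charge_at_immigrate bitmap. */
static unsigned long example_move_flags(void)
{
	unsigned long val = 0;

	val |= 1UL << MOVE_CHARGE_TYPE_ANON;	/* bit 0: private anon pages and their swap */
	val |= 1UL << MOVE_CHARGE_TYPE_FILE;	/* bit 1: file (including tmpfs) pages and their swap */
	return val;				/* 3 enables both; 0 moves nothing */
}

Userspace enables the behaviour by writing such a value to memory.move_charge_at_immigrate; mem_cgroup_move_charge_write() later in this patch rejects anything >= (1 << NR_MOVE_TYPE).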
+
/*
* Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
* limit reclaim to prevent infinite loops, if they ever occur.
@@ -258,9 +318,12 @@ enum charge_type {
/* for encoding cft->private value on file */
#define _MEM (0)
#define _MEMSWAP (1)
+#define _OOM_TYPE (2)
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
+/* Used for OOM notifier */
+#define OOM_CONTROL (0)
/*
* Reclaim flags for mem_cgroup_hierarchical_reclaim
@@ -371,23 +434,6 @@ mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
spin_unlock(&mctz->lock);
}
-static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
-{
- bool ret = false;
- int cpu;
- s64 val;
- struct mem_cgroup_stat_cpu *cpustat;
-
- cpu = get_cpu();
- cpustat = &mem->stat.cpustat[cpu];
- val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
- if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
- __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
- ret = true;
- }
- put_cpu();
- return ret;
-}
static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
@@ -481,17 +527,31 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
return mz;
}
+static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
+ enum mem_cgroup_stat_index idx)
+{
+ int cpu;
+ s64 val = 0;
+
+ for_each_possible_cpu(cpu)
+ val += per_cpu(mem->stat->count[idx], cpu);
+ return val;
+}
+
+static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
+{
+ s64 ret;
+
+ ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
+ ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
+ return ret;
+}
+
static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
bool charge)
{
int val = (charge) ? 1 : -1;
- struct mem_cgroup_stat *stat = &mem->stat;
- struct mem_cgroup_stat_cpu *cpustat;
- int cpu = get_cpu();
-
- cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val);
- put_cpu();
+ this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}
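The hunks above replace the open-coded per-CPU stat array with counters from the generic percpu allocator: writers use this_cpu_add() on the local slot, and readers sum over all possible CPUs. A minimal self-contained sketch of that pattern, with illustrative names that are not part of this patch:

/* Sketch of the percpu-counter pattern used above (illustrative names). */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_stat_cpu {
	s64 count[4];
};

static struct example_stat_cpu *example_stat;

static int example_stat_init(void)
{
	example_stat = alloc_percpu(struct example_stat_cpu);
	return example_stat ? 0 : -ENOMEM;
}

/* Writers bump only the local CPU's slot, as in mem_cgroup_charge_statistics(). */
static void example_stat_add(int idx, int val)
{
	this_cpu_add(example_stat->count[idx], val);
}

/* Readers sum every possible CPU's slot, as in mem_cgroup_read_stat(). */
static s64 example_stat_read(int idx)
{
	int cpu;
	s64 sum = 0;

	for_each_possible_cpu(cpu)
		sum += per_cpu(example_stat->count[idx], cpu);
	return sum;
}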
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
@@ -499,24 +559,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
bool charge)
{
int val = (charge) ? 1 : -1;
- struct mem_cgroup_stat *stat = &mem->stat;
- struct mem_cgroup_stat_cpu *cpustat;
- int cpu = get_cpu();
- cpustat = &stat->cpustat[cpu];
+ preempt_disable();
+
if (PageCgroupCache(pc))
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
+ __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
else
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
+ __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
if (charge)
- __mem_cgroup_stat_add_safe(cpustat,
- MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
+ __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
else
- __mem_cgroup_stat_add_safe(cpustat,
- MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1);
- put_cpu();
+ __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+ __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
+
+ preempt_enable();
}
static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
@@ -534,6 +591,29 @@ static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
return total;
}
+static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
+{
+ s64 val;
+
+ val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
+
+ return !(val & ((1 << event_mask_shift) - 1));
+}
+
+/*
+ * Check events in order.
+ */
+static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
+{
+ /* threshold event is triggered in finer grain than soft limit */
+ if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
+ mem_cgroup_threshold(mem);
+ if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
+ mem_cgroup_update_tree(mem, page);
+ }
+}
+
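Because the counter is tested against a power-of-two mask, __memcg_event_check() fires once every 2^shift pagein/pageout events: every 128 events for the thresholds and every 1024 for the soft limit tree. A small userspace-style sketch of the same arithmetic (illustrative only, not part of the patch):

/* Userspace sketch of the power-of-two event check (illustrative). */
#include <stdio.h>

#define THRESHOLDS_EVENTS_THRESH 7	/* once in 128 */
#define SOFTLIMIT_EVENTS_THRESH 10	/* once in 1024 */

static int event_check(unsigned long events, int shift)
{
	return !(events & ((1UL << shift) - 1));
}

int main(void)
{
	unsigned long ev;
	int thr = 0, soft = 0;

	for (ev = 1; ev <= 4096; ev++) {
		if (event_check(ev, THRESHOLDS_EVENTS_THRESH))
			thr++;		/* fires at 128, 256, 384, ... */
		if (event_check(ev, SOFTLIMIT_EVENTS_THRESH))
			soft++;		/* fires at 1024, 2048, 3072, 4096 */
	}
	printf("threshold checks: %d, softlimit checks: %d\n", thr, soft);
	return 0;			/* prints 32 and 4 */
}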
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
return container_of(cgroup_subsys_state(cont,
@@ -1000,7 +1080,7 @@ static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
}
/**
- * mem_cgroup_print_mem_info: Called from OOM with tasklist_lock held in read mode.
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
* @memcg: The memory cgroup that went over limit
* @p: Task that is going to be killed
*
@@ -1174,7 +1254,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
}
}
}
- if (!mem_cgroup_local_usage(&victim->stat)) {
+ if (!mem_cgroup_local_usage(victim)) {
/* this cgroup's local usage == 0 */
css_put(&victim->css);
continue;
@@ -1205,32 +1285,141 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
return total;
}
-bool mem_cgroup_oom_called(struct task_struct *task)
+static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
{
- bool ret = false;
- struct mem_cgroup *mem;
- struct mm_struct *mm;
+ int *val = (int *)data;
+ int x;
+ /*
+ * Logically, we can stop scanning immediately when we find
+ * a memcg that is already locked. But considering unlock ops and
+ * creation/removal of memcgs, scanning them all is the simpler operation.
+ */
+ x = atomic_inc_return(&mem->oom_lock);
+ *val = max(x, *val);
+ return 0;
+}
+/*
+ * Check whether the OOM killer is already running under our hierarchy.
+ * If someone else is already running it, return false.
+ */
+static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
+{
+ int lock_count = 0;
- rcu_read_lock();
- mm = task->mm;
- if (!mm)
- mm = &init_mm;
- mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
- ret = true;
- rcu_read_unlock();
- return ret;
+ mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
+
+ if (lock_count == 1)
+ return true;
+ return false;
}
-static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
{
- mem->last_oom_jiffies = jiffies;
+ /*
+ * When a new child is created while the hierarchy is under oom,
+ * mem_cgroup_oom_lock() may not be called. We have to use
+ * atomic_add_unless() here.
+ */
+ atomic_add_unless(&mem->oom_lock, -1, 0);
return 0;
}
-static void record_last_oom(struct mem_cgroup *mem)
+static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
+{
+ mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
+}
+
+static DEFINE_MUTEX(memcg_oom_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
+
+struct oom_wait_info {
+ struct mem_cgroup *mem;
+ wait_queue_t wait;
+};
+
+static int memcg_oom_wake_function(wait_queue_t *wait,
+ unsigned mode, int sync, void *arg)
+{
+ struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
+ struct oom_wait_info *oom_wait_info;
+
+ oom_wait_info = container_of(wait, struct oom_wait_info, wait);
+
+ if (oom_wait_info->mem == wake_mem)
+ goto wakeup;
+ /* if no hierarchy, no match */
+ if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
+ return 0;
+ /*
+ * Both oom_wait_info->mem and wake_mem are stable under us,
+ * so we can use css_is_ancestor() without worrying about RCU.
+ */
+ if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
+ !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
+ return 0;
+
+wakeup:
+ return autoremove_wake_function(wait, mode, sync, arg);
+}
+
+static void memcg_wakeup_oom(struct mem_cgroup *mem)
{
- mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+ /* for filtering, pass "mem" as argument. */
+ __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
+}
+
+static void memcg_oom_recover(struct mem_cgroup *mem)
+{
+ if (atomic_read(&mem->oom_lock))
+ memcg_wakeup_oom(mem);
+}
+
+/*
+ * Try to invoke the OOM killer. Returns false if we should exit the
+ * memory-reclaim loop.
+ */
+bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
+{
+ struct oom_wait_info owait;
+ bool locked, need_to_kill;
+
+ owait.mem = mem;
+ owait.wait.flags = 0;
+ owait.wait.func = memcg_oom_wake_function;
+ owait.wait.private = current;
+ INIT_LIST_HEAD(&owait.wait.task_list);
+ need_to_kill = true;
+ /* First, try to take the OOM lock on the hierarchy under mem. */
+ mutex_lock(&memcg_oom_mutex);
+ locked = mem_cgroup_oom_lock(mem);
+ /*
+ * Even if signal_pending(), we can't quit the charge() loop without
+ * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
+ * under OOM is always welcome, so use TASK_KILLABLE here.
+ */
+ prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
+ if (!locked || mem->oom_kill_disable)
+ need_to_kill = false;
+ if (locked)
+ mem_cgroup_oom_notify(mem);
+ mutex_unlock(&memcg_oom_mutex);
+
+ if (need_to_kill) {
+ finish_wait(&memcg_oom_waitq, &owait.wait);
+ mem_cgroup_out_of_memory(mem, mask);
+ } else {
+ schedule();
+ finish_wait(&memcg_oom_waitq, &owait.wait);
+ }
+ mutex_lock(&memcg_oom_mutex);
+ mem_cgroup_oom_unlock(mem);
+ memcg_wakeup_oom(mem);
+ mutex_unlock(&memcg_oom_mutex);
+
+ if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
+ return false;
+ /* Give the dying process a chance to exit */
+ schedule_timeout(1);
+ return true;
}
/*
@@ -1240,9 +1429,6 @@ static void record_last_oom(struct mem_cgroup *mem)
void mem_cgroup_update_file_mapped(struct page *page, int val)
{
struct mem_cgroup *mem;
- struct mem_cgroup_stat *stat;
- struct mem_cgroup_stat_cpu *cpustat;
- int cpu;
struct page_cgroup *pc;
pc = lookup_page_cgroup(page);
@@ -1251,20 +1437,20 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
lock_page_cgroup(pc);
mem = pc->mem_cgroup;
- if (!mem)
- goto done;
-
- if (!PageCgroupUsed(pc))
+ if (!mem || !PageCgroupUsed(pc))
goto done;
/*
- * Preemption is already disabled, we don't need get_cpu()
+ * Preemption is already disabled. We can use __this_cpu_xxx
*/
- cpu = smp_processor_id();
- stat = &mem->stat;
- cpustat = &stat->cpustat[cpu];
+ if (val > 0) {
+ __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+ SetPageCgroupFileMapped(pc);
+ } else {
+ __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+ ClearPageCgroupFileMapped(pc);
+ }
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
done:
unlock_page_cgroup(pc);
}
@@ -1330,7 +1516,7 @@ static void drain_local_stock(struct work_struct *dummy)
/*
* Cache charges(val) which is from res_counter, to local per_cpu area.
- * This will be consumed by consumt_stock() function, later.
+ * This will be consumed by consume_stock() function, later.
*/
static void refill_stock(struct mem_cgroup *mem, int val)
{
@@ -1401,19 +1587,21 @@ static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
* oom-killer can be invoked.
*/
static int __mem_cgroup_try_charge(struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcg,
- bool oom, struct page *page)
+ gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
{
struct mem_cgroup *mem, *mem_over_limit;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct res_counter *fail_res;
int csize = CHARGE_SIZE;
- if (unlikely(test_thread_flag(TIF_MEMDIE))) {
- /* Don't account this! */
- *memcg = NULL;
- return 0;
- }
+ /*
+ * Unlike the global VM's OOM kill, we're not in a system-level memory
+ * shortage, so allow a dying process to go ahead in addition to a
+ * MEMDIE process.
+ */
+ if (unlikely(test_thread_flag(TIF_MEMDIE)
+ || fatal_signal_pending(current)))
+ goto bypass;
/*
* We always charge the cgroup the mm_struct belongs to.
@@ -1440,7 +1628,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
unsigned long flags = 0;
if (consume_stock(mem))
- goto charged;
+ goto done;
ret = res_counter_charge(&mem->res, csize, &fail_res);
if (likely(!ret)) {
@@ -1483,28 +1671,68 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
if (mem_cgroup_check_under_limit(mem_over_limit))
continue;
+ /* try to avoid oom while someone is moving charge */
+ if (mc.moving_task && current != mc.moving_task) {
+ struct mem_cgroup *from, *to;
+ bool do_continue = false;
+ /*
+ * There is a small race that "from" or "to" can be
+ * freed by rmdir, so we use css_tryget().
+ */
+ from = mc.from;
+ to = mc.to;
+ if (from && css_tryget(&from->css)) {
+ if (mem_over_limit->use_hierarchy)
+ do_continue = css_is_ancestor(
+ &from->css,
+ &mem_over_limit->css);
+ else
+ do_continue = (from == mem_over_limit);
+ css_put(&from->css);
+ }
+ if (!do_continue && to && css_tryget(&to->css)) {
+ if (mem_over_limit->use_hierarchy)
+ do_continue = css_is_ancestor(
+ &to->css,
+ &mem_over_limit->css);
+ else
+ do_continue = (to == mem_over_limit);
+ css_put(&to->css);
+ }
+ if (do_continue) {
+ DEFINE_WAIT(wait);
+ prepare_to_wait(&mc.waitq, &wait,
+ TASK_INTERRUPTIBLE);
+ /* moving charge context might have finished. */
+ if (mc.moving_task)
+ schedule();
+ finish_wait(&mc.waitq, &wait);
+ continue;
+ }
+ }
+
if (!nr_retries--) {
- if (oom) {
- mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
- record_last_oom(mem_over_limit);
+ if (!oom)
+ goto nomem;
+ if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) {
+ nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+ continue;
}
- goto nomem;
+ /* When we reach here, the current task is dying. */
+ css_put(&mem->css);
+ goto bypass;
}
}
if (csize > PAGE_SIZE)
refill_stock(mem, csize - PAGE_SIZE);
-charged:
- /*
- * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
- * if they exceeds softlimit.
- */
- if (mem_cgroup_soft_limit_check(mem))
- mem_cgroup_update_tree(mem, page);
done:
return 0;
nomem:
css_put(&mem->css);
return -ENOMEM;
+bypass:
+ *memcg = NULL;
+ return 0;
}
/*
@@ -1512,14 +1740,23 @@ nomem:
* This function is for that and do uncharge, put css's refcnt.
* gotten by try_charge().
*/
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+ unsigned long count)
{
if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE);
+ res_counter_uncharge(&mem->res, PAGE_SIZE * count);
if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
+ VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
+ WARN_ON_ONCE(count > INT_MAX);
+ __css_put(&mem->css, (int)count);
}
- css_put(&mem->css);
+ /* we don't need css_put for root */
+}
+
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+{
+ __mem_cgroup_cancel_charge(mem, 1);
}
/*
@@ -1615,6 +1852,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
mem_cgroup_charge_statistics(mem, pc, true);
unlock_page_cgroup(pc);
+ /*
+ * "charge_statistics" updated event counter. Then, check it.
+ * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
+ * if they exceeds softlimit.
+ */
+ memcg_check_events(mem, pc->page);
}
/**
@@ -1622,61 +1865,48 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
+ * @uncharge: whether we should call uncharge and css_put against @from.
*
* The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.)
* - the pc is locked, used, and ->mem_cgroup points to @from.
*
- * This function does "uncharge" from old cgroup but doesn't do "charge" to
- * new cgroup. It should be done by a caller.
+ * This function doesn't do "charge" or css_get against the new cgroup. That
+ * should be done by the caller (__mem_cgroup_try_charge would be useful). If
+ * @uncharge is true, this function does "uncharge" from the old cgroup; if
+ * @uncharge is false, the caller should do the "uncharge" itself.
*/
static void __mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to)
+ struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
{
- struct page *page;
- int cpu;
- struct mem_cgroup_stat *stat;
- struct mem_cgroup_stat_cpu *cpustat;
-
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
VM_BUG_ON(!PageCgroupLocked(pc));
VM_BUG_ON(!PageCgroupUsed(pc));
VM_BUG_ON(pc->mem_cgroup != from);
- if (!mem_cgroup_is_root(from))
- res_counter_uncharge(&from->res, PAGE_SIZE);
- mem_cgroup_charge_statistics(from, pc, false);
-
- page = pc->page;
- if (page_mapped(page) && !PageAnon(page)) {
- cpu = smp_processor_id();
- /* Update mapped_file data for mem_cgroup "from" */
- stat = &from->stat;
- cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
- -1);
-
- /* Update mapped_file data for mem_cgroup "to" */
- stat = &to->stat;
- cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
- 1);
+ if (PageCgroupFileMapped(pc)) {
+ /* Update mapped_file data for mem_cgroup */
+ preempt_disable();
+ __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+ __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+ preempt_enable();
}
+ mem_cgroup_charge_statistics(from, pc, false);
+ if (uncharge)
+ /* This is not "cancel", but cancel_charge does all we need. */
+ mem_cgroup_cancel_charge(from);
- if (do_swap_account && !mem_cgroup_is_root(from))
- res_counter_uncharge(&from->memsw, PAGE_SIZE);
- css_put(&from->css);
-
- css_get(&to->css);
+ /* caller should have done css_get */
pc->mem_cgroup = to;
mem_cgroup_charge_statistics(to, pc, true);
/*
* We charges against "to" which may not have any tasks. Then, "to"
* can be under rmdir(). But in current implementation, caller of
- * this function is just force_empty() and it's garanteed that
- * "to" is never removed. So, we don't check rmdir status here.
+ * this function is just force_empty() and move charge, so it's
+ * guaranteed that "to" is never removed. So, we don't check rmdir
+ * status here.
*/
}
@@ -1685,15 +1915,20 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
* __mem_cgroup_move_account()
*/
static int mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to)
+ struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
{
int ret = -EINVAL;
lock_page_cgroup(pc);
if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
- __mem_cgroup_move_account(pc, from, to);
+ __mem_cgroup_move_account(pc, from, to, uncharge);
ret = 0;
}
unlock_page_cgroup(pc);
+ /*
+ * check events
+ */
+ memcg_check_events(to, pc->page);
+ memcg_check_events(from, pc->page);
return ret;
}
@@ -1722,15 +1957,13 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
goto put;
parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
if (ret || !parent)
goto put_back;
- ret = mem_cgroup_move_account(pc, child, parent);
- if (!ret)
- css_put(&parent->css); /* drop extra refcnt by try_charge() */
- else
- mem_cgroup_cancel_charge(parent); /* does css_put */
+ ret = mem_cgroup_move_account(pc, child, parent, true);
+ if (ret)
+ mem_cgroup_cancel_charge(parent);
put_back:
putback_lru_page(page);
put:
@@ -1760,7 +1993,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
prefetchw(pc);
mem = memcg;
- ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
if (ret || !mem)
return ret;
@@ -1880,14 +2113,14 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
if (!mem)
goto charge_cur_mm;
*ptr = mem;
- ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
+ ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
/* drop extra refcnt from tryget */
css_put(&mem->css);
return ret;
charge_cur_mm:
if (unlikely(!mm))
mm = &init_mm;
- return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
+ return __mem_cgroup_try_charge(mm, mask, ptr, true);
}
static void
@@ -1963,15 +2196,6 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
/* If swapout, usage of swap doesn't decrease */
if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
uncharge_memsw = false;
- /*
- * do_batch > 0 when unmapping pages or inode invalidate/truncate.
- * In those cases, all pages freed continously can be expected to be in
- * the same cgroup and we have chance to coalesce uncharges.
- * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
- * because we want to do uncharge as soon as possible.
- */
- if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
- goto direct_uncharge;
batch = &current->memcg_batch;
/*
@@ -1982,6 +2206,17 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
if (!batch->memcg)
batch->memcg = mem;
/*
+ * do_batch > 0 when unmapping pages or inode invalidate/truncate.
+ * In those cases, all pages freed continuously can be expected to be in
+ * the same cgroup, and we have a chance to coalesce uncharges.
+ * But we uncharge one by one if this is killed by OOM (TIF_MEMDIE)
+ * because we want to do uncharge as soon as possible.
+ */
+
+ if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
+ goto direct_uncharge;
+
+ /*
* In typical case, batch->memcg == mem. This means we can
* merge a series of uncharges to an uncharge of res_counter.
* If not, we uncharge res_counter ony by one.
@@ -1997,6 +2232,8 @@ direct_uncharge:
res_counter_uncharge(&mem->res, PAGE_SIZE);
if (uncharge_memsw)
res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ if (unlikely(batch->memcg != mem))
+ memcg_oom_recover(mem);
return;
}
@@ -2033,7 +2270,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
case MEM_CGROUP_CHARGE_TYPE_DROP:
- if (page_mapped(page))
+ /* See mem_cgroup_prepare_migration() */
+ if (page_mapped(page) || PageCgroupMigration(pc))
goto unlock_out;
break;
case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
@@ -2064,8 +2302,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
mz = page_cgroup_zoneinfo(pc);
unlock_page_cgroup(pc);
- if (mem_cgroup_soft_limit_check(mem))
- mem_cgroup_update_tree(mem, page);
+ memcg_check_events(mem, page);
/* at swapout, this memcg will be accessed to record to swap */
if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
css_put(&mem->css);
@@ -2134,6 +2371,7 @@ void mem_cgroup_uncharge_end(void)
res_counter_uncharge(&batch->memcg->res, batch->bytes);
if (batch->memsw_bytes)
res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+ memcg_oom_recover(batch->memcg);
/* forget this pointer (for sanity check) */
batch->memcg = NULL;
}
@@ -2192,16 +2430,76 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
}
rcu_read_unlock();
}
+
+/**
+ * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
+ * @entry: swap entry to be moved
+ * @from: mem_cgroup which the entry is moved from
+ * @to: mem_cgroup which the entry is moved to
+ * @need_fixup: whether we should fixup res_counters and refcounts.
+ *
+ * It succeeds only when the swap_cgroup's record for this entry is the same
+ * as the mem_cgroup's id of @from.
+ *
+ * Returns 0 on success, -EINVAL on failure.
+ *
+ * The caller must have charged to @to, IOW, called res_counter_charge() for
+ * both res and memsw, and called css_get().
+ */
+static int mem_cgroup_move_swap_account(swp_entry_t entry,
+ struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+{
+ unsigned short old_id, new_id;
+
+ old_id = css_id(&from->css);
+ new_id = css_id(&to->css);
+
+ if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
+ mem_cgroup_swap_statistics(from, false);
+ mem_cgroup_swap_statistics(to, true);
+ /*
+ * This function is only called from task migration context now.
+ * It postpones res_counter and refcount handling till the end
+ * of task migration(mem_cgroup_clear_mc()) for performance
+ * improvement. But we cannot postpone mem_cgroup_get(to)
+ * because if the process that has been moved to @to does
+ * swap-in, the refcount of @to might be decreased to 0.
+ */
+ mem_cgroup_get(to);
+ if (need_fixup) {
+ if (!mem_cgroup_is_root(from))
+ res_counter_uncharge(&from->memsw, PAGE_SIZE);
+ mem_cgroup_put(from);
+ /*
+ * we charged both to->res and to->memsw, so we should
+ * uncharge to->res.
+ */
+ if (!mem_cgroup_is_root(to))
+ res_counter_uncharge(&to->res, PAGE_SIZE);
+ css_put(&to->css);
+ }
+ return 0;
+ }
+ return -EINVAL;
+}
+#else
+static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
+ struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+{
+ return -EINVAL;
+}
#endif
/*
* Before starting migration, account PAGE_SIZE to mem_cgroup that the old
* page belongs to.
*/
-int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
+int mem_cgroup_prepare_migration(struct page *page,
+ struct page *newpage, struct mem_cgroup **ptr)
{
struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
+ enum charge_type ctype;
int ret = 0;
if (mem_cgroup_disabled())
@@ -2212,70 +2510,125 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
css_get(&mem->css);
+ /*
+ * When migrating an anonymous page, its mapcount goes down
+ * to 0 and uncharge() will be called. But, even if it's fully
+ * unmapped, migration may fail and this page would have to be
+ * charged again. We set the MIGRATION flag here and delay uncharge
+ * until end_migration() is called.
+ *
+ * Corner Case Thinking
+ * A)
+ * The old page was mapped as Anon and is unmapped and freed
+ * while migration is ongoing.
+ * If unmap finds the old page, its uncharge() will be delayed
+ * until end_migration(). If unmap finds the new page, it's
+ * uncharged when its mapcount goes from 1 to 0. If the unmap code
+ * finds a swap migration entry, the new page will not be mapped
+ * and end_migration() will find it (mapcount == 0).
+ *
+ * B)
+ * The old page was mapped but migration fails; the kernel
+ * remaps it. The charge for it is kept by the MIGRATION flag even
+ * if its mapcount goes down to 0. We can remap it successfully
+ * without charging it again.
+ *
+ * C)
+ * The "old" page is under lock_page() until the end of
+ * migration, so the old page itself will not be swapped out.
+ * If the new page is swapped out before end_migration(), our
+ * hook into the usual swap-out path will catch the event.
+ */
+ if (PageAnon(page))
+ SetPageCgroupMigration(pc);
}
unlock_page_cgroup(pc);
+ /*
+ * If the page is not charged at this point,
+ * we return here.
+ */
+ if (!mem)
+ return 0;
- if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
- page);
- css_put(&mem->css);
- }
*ptr = mem;
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
+ css_put(&mem->css);/* drop extra refcnt */
+ if (ret || *ptr == NULL) {
+ if (PageAnon(page)) {
+ lock_page_cgroup(pc);
+ ClearPageCgroupMigration(pc);
+ unlock_page_cgroup(pc);
+ /*
+ * The old page may be fully unmapped while we kept it.
+ */
+ mem_cgroup_uncharge_page(page);
+ }
+ return -ENOMEM;
+ }
+ /*
+ * We charge the new page before it's used/mapped. So, even if unlock_page()
+ * is called before end_migration(), we can catch all events on this new
+ * page. If the new page is migrated but not remapped, its mapcount
+ * will finally be 0 and we call uncharge in end_migration().
+ */
+ pc = lookup_page_cgroup(newpage);
+ if (PageAnon(page))
+ ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+ else if (page_is_file_cache(page))
+ ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+ else
+ ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+ __mem_cgroup_commit_charge(mem, pc, ctype);
return ret;
}
/* remove redundant charge if migration failed*/
void mem_cgroup_end_migration(struct mem_cgroup *mem,
- struct page *oldpage, struct page *newpage)
+ struct page *oldpage, struct page *newpage)
{
- struct page *target, *unused;
+ struct page *used, *unused;
struct page_cgroup *pc;
- enum charge_type ctype;
if (!mem)
return;
+ /* blocks rmdir() */
cgroup_exclude_rmdir(&mem->css);
/* at migration success, oldpage->mapping is NULL. */
if (oldpage->mapping) {
- target = oldpage;
- unused = NULL;
+ used = oldpage;
+ unused = newpage;
} else {
- target = newpage;
+ used = newpage;
unused = oldpage;
}
-
- if (PageAnon(target))
- ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
- else if (page_is_file_cache(target))
- ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
- else
- ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-
- /* unused page is not on radix-tree now. */
- if (unused)
- __mem_cgroup_uncharge_common(unused, ctype);
-
- pc = lookup_page_cgroup(target);
/*
- * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup.
- * So, double-counting is effectively avoided.
+ * We disallowed uncharge of pages under migration because the page's
+ * mapcount temporarily drops to zero.
+ * Clear the flag and check whether the page should still be charged.
*/
- __mem_cgroup_commit_charge(mem, pc, ctype);
+ pc = lookup_page_cgroup(oldpage);
+ lock_page_cgroup(pc);
+ ClearPageCgroupMigration(pc);
+ unlock_page_cgroup(pc);
+
+ if (unused != oldpage)
+ pc = lookup_page_cgroup(unused);
+ __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
+ pc = lookup_page_cgroup(used);
/*
- * Both of oldpage and newpage are still under lock_page().
- * Then, we don't have to care about race in radix-tree.
- * But we have to be careful that this page is unmapped or not.
- *
- * There is a case for !page_mapped(). At the start of
- * migration, oldpage was mapped. But now, it's zapped.
- * But we know *target* page is not freed/reused under us.
- * mem_cgroup_uncharge_page() does all necessary checks.
+ * If the page is a file cache, the radix-tree replacement is atomic
+ * and we can skip this check. When it is an Anon page, its mapcount
+ * goes down to 0, but because we added the MIGRATION flag it is not
+ * uncharged yet. There are several cases, but the page->mapcount check
+ * and the USED bit check in mem_cgroup_uncharge_page() do enough
+ * checking. (See prepare_charge() also.)
*/
- if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
- mem_cgroup_uncharge_page(target);
+ if (PageAnon(used))
+ mem_cgroup_uncharge_page(used);
/*
- * At migration, we may charge account against cgroup which has no tasks
+ * At migration, we may charge the account against a cgroup which has
+ * no tasks.
* So, rmdir()->pre_destroy() can be called while we do this charge.
* In that case, we need to call pre_destroy() again. check it here.
*/
@@ -2313,10 +2666,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
- u64 memswlimit;
+ u64 memswlimit, memlimit;
int ret = 0;
int children = mem_cgroup_count_children(memcg);
u64 curusage, oldusage;
+ int enlarge;
/*
* For keeping hierarchical_reclaim simple, how long we should retry
@@ -2327,6 +2681,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+ enlarge = 0;
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
@@ -2344,6 +2699,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
mutex_unlock(&set_limit_mutex);
break;
}
+
+ memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+ if (memlimit < val)
+ enlarge = 1;
+
ret = res_counter_set_limit(&memcg->res, val);
if (!ret) {
if (memswlimit == val)
@@ -2365,6 +2725,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
else
oldusage = curusage;
}
+ if (!ret && enlarge)
+ memcg_oom_recover(memcg);
return ret;
}
@@ -2373,9 +2735,10 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
- u64 memlimit, oldusage, curusage;
+ u64 memlimit, memswlimit, oldusage, curusage;
int children = mem_cgroup_count_children(memcg);
int ret = -EBUSY;
+ int enlarge = 0;
/* see mem_cgroup_resize_res_limit */
retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
@@ -2397,6 +2760,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
mutex_unlock(&set_limit_mutex);
break;
}
+ memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ if (memswlimit < val)
+ enlarge = 1;
ret = res_counter_set_limit(&memcg->memsw, val);
if (!ret) {
if (memlimit == val)
@@ -2419,6 +2785,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
else
oldusage = curusage;
}
+ if (!ret && enlarge)
+ memcg_oom_recover(memcg);
return ret;
}
@@ -2545,7 +2913,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
pc = list_entry(list->prev, struct page_cgroup, lru);
if (busy == pc) {
list_move(&pc->lru, list);
- busy = 0;
+ busy = NULL;
spin_unlock_irqrestore(&zone->lru_lock, flags);
continue;
}
@@ -2610,6 +2978,7 @@ move_account:
if (ret)
break;
}
+ memcg_oom_recover(mem);
/* it seems parent cgroup doesn't have enough mem */
if (ret == -ENOMEM)
goto try_to_free;
@@ -2704,7 +3073,7 @@ static int
mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
{
struct mem_cgroup_idx_data *d = data;
- d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
+ d->val += mem_cgroup_read_stat(mem, d->idx);
return 0;
}
@@ -2719,40 +3088,50 @@ mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
*val = d.val;
}
+static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
+{
+ u64 idx_val, val;
+
+ if (!mem_cgroup_is_root(mem)) {
+ if (!swap)
+ return res_counter_read_u64(&mem->res, RES_USAGE);
+ else
+ return res_counter_read_u64(&mem->memsw, RES_USAGE);
+ }
+
+ mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
+ val = idx_val;
+ mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
+ val += idx_val;
+
+ if (swap) {
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_SWAPOUT, &idx_val);
+ val += idx_val;
+ }
+
+ return val << PAGE_SHIFT;
+}
+
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- u64 idx_val, val;
+ u64 val;
int type, name;
type = MEMFILE_TYPE(cft->private);
name = MEMFILE_ATTR(cft->private);
switch (type) {
case _MEM:
- if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
- mem_cgroup_get_recursive_idx_stat(mem,
- MEM_CGROUP_STAT_CACHE, &idx_val);
- val = idx_val;
- mem_cgroup_get_recursive_idx_stat(mem,
- MEM_CGROUP_STAT_RSS, &idx_val);
- val += idx_val;
- val <<= PAGE_SHIFT;
- } else
+ if (name == RES_USAGE)
+ val = mem_cgroup_usage(mem, false);
+ else
val = res_counter_read_u64(&mem->res, name);
break;
case _MEMSWAP:
- if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
- mem_cgroup_get_recursive_idx_stat(mem,
- MEM_CGROUP_STAT_CACHE, &idx_val);
- val = idx_val;
- mem_cgroup_get_recursive_idx_stat(mem,
- MEM_CGROUP_STAT_RSS, &idx_val);
- val += idx_val;
- mem_cgroup_get_recursive_idx_stat(mem,
- MEM_CGROUP_STAT_SWAPOUT, &idx_val);
- val += idx_val;
- val <<= PAGE_SHIFT;
- } else
+ if (name == RES_USAGE)
+ val = mem_cgroup_usage(mem, true);
+ else
val = res_counter_read_u64(&mem->memsw, name);
break;
default:
@@ -2865,6 +3244,39 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
return 0;
}
+static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
+ struct cftype *cft)
+{
+ return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
+}
+
+#ifdef CONFIG_MMU
+static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+ struct cftype *cft, u64 val)
+{
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+
+ if (val >= (1 << NR_MOVE_TYPE))
+ return -EINVAL;
+ /*
+ * We check this value several times, both in can_attach() and
+ * attach(), so we need cgroup lock to prevent this value from being
+ * inconsistent.
+ */
+ cgroup_lock();
+ mem->move_charge_at_immigrate = val;
+ cgroup_unlock();
+
+ return 0;
+}
+#else
+static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+ struct cftype *cft, u64 val)
+{
+ return -ENOSYS;
+}
+#endif
+
/* For read statistics */
enum {
@@ -2910,18 +3322,18 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
s64 val;
/* per cpu stat */
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
s->stat[MCS_CACHE] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
s->stat[MCS_RSS] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
s->stat[MCS_PGPGIN] += val;
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
s->stat[MCS_PGPGOUT] += val;
if (do_swap_account) {
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
s->stat[MCS_SWAP] += val * PAGE_SIZE;
}
@@ -3049,12 +3461,339 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
+static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
+{
+ struct mem_cgroup_threshold_ary *t;
+ u64 usage;
+ int i;
+
+ rcu_read_lock();
+ if (!swap)
+ t = rcu_dereference(memcg->thresholds.primary);
+ else
+ t = rcu_dereference(memcg->memsw_thresholds.primary);
+
+ if (!t)
+ goto unlock;
+
+ usage = mem_cgroup_usage(memcg, swap);
+
+ /*
+ * current_threshold points to the threshold just below usage.
+ * If that is no longer true, a threshold was crossed after the last
+ * call of __mem_cgroup_threshold().
+ */
+ i = t->current_threshold;
+
+ /*
+ * Iterate backward over the array of thresholds, starting from
+ * current_threshold, and check whether a threshold was crossed.
+ * If none of the thresholds below usage was crossed, we read
+ * only one element of the array here.
+ */
+ for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
+ eventfd_signal(t->entries[i].eventfd, 1);
+
+ /* i = current_threshold + 1 */
+ i++;
+
+ /*
+ * Iterate forward over the array of thresholds, starting from
+ * current_threshold+1, and check whether a threshold was crossed.
+ * If none of the thresholds above usage was crossed, we read
+ * only one element of the array here.
+ */
+ for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
+ eventfd_signal(t->entries[i].eventfd, 1);
+
+ /* Update current_threshold */
+ t->current_threshold = i - 1;
+unlock:
+ rcu_read_unlock();
+}
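As a worked example of the two scans above (numbers invented for illustration): with sorted thresholds {4M, 8M, 16M} and usage previously at 10M, current_threshold is index 1 (8M). If usage rises to 20M, the backward loop does nothing, the forward loop signals the 16M eventfd, and current_threshold becomes index 2. If usage later falls to 6M, the backward loop signals the 16M and 8M eventfds, the forward loop does nothing, and current_threshold drops to index 0.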
+
+static void mem_cgroup_threshold(struct mem_cgroup *memcg)
+{
+ __mem_cgroup_threshold(memcg, false);
+ if (do_swap_account)
+ __mem_cgroup_threshold(memcg, true);
+}
+
+static int compare_thresholds(const void *a, const void *b)
+{
+ const struct mem_cgroup_threshold *_a = a;
+ const struct mem_cgroup_threshold *_b = b;
+
+ return _a->threshold - _b->threshold;
+}
+
+static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
+{
+ struct mem_cgroup_eventfd_list *ev;
+
+ list_for_each_entry(ev, &mem->oom_notify, list)
+ eventfd_signal(ev->eventfd, 1);
+ return 0;
+}
+
+static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
+{
+ mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
+}
+
+static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
+ struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup_thresholds *thresholds;
+ struct mem_cgroup_threshold_ary *new;
+ int type = MEMFILE_TYPE(cft->private);
+ u64 threshold, usage;
+ int i, size, ret;
+
+ ret = res_counter_memparse_write_strategy(args, &threshold);
+ if (ret)
+ return ret;
+
+ mutex_lock(&memcg->thresholds_lock);
+
+ if (type == _MEM)
+ thresholds = &memcg->thresholds;
+ else if (type == _MEMSWAP)
+ thresholds = &memcg->memsw_thresholds;
+ else
+ BUG();
+
+ usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
+
+ /* Check if a threshold crossed before adding a new one */
+ if (thresholds->primary)
+ __mem_cgroup_threshold(memcg, type == _MEMSWAP);
+
+ size = thresholds->primary ? thresholds->primary->size + 1 : 1;
+
+ /* Allocate memory for new array of thresholds */
+ new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
+ GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ new->size = size;
+
+ /* Copy thresholds (if any) to new array */
+ if (thresholds->primary) {
+ memcpy(new->entries, thresholds->primary->entries, (size - 1) *
+ sizeof(struct mem_cgroup_threshold));
+ }
+
+ /* Add new threshold */
+ new->entries[size - 1].eventfd = eventfd;
+ new->entries[size - 1].threshold = threshold;
+
+ /* Sort thresholds. Registering of new threshold isn't time-critical */
+ sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
+ compare_thresholds, NULL);
+
+ /* Find current threshold */
+ new->current_threshold = -1;
+ for (i = 0; i < size; i++) {
+ if (new->entries[i].threshold < usage) {
+ /*
+ * new->current_threshold will not be used until
+ * rcu_assign_pointer(), so it's safe to increment
+ * it here.
+ */
+ ++new->current_threshold;
+ }
+ }
+
+ /* Free old spare buffer and save old primary buffer as spare */
+ kfree(thresholds->spare);
+ thresholds->spare = thresholds->primary;
+
+ rcu_assign_pointer(thresholds->primary, new);
+
+ /* To be sure that nobody uses thresholds */
+ synchronize_rcu();
+
+unlock:
+ mutex_unlock(&memcg->thresholds_lock);
+
+ return ret;
+}
+
+static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+ struct cftype *cft, struct eventfd_ctx *eventfd)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup_thresholds *thresholds;
+ struct mem_cgroup_threshold_ary *new;
+ int type = MEMFILE_TYPE(cft->private);
+ u64 usage;
+ int i, j, size;
+
+ mutex_lock(&memcg->thresholds_lock);
+ if (type == _MEM)
+ thresholds = &memcg->thresholds;
+ else if (type == _MEMSWAP)
+ thresholds = &memcg->memsw_thresholds;
+ else
+ BUG();
+
+ /*
+ * Something went wrong if we are trying to unregister a threshold
+ * when we don't have any thresholds.
+ */
+ BUG_ON(!thresholds);
+
+ usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
+
+ /* Check if a threshold crossed before removing */
+ __mem_cgroup_threshold(memcg, type == _MEMSWAP);
+
+ /* Calculate the new number of thresholds */
+ size = 0;
+ for (i = 0; i < thresholds->primary->size; i++) {
+ if (thresholds->primary->entries[i].eventfd != eventfd)
+ size++;
+ }
+
+ new = thresholds->spare;
+
+ /* Set thresholds array to NULL if we don't have thresholds */
+ if (!size) {
+ kfree(new);
+ new = NULL;
+ goto swap_buffers;
+ }
+
+ new->size = size;
+
+ /* Copy thresholds and find current threshold */
+ new->current_threshold = -1;
+ for (i = 0, j = 0; i < thresholds->primary->size; i++) {
+ if (thresholds->primary->entries[i].eventfd == eventfd)
+ continue;
+
+ new->entries[j] = thresholds->primary->entries[i];
+ if (new->entries[j].threshold < usage) {
+ /*
+ * new->current_threshold will not be used
+ * until rcu_assign_pointer(), so it's safe to increment
+ * it here.
+ */
+ ++new->current_threshold;
+ }
+ j++;
+ }
+
+swap_buffers:
+ /* Swap primary and spare array */
+ thresholds->spare = thresholds->primary;
+ rcu_assign_pointer(thresholds->primary, new);
+
+ /* To be sure that nobody uses thresholds */
+ synchronize_rcu();
+
+ mutex_unlock(&memcg->thresholds_lock);
+}
+
+static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
+ struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup_eventfd_list *event;
+ int type = MEMFILE_TYPE(cft->private);
+
+ BUG_ON(type != _OOM_TYPE);
+ event = kmalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ mutex_lock(&memcg_oom_mutex);
+
+ event->eventfd = eventfd;
+ list_add(&event->list, &memcg->oom_notify);
+
+ /* already in OOM ? */
+ if (atomic_read(&memcg->oom_lock))
+ eventfd_signal(eventfd, 1);
+ mutex_unlock(&memcg_oom_mutex);
+
+ return 0;
+}
+
+static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+ struct cftype *cft, struct eventfd_ctx *eventfd)
+{
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup_eventfd_list *ev, *tmp;
+ int type = MEMFILE_TYPE(cft->private);
+
+ BUG_ON(type != _OOM_TYPE);
+
+ mutex_lock(&memcg_oom_mutex);
+
+ list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
+ if (ev->eventfd == eventfd) {
+ list_del(&ev->list);
+ kfree(ev);
+ }
+ }
+
+ mutex_unlock(&memcg_oom_mutex);
+}
+
+static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
+ struct cftype *cft, struct cgroup_map_cb *cb)
+{
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+
+ cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
+
+ if (atomic_read(&mem->oom_lock))
+ cb->fill(cb, "under_oom", 1);
+ else
+ cb->fill(cb, "under_oom", 0);
+ return 0;
+}
+
+static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
+ struct cftype *cft, u64 val)
+{
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *parent;
+
+ /* cannot set to root cgroup and only 0 and 1 are allowed */
+ if (!cgrp->parent || !((val == 0) || (val == 1)))
+ return -EINVAL;
+
+ parent = mem_cgroup_from_cont(cgrp->parent);
+
+ cgroup_lock();
+ /* oom-kill-disable is a flag for subhierarchy. */
+ if ((parent->use_hierarchy) ||
+ (mem->use_hierarchy && !list_empty(&cgrp->children))) {
+ cgroup_unlock();
+ return -EINVAL;
+ }
+ mem->oom_kill_disable = val;
+ if (!val)
+ memcg_oom_recover(mem);
+ cgroup_unlock();
+ return 0;
+}
static struct cftype mem_cgroup_files[] = {
{
.name = "usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
.read_u64 = mem_cgroup_read,
+ .register_event = mem_cgroup_usage_register_event,
+ .unregister_event = mem_cgroup_usage_unregister_event,
},
{
.name = "max_usage_in_bytes",
@@ -3098,6 +3837,19 @@ static struct cftype mem_cgroup_files[] = {
.read_u64 = mem_cgroup_swappiness_read,
.write_u64 = mem_cgroup_swappiness_write,
},
+ {
+ .name = "move_charge_at_immigrate",
+ .read_u64 = mem_cgroup_move_charge_read,
+ .write_u64 = mem_cgroup_move_charge_write,
+ },
+ {
+ .name = "oom_control",
+ .read_map = mem_cgroup_oom_control_read,
+ .write_u64 = mem_cgroup_oom_control_write,
+ .register_event = mem_cgroup_oom_register_event,
+ .unregister_event = mem_cgroup_oom_unregister_event,
+ .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
+ },
};
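The register_event/unregister_event callbacks added above back the eventfd notification interface exposed through cgroup.event_control. A hedged userspace sketch of arming a usage threshold (the mount point /cgroup/grp is an assumption, and error handling is omitted):

/* Userspace sketch: arm a 50M usage threshold via cgroup.event_control. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char line[64];
	uint64_t ticks;
	int efd = eventfd(0, 0);
	int ufd = open("/cgroup/grp/memory.usage_in_bytes", O_RDONLY);
	int cfd = open("/cgroup/grp/cgroup.event_control", O_WRONLY);

	/* Line format: "<event_fd> <fd of memory.usage_in_bytes> <threshold>". */
	snprintf(line, sizeof(line), "%d %d %llu", efd, ufd, 50ULL << 20);
	write(cfd, line, strlen(line));

	/* Blocks until the 50M threshold is crossed (in either direction). */
	read(efd, &ticks, sizeof(ticks));
	printf("threshold event fired %llu time(s)\n", (unsigned long long)ticks);
	return 0;
}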
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -3106,6 +3858,8 @@ static struct cftype memsw_cgroup_files[] = {
.name = "memsw.usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
.read_u64 = mem_cgroup_read,
+ .register_event = mem_cgroup_usage_register_event,
+ .unregister_event = mem_cgroup_usage_unregister_event,
},
{
.name = "memsw.max_usage_in_bytes",
@@ -3180,24 +3934,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
kfree(mem->info.nodeinfo[node]);
}
-static int mem_cgroup_size(void)
-{
- int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
- return sizeof(struct mem_cgroup) + cpustat_size;
-}
-
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *mem;
- int size = mem_cgroup_size();
+ int size = sizeof(struct mem_cgroup);
+ /* Can be very big if MAX_NUMNODES is very big */
if (size < PAGE_SIZE)
mem = kmalloc(size, GFP_KERNEL);
else
mem = vmalloc(size);
- if (mem)
- memset(mem, 0, size);
+ if (!mem)
+ return NULL;
+
+ memset(mem, 0, size);
+ mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+ if (!mem->stat) {
+ if (size < PAGE_SIZE)
+ kfree(mem);
+ else
+ vfree(mem);
+ mem = NULL;
+ }
return mem;
}
@@ -3222,7 +3981,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
for_each_node_state(node, N_POSSIBLE)
free_mem_cgroup_per_zone_info(mem, node);
- if (mem_cgroup_size() < PAGE_SIZE)
+ free_percpu(mem->stat);
+ if (sizeof(struct mem_cgroup) < PAGE_SIZE)
kfree(mem);
else
vfree(mem);
@@ -3233,9 +3993,9 @@ static void mem_cgroup_get(struct mem_cgroup *mem)
atomic_inc(&mem->refcnt);
}
-static void mem_cgroup_put(struct mem_cgroup *mem)
+static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
{
- if (atomic_dec_and_test(&mem->refcnt)) {
+ if (atomic_sub_and_test(count, &mem->refcnt)) {
struct mem_cgroup *parent = parent_mem_cgroup(mem);
__mem_cgroup_free(mem);
if (parent)
@@ -3243,6 +4003,11 @@ static void mem_cgroup_put(struct mem_cgroup *mem)
}
}
+static void mem_cgroup_put(struct mem_cgroup *mem)
+{
+ __mem_cgroup_put(mem, 1);
+}
+
/*
* Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
*/
@@ -3319,10 +4084,10 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
INIT_WORK(&stock->work, drain_local_stock);
}
hotcpu_notifier(memcg_stock_cpu_callback, 0);
-
} else {
parent = mem_cgroup_from_cont(cont->parent);
mem->use_hierarchy = parent->use_hierarchy;
+ mem->oom_kill_disable = parent->oom_kill_disable;
}
if (parent && parent->use_hierarchy) {
@@ -3341,10 +4106,13 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
}
mem->last_scanned_child = 0;
spin_lock_init(&mem->reclaim_param_lock);
+ INIT_LIST_HEAD(&mem->oom_notify);
if (parent)
mem->swappiness = get_swappiness(parent);
atomic_set(&mem->refcnt, 1);
+ mem->move_charge_at_immigrate = 0;
+ mutex_init(&mem->thresholds_lock);
return &mem->css;
free_out:
__mem_cgroup_free(mem);
@@ -3381,17 +4149,488 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
return ret;
}
+#ifdef CONFIG_MMU
+/* Handlers for move charge at task migration. */
+#define PRECHARGE_COUNT_AT_ONCE 256
+static int mem_cgroup_do_precharge(unsigned long count)
+{
+ int ret = 0;
+ int batch_count = PRECHARGE_COUNT_AT_ONCE;
+ struct mem_cgroup *mem = mc.to;
+
+ if (mem_cgroup_is_root(mem)) {
+ mc.precharge += count;
+ /* we don't need css_get for root */
+ return ret;
+ }
+ /* try to charge at once */
+ if (count > 1) {
+ struct res_counter *dummy;
+ /*
+ * "mem" cannot be under rmdir() because we've already checked
+ * by cgroup_lock_live_cgroup() that it is not removed and we
+ * are still under the same cgroup_mutex. So we can postpone
+ * css_get().
+ */
+ if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
+ goto one_by_one;
+ if (do_swap_account && res_counter_charge(&mem->memsw,
+ PAGE_SIZE * count, &dummy)) {
+ res_counter_uncharge(&mem->res, PAGE_SIZE * count);
+ goto one_by_one;
+ }
+ mc.precharge += count;
+ VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
+ WARN_ON_ONCE(count > INT_MAX);
+ __css_get(&mem->css, (int)count);
+ return ret;
+ }
+one_by_one:
+ /* fall back to one by one charge */
+ while (count--) {
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
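+ /* reschedule periodically so a long precharge loop doesn't hog the CPU */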
+ if (!batch_count--) {
+ batch_count = PRECHARGE_COUNT_AT_ONCE;
+ cond_resched();
+ }
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+ if (ret || !mem)
+ /* mem_cgroup_clear_mc() will do uncharge later */
+ return -ENOMEM;
+ mc.precharge++;
+ }
+ return ret;
+}
+
+/**
+ * is_target_pte_for_mc - check whether a pte is a valid target for move charge
+ * @vma: the vma the pte to be checked belongs to
+ * @addr: the address corresponding to the pte to be checked
+ * @ptent: the pte to be checked
+ * @target: the pointer where the target page or swap entry will be stored (can be NULL)
+ *
+ * Returns
+ * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
+ * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
+ * move charge. If @target is not NULL, the page is stored in target->page
+ * with an extra refcount taken (callers should handle it).
+ * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
+ * target for charge migration. If @target is not NULL, the entry is stored
+ * in target->ent.
+ *
+ * Called with pte lock held.
+ */
+union mc_target {
+ struct page *page;
+ swp_entry_t ent;
+};
+
+enum mc_target_type {
+ MC_TARGET_NONE, /* not used */
+ MC_TARGET_PAGE,
+ MC_TARGET_SWAP,
+};
+
+static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent)
+{
+ struct page *page = vm_normal_page(vma, addr, ptent);
+
+ if (!page || !page_mapped(page))
+ return NULL;
+ if (PageAnon(page)) {
+ /* we don't move shared anon */
+ if (!move_anon() || page_mapcount(page) > 2)
+ return NULL;
+ } else if (!move_file())
+ /* we ignore mapcount for file pages */
+ return NULL;
+ if (!get_page_unless_zero(page))
+ return NULL;
+
+ return page;
+}
+
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+ int usage_count;
+ struct page *page = NULL;
+ swp_entry_t ent = pte_to_swp_entry(ptent);
+
+ if (!move_anon() || non_swap_entry(ent))
+ return NULL;
+ usage_count = mem_cgroup_count_swap_user(ent, &page);
+ if (usage_count > 1) { /* we don't move shared anon */
+ if (page)
+ put_page(page);
+ return NULL;
+ }
+ if (do_swap_account)
+ entry->val = ent.val;
+
+ return page;
+}
+
+static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+ struct page *page = NULL;
+ struct inode *inode;
+ struct address_space *mapping;
+ pgoff_t pgoff;
+
+ if (!vma->vm_file) /* anonymous vma */
+ return NULL;
+ if (!move_file())
+ return NULL;
+
+ inode = vma->vm_file->f_path.dentry->d_inode;
+ mapping = vma->vm_file->f_mapping;
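+ /* a not-present pte maps the file linearly; a pte_file pte encodes the offset itself */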
+ if (pte_none(ptent))
+ pgoff = linear_page_index(vma, addr);
+ else /* pte_file(ptent) is true */
+ pgoff = pte_to_pgoff(ptent);
+
+ /* the page is moved even if it's not part of this task's RSS (not page-faulted in). */
+ if (!mapping_cap_swap_backed(mapping)) { /* normal file */
+ page = find_get_page(mapping, pgoff);
+ } else { /* shmem/tmpfs file. we should take account of swap too. */
+ swp_entry_t ent;
+ mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
+ if (do_swap_account)
+ entry->val = ent.val;
+ }
+
+ return page;
+}
+
+static int is_target_pte_for_mc(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent, union mc_target *target)
+{
+ struct page *page = NULL;
+ struct page_cgroup *pc;
+ int ret = 0;
+ swp_entry_t ent = { .val = 0 };
+
+ if (pte_present(ptent))
+ page = mc_handle_present_pte(vma, addr, ptent);
+ else if (is_swap_pte(ptent))
+ page = mc_handle_swap_pte(vma, addr, ptent, &ent);
+ else if (pte_none(ptent) || pte_file(ptent))
+ page = mc_handle_file_pte(vma, addr, ptent, &ent);
+
+ if (!page && !ent.val)
+ return 0;
+ if (page) {
+ pc = lookup_page_cgroup(page);
+ /*
+ * Do only a loose check without taking the page_cgroup lock;
+ * mem_cgroup_move_account() checks whether the pc is valid
+ * under the lock.
+ */
+ if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+ ret = MC_TARGET_PAGE;
+ if (target)
+ target->page = page;
+ }
+ if (!ret || !target)
+ put_page(page);
+ }
+ /* There is a swap entry and a page doesn't exist or isn't charged */
+ if (ent.val && !ret &&
+ css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
+ ret = MC_TARGET_SWAP;
+ if (target)
+ target->ent = ent;
+ }
+ return ret;
+}
+
+static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct vm_area_struct *vma = walk->private;
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE)
+ if (is_target_pte_for_mc(vma, addr, *pte, NULL))
+ mc.precharge++; /* increment precharge temporarily */
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+
+ return 0;
+}
+
+static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
+{
+ unsigned long precharge;
+ struct vm_area_struct *vma;
+
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ struct mm_walk mem_cgroup_count_precharge_walk = {
+ .pmd_entry = mem_cgroup_count_precharge_pte_range,
+ .mm = mm,
+ .private = vma,
+ };
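+ /* hugetlb pages are not accounted by memcg, so skip hugetlb vmas */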
+ if (is_vm_hugetlb_page(vma))
+ continue;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &mem_cgroup_count_precharge_walk);
+ }
+ up_read(&mm->mmap_sem);
+
+ precharge = mc.precharge;
+ mc.precharge = 0;
+
+ return precharge;
+}
+
+static int mem_cgroup_precharge_mc(struct mm_struct *mm)
+{
+ return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
+}
+
+static void mem_cgroup_clear_mc(void)
+{
+ /* we must uncharge all the leftover precharges from mc.to */
+ if (mc.precharge) {
+ __mem_cgroup_cancel_charge(mc.to, mc.precharge);
+ mc.precharge = 0;
+ memcg_oom_recover(mc.to);
+ }
+ /*
+ * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
+ * we must uncharge here.
+ */
+ if (mc.moved_charge) {
+ __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
+ mc.moved_charge = 0;
+ memcg_oom_recover(mc.from);
+ }
+ /* we must fix up refcnts and charges */
+ if (mc.moved_swap) {
+ WARN_ON_ONCE(mc.moved_swap > INT_MAX);
+ /* uncharge swap account from the old cgroup */
+ if (!mem_cgroup_is_root(mc.from))
+ res_counter_uncharge(&mc.from->memsw,
+ PAGE_SIZE * mc.moved_swap);
+ __mem_cgroup_put(mc.from, mc.moved_swap);
+
+ if (!mem_cgroup_is_root(mc.to)) {
+ /*
+ * we charged both to->res and to->memsw, so we should
+ * uncharge to->res.
+ */
+ res_counter_uncharge(&mc.to->res,
+ PAGE_SIZE * mc.moved_swap);
+ VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
+ __css_put(&mc.to->css, mc.moved_swap);
+ }
+ /* we've already done mem_cgroup_get(mc.to) */
+
+ mc.moved_swap = 0;
+ }
+ mc.from = NULL;
+ mc.to = NULL;
+ mc.moving_task = NULL;
+ wake_up_all(&mc.waitq);
+}
+
+static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+ struct cgroup *cgroup,
+ struct task_struct *p,
+ bool threadgroup)
+{
+ int ret = 0;
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
+
+ if (mem->move_charge_at_immigrate) {
+ struct mm_struct *mm;
+ struct mem_cgroup *from = mem_cgroup_from_task(p);
+
+ VM_BUG_ON(from == mem);
+
+ mm = get_task_mm(p);
+ if (!mm)
+ return 0;
+ /* We move charges only when we move the owner of the mm */
+ if (mm->owner == p) {
+ VM_BUG_ON(mc.from);
+ VM_BUG_ON(mc.to);
+ VM_BUG_ON(mc.precharge);
+ VM_BUG_ON(mc.moved_charge);
+ VM_BUG_ON(mc.moved_swap);
+ VM_BUG_ON(mc.moving_task);
+ mc.from = from;
+ mc.to = mem;
+ mc.precharge = 0;
+ mc.moved_charge = 0;
+ mc.moved_swap = 0;
+ mc.moving_task = current;
+
+ ret = mem_cgroup_precharge_mc(mm);
+ if (ret)
+ mem_cgroup_clear_mc();
+ }
+ mmput(mm);
+ }
+ return ret;
+}
+
+static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
+ struct cgroup *cgroup,
+ struct task_struct *p,
+ bool threadgroup)
+{
+ mem_cgroup_clear_mc();
+}
+
+static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ int ret = 0;
+ struct vm_area_struct *vma = walk->private;
+ pte_t *pte;
+ spinlock_t *ptl;
+
+retry:
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; addr += PAGE_SIZE) {
+ pte_t ptent = *(pte++);
+ union mc_target target;
+ int type;
+ struct page *page;
+ struct page_cgroup *pc;
+ swp_entry_t ent;
+
+ if (!mc.precharge)
+ break;
+
+ type = is_target_pte_for_mc(vma, addr, ptent, &target);
+ switch (type) {
+ case MC_TARGET_PAGE:
+ page = target.page;
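+ /* keep the page off the LRU while its charge is moved; skip it if isolation fails */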
+ if (isolate_lru_page(page))
+ goto put;
+ pc = lookup_page_cgroup(page);
+ if (!mem_cgroup_move_account(pc,
+ mc.from, mc.to, false)) {
+ mc.precharge--;
+ /* we uncharge from mc.from later. */
+ mc.moved_charge++;
+ }
+ putback_lru_page(page);
+put: /* is_target_pte_for_mc() gets the page */
+ put_page(page);
+ break;
+ case MC_TARGET_SWAP:
+ ent = target.ent;
+ if (!mem_cgroup_move_swap_account(ent,
+ mc.from, mc.to, false)) {
+ mc.precharge--;
+ /* we fix up refcnts and charges later. */
+ mc.moved_swap++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+
+ if (addr != end) {
+ /*
+ * We have consumed all precharges we got in can_attach().
+ * We try to charge one by one, but don't do any additional
+ * charges to mc.to if we have already failed to charge once
+ * during the attach() phase.
+ */
+ ret = mem_cgroup_do_precharge(1);
+ if (!ret)
+ goto retry;
+ }
+
+ return ret;
+}
+
+static void mem_cgroup_move_charge(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+
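+ /* drain per-cpu pagevecs so isolate_lru_page() below can find the pages on the LRU */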
+ lru_add_drain_all();
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ int ret;
+ struct mm_walk mem_cgroup_move_charge_walk = {
+ .pmd_entry = mem_cgroup_move_charge_pte_range,
+ .mm = mm,
+ .private = vma,
+ };
+ if (is_vm_hugetlb_page(vma))
+ continue;
+ ret = walk_page_range(vma->vm_start, vma->vm_end,
+ &mem_cgroup_move_charge_walk);
+ if (ret)
+ /*
+ * this means we have consumed all precharges and failed to
+ * make an additional charge. Just abandon here.
+ */
+ break;
+ }
+ up_read(&mm->mmap_sem);
+}
+
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
struct cgroup *old_cont,
struct task_struct *p,
bool threadgroup)
{
- /*
- * FIXME: It's better to move charges of this process from old
- * memcg to new memcg. But it's just on TODO-List now.
- */
+ struct mm_struct *mm;
+
+ if (!mc.to)
+ /* no need to move charge */
+ return;
+
+ mm = get_task_mm(p);
+ if (mm) {
+ mem_cgroup_move_charge(mm);
+ mmput(mm);
+ }
+ mem_cgroup_clear_mc();
+}
+#else /* !CONFIG_MMU */
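+/* moving charges requires walking page tables; without an MMU these hooks are no-ops */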
+static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+ struct cgroup *cgroup,
+ struct task_struct *p,
+ bool threadgroup)
+{
+ return 0;
}
+static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
+ struct cgroup *cgroup,
+ struct task_struct *p,
+ bool threadgroup)
+{
+}
+static void mem_cgroup_move_task(struct cgroup_subsys *ss,
+ struct cgroup *cont,
+ struct cgroup *old_cont,
+ struct task_struct *p,
+ bool threadgroup)
+{
+}
+#endif
struct cgroup_subsys mem_cgroup_subsys = {
.name = "memory",
@@ -3400,6 +4639,8 @@ struct cgroup_subsys mem_cgroup_subsys = {
.pre_destroy = mem_cgroup_pre_destroy,
.destroy = mem_cgroup_destroy,
.populate = mem_cgroup_populate,
+ .can_attach = mem_cgroup_can_attach,
+ .cancel_attach = mem_cgroup_cancel_attach,
.attach = mem_cgroup_move_task,
.early_init = 0,
.use_id = 1,