What CONFIG_INET and CONFIG_MEMCG_LEGACY_KMEM guard inside the memory
controller code is insignificant; these conditionals are not worth the
complication and fragility that come with them.
[akpm@linux-foundation.org: rework mem_cgroup_css_free() statement ordering]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
*/
struct mem_cgroup_stat_cpu __percpu *stat;
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
+ unsigned long socket_pressure;
+
+ /* Legacy tcp memory accounting */
#ifndef CONFIG_SLOB
/* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
struct wb_domain cgwb_domain;
#endif
-#ifdef CONFIG_INET
- unsigned long socket_pressure;
-#endif
-
/* List of events which userspace want to receive */
struct list_head event_list;
spinlock_t event_list_lock;
void sock_release_memcg(struct sock *sk);
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
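These three declarations are the entire socket-accounting API the
networking core sees. For context, a minimal sketch of how a caller
pairs the charge and uncharge, modeled on the __sk_mem_schedule() and
__sk_mem_reclaim() call sites in net/core/sock.c; the amt page count
and the suppress_allocation label are assumed surrounding context, not
part of this patch:

	/* charge side: back off if the memcg is over its limit */
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
		goto suppress_allocation;

	/* release side: give the pages back to the counter */
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);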
-#if defined(CONFIG_MEMCG) && defined(CONFIG_INET)
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
if (memcg->tcp_mem.memory_pressure)
return true;
do {
if (time_before(jiffies, memcg->socket_pressure))
return true;
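Pieced together, the helper now compiles in both checks
unconditionally: the legacy tcp_mem pressure flag first, then the
time-based window walked up the hierarchy. A sketch of the resulting
function; the parent walk via parent_mem_cgroup() that closes the
do-loop is assumed, since the loop tail is not shown in this excerpt:

static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	/* the cgroup1 tcp controller keeps its own pressure flag */
	if (memcg->tcp_mem.memory_pressure)
		return true;
	/*
	 * cgroup2: honor the vmpressure-driven time window, which
	 * any ancestor may have opened
	 */
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}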
For those who want to have the feature enabled by default should
select this option (if, for some reason, they need to disable it
then swapaccount=0 does the trick).
-config MEMCG_LEGACY_KMEM
- bool
-config MEMCG_KMEM
- bool "Legacy Memory Resource Controller Kernel Memory accounting"
- depends on MEMCG
- depends on SLUB || SLAB
- select MEMCG_LEGACY_KMEM
- help
- The Kernel Memory extension for Memory Resource Controller can limit
- the amount of memory used by kernel objects in the system. Those are
- fundamentally different from the entities handled by the standard
- Memory Controller, which are page-based, and can be swapped. Users of
- the kmem extension can use it to guarantee that no group of processes
- will ever exhaust kernel resources alone.
config BLK_CGROUP
bool "IO controller"
to provide different user info for different servers.
When user namespaces are enabled in the kernel it is
- recommended that the MEMCG and MEMCG_KMEM options also be
- enabled and that user-space use the memory control groups to
- limit the amount of memory a memory unprivileged users can
- use.
+ recommended that the MEMCG option also be enabled and that
+ user-space use the memory control groups to limit the amount
+ of memory a memory unprivileged users can use.
case _KMEM:
counter = &memcg->kmem;
break;
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
case _TCP:
counter = &memcg->tcp_mem.memory_allocated;
break;
}
#endif /* !CONFIG_SLOB */
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
unsigned long limit)
{
mutex_unlock(&memcg_limit_mutex);
return ret;
}
-#else
-static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
- unsigned long limit)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_MEMCG_LEGACY_KMEM */
-
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
{
int ret;
mutex_unlock(&memcg_limit_mutex);
return ret;
}
-#else
-static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_MEMCG_LEGACY_KMEM && CONFIG_INET */
/*
* The user of this function is...
case _KMEM:
counter = &memcg->kmem;
break;
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
case _TCP:
counter = &memcg->tcp_mem.memory_allocated;
break;
.seq_show = memcg_numa_stat_show,
},
#endif
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
{
.name = "kmem.limit_in_bytes",
.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
.seq_show = memcg_slab_show,
},
#endif
{
.name = "kmem.tcp.limit_in_bytes",
.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
.write = mem_cgroup_reset,
.read_u64 = mem_cgroup_read_u64,
},
vmpressure_init(&memcg->vmpressure);
INIT_LIST_HEAD(&memcg->event_list);
spin_lock_init(&memcg->event_list_lock);
+ memcg->socket_pressure = jiffies;
#ifndef CONFIG_SLOB
memcg->kmemcg_id = -1;
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&memcg->cgwb_list);
-#endif
-#ifdef CONFIG_INET
- memcg->socket_pressure = jiffies;
#endif
return &memcg->css;
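Initializing socket_pressure to the current jiffies value means a
fresh memcg starts with its pressure window already expired, so the
pressure check is a no-op until vmpressure pushes the deadline into
the future:

	memcg->socket_pressure = jiffies;	/* deadline == now */

	/* later, in mem_cgroup_under_socket_pressure(): */
	if (time_before(jiffies, memcg->socket_pressure))	/* false */
		return true;			/* not taken at this point */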
memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, &parent->memsw);
page_counter_init(&memcg->kmem, &parent->kmem);
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
page_counter_init(&memcg->tcp_mem.memory_allocated,
&parent->tcp_mem.memory_allocated);
/*
* No need to take a reference to the parent because cgroup
memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
page_counter_init(&memcg->tcp_mem.memory_allocated, NULL);
/*
* Deeper hierachy with use_hierarchy == false doesn't make
* much sense so let cgroup subsystem know about this
if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
static_branch_inc(&memcg_sockets_enabled_key);
/*
* Make sure the memcg is initialized: mem_cgroup_iter()
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
static_branch_dec(&memcg_sockets_enabled_key);
-#endif
-
- memcg_free_kmem(memcg);
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
if (memcg->tcp_mem.active)
static_branch_dec(&memcg_sockets_enabled_key);
+ memcg_free_kmem(memcg);
__mem_cgroup_free(memcg);
}
commit_charge(newpage, memcg, true);
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);
memcg = mem_cgroup_from_task(current);
if (memcg == root_mem_cgroup)
goto out;
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
goto out;
if (css_tryget_online(&memcg->css))
sk->sk_memcg = memcg;
out:
{
gfp_t gfp_mask = GFP_KERNEL;
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
struct page_counter *counter;
memcg->tcp_mem.memory_pressure = 1;
return false;
}
/* Don't block in the packet receive path */
if (in_softirq())
gfp_mask = GFP_NOWAIT;
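Assembled, the charge path keeps both worlds side by side: cgroup1
charges the dedicated tcp counter and flips its pressure flag, cgroup2
charges the unified memory counter. A sketch, assuming the cgroup2
branch falls through to try_charge() with a __GFP_NOFAIL retry on
failure as the full function does (that tail is not shown in this
excerpt):

bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *counter;

		/* within limit: clear pressure and succeed */
		if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
					    nr_pages, &counter)) {
			memcg->tcp_mem.memory_pressure = 0;
			return true;
		}
		/* over limit: charge anyway but report failure */
		page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
		memcg->tcp_mem.memory_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask | __GFP_NOFAIL, nr_pages);
	return false;
}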
*/
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
nr_pages);
return;
}
page_counter_uncharge(&memcg->memory, nr_pages);
css_put_many(&memcg->css, nr_pages);
}
-#endif /* CONFIG_INET */
-
static int __init cgroup_memory(char *s)
{
char *token;
level = vmpressure_calc_level(scanned, reclaimed);
if (level > VMPRESSURE_LOW) {
/*
* Let the socket buffer allocator know that
*/
memcg->socket_pressure = jiffies + HZ;
}
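Since HZ is one second's worth of jiffies, a vmpressure event above
VMPRESSURE_LOW opens a one-second window during which
mem_cgroup_under_socket_pressure() reports pressure and the socket
code backs off:

	/* vmpressure above LOW: throttle socket memory for ~1s */
	memcg->socket_pressure = jiffies + HZ;

	/* until that deadline, the check from memcontrol.h holds: */
	if (time_before(jiffies, memcg->socket_pressure))	/* true for ~1s */
		return true;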