Lines matching "atomic", "threshold", "us"
1 // SPDX-License-Identifier: GPL-2.0-or-later
7 #include <linux/backing-dev.h>
17 #include "memcontrol-v1.h"
20 * Cgroups above their limits are maintained in a RB-Tree, independent of their hierarchy representation
113 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
118 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
121 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
122 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
128 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
129 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
132 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
137 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
139 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
140 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
141 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
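
The insert path above keys each per-node entry by usage_in_excess and remembers the rightmost (largest-excess) node so soft-limit reclaim can pick the worst offender without walking the tree. Below is a compact userspace sketch of the same bookkeeping; it deliberately uses a plain, unbalanced binary search tree instead of the kernel rbtree, and all names are illustrative.

#include <stdio.h>
#include <stdbool.h>

struct mz {
	unsigned long usage_in_excess;
	bool on_tree;
	struct mz *left, *right;
};

struct tree {
	struct mz *root;
	struct mz *rightmost;	/* node with the largest excess */
};

static void insert_exceeded(struct tree *t, struct mz *mz, unsigned long excess)
{
	struct mz **p = &t->root;
	bool rightmost = true;

	if (mz->on_tree)
		return;
	mz->usage_in_excess = excess;
	if (!excess)
		return;
	while (*p) {
		if (mz->usage_in_excess < (*p)->usage_in_excess) {
			p = &(*p)->left;
			rightmost = false;	/* no longer the largest */
		} else {
			p = &(*p)->right;
		}
	}
	if (rightmost)
		t->rightmost = mz;
	mz->left = mz->right = NULL;
	*p = mz;
	mz->on_tree = true;
}

int main(void)
{
	struct tree t = { 0 };
	struct mz a = { 0 }, b = { 0 }, c = { 0 };

	insert_exceeded(&t, &a, 10);
	insert_exceeded(&t, &b, 40);
	insert_exceeded(&t, &c, 25);
	printf("largest excess: %lu\n", t.rightmost->usage_in_excess);	/* 40 */
	return 0;
}
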
147 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
150 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
151 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
153 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
154 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
162 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
164 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
169 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
170 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
174 excess = nr_pages - soft_limit; in soft_limit_excess()
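
soft_limit_excess() reports how many pages of usage sit above the soft limit, and zero when usage is at or below it. A minimal userspace sketch of that clamp-at-zero logic (hypothetical helper name, not the kernel function):

#include <stdio.h>

static unsigned long soft_limit_excess_sketch(unsigned long nr_pages,
					      unsigned long soft_limit)
{
	/* Pages charged above the soft limit; zero when under the limit. */
	return nr_pages > soft_limit ? nr_pages - soft_limit : 0;
}

int main(void)
{
	printf("%lu\n", soft_limit_excess_sketch(1000, 768));	/* 232 */
	printf("%lu\n", soft_limit_excess_sketch(500, 768));	/* 0 */
	return 0;
}
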
199 mz = memcg->nodeinfo[nid]; in memcg1_update_tree()
202 * We have to update the tree if mz is on RB-tree or mem is over its softlimit. in memcg1_update_tree()
205 if (excess || mz->on_tree) { in memcg1_update_tree()
208 spin_lock_irqsave(&mctz->lock, flags); in memcg1_update_tree()
209 /* if on-tree, remove it */ in memcg1_update_tree()
210 if (mz->on_tree) in memcg1_update_tree()
213 * Insert again. mz->usage_in_excess will be updated. in memcg1_update_tree()
217 spin_unlock_irqrestore(&mctz->lock, flags); in memcg1_update_tree()
229 mz = memcg->nodeinfo[nid]; in memcg1_remove_from_trees()
243 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
246 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
254 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
255 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
266 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
268 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
339 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id]; in memcg1_soft_limit_reclaim()
346 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in memcg1_soft_limit_reclaim()
362 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in memcg1_soft_limit_reclaim()
365 spin_lock_irq(&mctz->lock); in memcg1_soft_limit_reclaim()
375 excess = soft_limit_excess(mz->memcg); in memcg1_soft_limit_reclaim()
386 spin_unlock_irq(&mctz->lock); in memcg1_soft_limit_reclaim()
387 css_put(&mz->memcg->css); in memcg1_soft_limit_reclaim()
400 css_put(&next_mz->memcg->css); in memcg1_soft_limit_reclaim()
415 "Please report your usecase to linux-[email protected] if you " in mem_cgroup_move_charge_write()
419 return -EINVAL; in mem_cgroup_move_charge_write()
426 return -ENOSYS; in mem_cgroup_move_charge_write()
438 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
440 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
448 * current_threshold points to threshold just below or equal to usage. in __mem_cgroup_threshold()
449 * If it's not true, a threshold was crossed after last call of __mem_cgroup_threshold(). in __mem_cgroup_threshold()
452 i = t->current_threshold; in __mem_cgroup_threshold()
456 * current_threshold and check if a threshold is crossed. in __mem_cgroup_threshold()
460 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
461 eventfd_signal(t->entries[i].eventfd); in __mem_cgroup_threshold()
468 * current_threshold+1 and check if a threshold is crossed. in __mem_cgroup_threshold()
472 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
473 eventfd_signal(t->entries[i].eventfd); in __mem_cgroup_threshold()
476 t->current_threshold = i - 1; in __mem_cgroup_threshold()
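
__mem_cgroup_threshold() keeps the registered thresholds sorted ascending and tracks current_threshold, the index of the last entry at or below usage. When usage changes it first walks downwards, signalling entries now above usage, then upwards, signalling entries now at or below usage, and finally leaves current_threshold on the last entry <= usage. A userspace sketch of that scan, with printf standing in for eventfd_signal() (names are illustrative):

#include <stdio.h>

struct thr { unsigned long threshold; int id; };

static void check_thresholds(struct thr *t, int size, int *current_idx,
			     unsigned long usage)
{
	int i = *current_idx;

	/* Walk down: entries above the new usage were crossed downwards. */
	for (; i >= 0 && t[i].threshold > usage; i--)
		printf("signal eventfd %d (down)\n", t[i].id);

	/* Walk up: entries now at or below usage were crossed upwards. */
	i++;
	for (; i < size && t[i].threshold <= usage; i++)
		printf("signal eventfd %d (up)\n", t[i].id);

	/* Leave current pointing at the last threshold <= usage. */
	*current_idx = i - 1;
}

int main(void)
{
	struct thr t[] = { {100, 1}, {200, 2}, {400, 3} };
	int cur = -1;

	check_thresholds(t, 3, &cur, 250);	/* signals 1 and 2 */
	check_thresholds(t, 3, &cur, 50);	/* signals 2 and 1 */
	return 0;
}
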
492 /* Cgroup1: threshold notifications & softlimit tree updates */
505 nr_pages = -nr_pages; /* for event */ in memcg1_charge_statistics()
508 __this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages); in memcg1_charge_statistics()
519 val = __this_cpu_read(memcg->events_percpu->nr_page_events); in memcg1_event_ratelimit()
520 next = __this_cpu_read(memcg->events_percpu->targets[target]); in memcg1_event_ratelimit()
522 if ((long)(next - val) < 0) { in memcg1_event_ratelimit()
533 __this_cpu_write(memcg->events_percpu->targets[target], next); in memcg1_event_ratelimit()
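
The ratelimit helper compares the per-CPU page-event counter against a per-target threshold with the same wrap-safe "(long)(next - val) < 0" idiom used by time_after(), and advances the target once it has been passed. A small sketch; TARGET_INTERVAL is a made-up constant standing in for the kernel's per-target event constants:

#include <stdio.h>
#include <stdbool.h>

#define TARGET_INTERVAL 1024UL	/* hypothetical event interval */

static bool event_ratelimit(unsigned long val, unsigned long *next)
{
	/* Signed difference survives counter wrap-around. */
	if ((long)(*next - val) < 0) {
		*next = val + TARGET_INTERVAL;
		return true;	/* enough events elapsed, do the work */
	}
	return false;
}

int main(void)
{
	unsigned long counter = (unsigned long)-10;	/* near wrap-around */
	unsigned long next = counter + 5;

	counter += 20;	/* counter wraps past "next" */
	printf("%d\n", event_ratelimit(counter, &next));	/* prints 1 */
	return 0;
}
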
548 /* threshold event is triggered at a finer grain than the soft limit */ in memcg1_check_events()
575 * i_pages lock which is taken with interrupts-off. It is the in memcg1_swapout()
577 * only synchronisation we have for updating the per-CPU variables. in memcg1_swapout()
581 memcg1_charge_statistics(memcg, -folio_nr_pages(folio)); in memcg1_swapout()
593 __this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory); in memcg1_uncharge_batch()
603 if (_a->threshold > _b->threshold) in compare_thresholds()
606 if (_a->threshold < _b->threshold) in compare_thresholds()
607 return -1; in compare_thresholds()
618 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
619 eventfd_signal(ev->eventfd); in mem_cgroup_oom_notify_cb()
638 unsigned long threshold; in __mem_cgroup_usage_register_event() local
642 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
646 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
649 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
652 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
657 /* Check if a threshold was crossed before adding a new one */ in __mem_cgroup_usage_register_event()
658 if (thresholds->primary) in __mem_cgroup_usage_register_event()
661 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
666 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
669 new->size = size; in __mem_cgroup_usage_register_event()
672 if (thresholds->primary) in __mem_cgroup_usage_register_event()
673 memcpy(new->entries, thresholds->primary->entries, in __mem_cgroup_usage_register_event()
674 flex_array_size(new, entries, size - 1)); in __mem_cgroup_usage_register_event()
676 /* Add new threshold */ in __mem_cgroup_usage_register_event()
677 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
678 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
680 /* Sort thresholds. Registering of new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
681 sort(new->entries, size, sizeof(*new->entries), in __mem_cgroup_usage_register_event()
684 /* Find current threshold */ in __mem_cgroup_usage_register_event()
685 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
687 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
689 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
693 ++new->current_threshold; in __mem_cgroup_usage_register_event()
699 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
700 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
702 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
708 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
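
Registering a usage threshold builds a new array one entry larger, copies the old entries, appends the new one, sorts, recomputes current_threshold for the present usage, and publishes the result while keeping the old array around as a spare for the next unregister. A userspace sketch of that sequence, under the assumption that plain malloc()/qsort() replace the kernel's allocation, sort() and RCU publication:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { unsigned long threshold; int eventfd; };

struct thresholds {
	int size;
	int current_threshold;
	struct entry entries[];
};

static int cmp_entry(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	if (ea->threshold > eb->threshold)
		return 1;
	if (ea->threshold < eb->threshold)
		return -1;
	return 0;
}

static struct thresholds *register_threshold(struct thresholds *old,
					     unsigned long threshold,
					     int eventfd, unsigned long usage)
{
	int size = old ? old->size + 1 : 1;
	struct thresholds *new;
	int i;

	new = malloc(sizeof(*new) + size * sizeof(new->entries[0]));
	if (!new)
		return NULL;
	new->size = size;

	/* Copy the old entries and append the new threshold. */
	if (old)
		memcpy(new->entries, old->entries,
		       (size - 1) * sizeof(new->entries[0]));
	new->entries[size - 1].threshold = threshold;
	new->entries[size - 1].eventfd = eventfd;

	/* Keep the array sorted; registration is not time-critical. */
	qsort(new->entries, size, sizeof(new->entries[0]), cmp_entry);

	/* current_threshold = index of the last entry <= current usage. */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold > usage)
			break;
		new->current_threshold++;
	}
	return new;	/* caller publishes this and keeps "old" as the spare */
}

int main(void)
{
	struct thresholds *old = register_threshold(NULL, 200, 5, 150);
	struct thresholds *new = register_threshold(old, 100, 6, 150);

	if (!old || !new)
		return 1;
	printf("size=%d current=%d\n", new->size, new->current_threshold);
	free(old);
	free(new);
	return 0;
}
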
733 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
736 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
739 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
744 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
747 /* Check if a threshold was crossed before removing */ in __mem_cgroup_usage_unregister_event()
750 /* Calculate the new number of thresholds */ in __mem_cgroup_usage_unregister_event()
752 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
753 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
759 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
772 new->size = size; in __mem_cgroup_usage_unregister_event()
774 /* Copy thresholds and find current threshold */ in __mem_cgroup_usage_unregister_event()
775 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
776 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
777 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
780 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
781 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
783 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
787 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
794 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
796 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
803 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
804 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
807 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
829 return -ENOMEM; in mem_cgroup_oom_register_event()
833 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
834 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
837 if (memcg->under_oom) in mem_cgroup_oom_register_event()
851 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
852 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
853 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
866 * This is way over-engineered. It tries to support fully configurable events for each user.
883 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
885 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
887 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
890 eventfd_signal(event->eventfd); in memcg_event_remove()
892 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
894 css_put(&memcg->css); in memcg_event_remove()
900 * Called with wqh->lock held and interrupts disabled.
907 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
914 * for us. in memcg_event_wake()
917 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
920 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
921 if (!list_empty(&event->list)) { in memcg_event_wake()
922 list_del_init(&event->list); in memcg_event_wake()
924 * We are in atomic context, but cgroup_event_remove() may sleep, so it is deferred to a workqueue. in memcg_event_wake()
927 schedule_work(&event->remove); in memcg_event_wake()
929 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
941 event->wqh = wqh; in memcg_event_ptable_queue_proc()
942 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
967 return -EOPNOTSUPP; in memcg_write_event_control()
973 return -EINVAL; in memcg_write_event_control()
982 return -EINVAL; in memcg_write_event_control()
986 return -EBADF; in memcg_write_event_control()
992 return -ENOMEM; in memcg_write_event_control()
994 event->memcg = memcg; in memcg_write_event_control()
995 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
996 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
997 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
998 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
1000 event->eventfd = eventfd_ctx_fileget(fd_file(efile)); in memcg_write_event_control()
1001 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
1002 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
1007 ret = -EBADF; in memcg_write_event_control()
1021 cdentry = fd_file(cfile)->f_path.dentry; in memcg_write_event_control()
1022 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { in memcg_write_event_control()
1023 ret = -EINVAL; in memcg_write_event_control()
1035 name = cdentry->d_name.name; in memcg_write_event_control()
1038 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
1039 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
1042 "Please report your usecase to linux-mm-@kvack.org" in memcg_write_event_control()
1044 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
1045 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
1048 "Please report your usecase to linux-mm-@kvack.org " in memcg_write_event_control()
1050 event->register_event = vmpressure_register_event; in memcg_write_event_control()
1051 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
1053 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
1054 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
1056 ret = -EINVAL; in memcg_write_event_control()
1065 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, in memcg_write_event_control()
1067 ret = -EINVAL; in memcg_write_event_control()
1073 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
1077 vfs_poll(fd_file(efile), &event->pt); in memcg_write_event_control()
1079 spin_lock_irq(&memcg->event_list_lock); in memcg_write_event_control()
1080 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
1081 spin_unlock_irq(&memcg->event_list_lock); in memcg_write_event_control()
1087 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
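
From userspace, the registration parsed above is driven through the deprecated cgroup v1 cgroup.event_control file, as documented for cgroup v1 memory thresholds: the application creates an eventfd, opens the control file it wants to watch, writes "<event_fd> <control_fd> <args>" to cgroup.event_control, and then reads the eventfd for notifications. A usage sketch for a memory.usage_in_bytes threshold; the cgroup path is an example and error handling is minimal:

#include <sys/eventfd.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cg = "/sys/fs/cgroup/memory/mygroup";	/* example path */
	char path[256], buf[64];
	uint64_t ticks;
	int efd, ufd, cfd;

	efd = eventfd(0, 0);
	snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", cg);
	ufd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", cg);
	cfd = open(path, O_WRONLY);
	if (efd < 0 || ufd < 0 || cfd < 0)
		return 1;

	/* Register a 64 MiB usage threshold: "<event_fd> <usage_fd> <bytes>" */
	snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 64ULL << 20);
	if (write(cfd, buf, strlen(buf)) < 0)
		return 1;

	/* Each threshold crossing bumps the eventfd counter. */
	if (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
		printf("threshold crossed %llu time(s)\n",
		       (unsigned long long)ticks);
	return 0;
}
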
1095 INIT_LIST_HEAD(&memcg->oom_notify); in memcg1_memcg_init()
1096 mutex_init(&memcg->thresholds_lock); in memcg1_memcg_init()
1097 INIT_LIST_HEAD(&memcg->event_list); in memcg1_memcg_init()
1098 spin_lock_init(&memcg->event_list_lock); in memcg1_memcg_init()
1110 spin_lock_irq(&memcg->event_list_lock); in memcg1_css_offline()
1111 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in memcg1_css_offline()
1112 list_del_init(&event->list); in memcg1_css_offline()
1113 schedule_work(&event->remove); in memcg1_css_offline()
1115 spin_unlock_irq(&memcg->event_list_lock); in memcg1_css_offline()
1119 * Check whether the OOM-Killer is already running under our hierarchy.
1129 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1138 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1151 iter->oom_lock = false; in mem_cgroup_oom_trylock()
1168 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1178 iter->under_oom++; in mem_cgroup_mark_under_oom()
1192 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1193 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1212 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1223 * For the following lockless ->under_oom test, the only required in memcg1_oom_recover()
1230 if (memcg && memcg->under_oom) in memcg1_oom_recover()
1235 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1253 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
1285 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
1286 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
1303 * On the other hand, in-kernel OOM killer allows for an async victim in memcg1_oom_prepare()
1311 if (READ_ONCE(memcg->oom_kill_disable)) { in memcg1_oom_prepare()
1312 if (current->in_user_fault) { in memcg1_oom_prepare()
1313 css_get(&memcg->css); in memcg1_oom_prepare()
1314 current->memcg_in_oom = memcg; in memcg1_oom_prepare()
1346 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
1350 ret = -EINTR; in mem_cgroup_resize_max()
1359 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
1360 max <= memcg->memsw.max; in mem_cgroup_resize_max()
1363 ret = -EINVAL; in mem_cgroup_resize_max()
1366 if (max > counter->max) in mem_cgroup_resize_max()
1382 ret = -EBUSY; in mem_cgroup_resize_max()
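
The resize path only accepts a new limit while the "memsw >= memory" invariant still holds: raising memory.max above memsw.max, or lowering memsw.max below memory.max, is rejected with -EINVAL. A tiny sketch of that check (illustrative names):

#include <stdbool.h>
#include <stdio.h>

/* memsw counts memory + swap, so its limit may never drop below the
 * plain memory limit; a resize violating that is rejected. */
static bool limits_invariant(bool setting_memsw, unsigned long new_max,
			     unsigned long memory_max, unsigned long memsw_max)
{
	return setting_memsw ? new_max >= memory_max : new_max <= memsw_max;
}

int main(void)
{
	/* Raising memory.max above memsw.max: rejected. */
	printf("%d\n", limits_invariant(false, 2048, 1024, 1536));	/* 0 */
	/* Raising memsw.max above memory.max: allowed. */
	printf("%d\n", limits_invariant(true, 4096, 1024, 1536));	/* 1 */
	return 0;
}
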
1402 /* we call try-to-free pages to make this cgroup empty */ in mem_cgroup_force_empty()
1408 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
1410 return -EINTR; in mem_cgroup_force_empty()
1414 nr_retries--; in mem_cgroup_force_empty()
1427 return -EINVAL; in mem_cgroup_force_empty_write()
1443 pr_warn_once("Non-hierarchical mode is deprecated. " in mem_cgroup_hierarchy_write()
1444 "Please report your usecase to linux-[email protected] if you " in mem_cgroup_hierarchy_write()
1447 return -EINVAL; in mem_cgroup_hierarchy_write()
1456 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
1458 counter = &memcg->memory; in mem_cgroup_read_u64()
1461 counter = &memcg->memsw; in mem_cgroup_read_u64()
1464 counter = &memcg->kmem; in mem_cgroup_read_u64()
1467 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
1473 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
1475 if (counter == &memcg->memory) in mem_cgroup_read_u64()
1477 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
1481 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
1483 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
1485 return counter->failcnt; in mem_cgroup_read_u64()
1487 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; in mem_cgroup_read_u64()
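
mem_cgroup_read_u64() serves many cgroup1 files with one handler by packing a counter type and an attribute into cft->private and unpacking them with MEMFILE_TYPE()/MEMFILE_ATTR(). The sketch below mirrors that high-bits/low-bits idea; the exact macro and enum definitions live elsewhere in the memcg code and should be checked against the source rather than taken from here:

#include <stdio.h>

#define MEMFILE_PRIVATE(type, attr)	(((type) << 16) | (attr))
#define MEMFILE_TYPE(val)		(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)		((val) & 0xffff)

enum { _MEM, _MEMSWAP, _KMEM, _TCP };		/* counter type in the high bits */
enum { RES_USAGE, RES_LIMIT, RES_MAX_USAGE, RES_FAILCNT, RES_SOFT_LIMIT };

int main(void)
{
	int private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

	printf("type=%d attr=%d\n", MEMFILE_TYPE(private), MEMFILE_ATTR(private));
	return 0;
}
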
1500 return -EINVAL; in mem_cgroup_dummy_seq_show()
1509 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
1513 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
1531 memcg->tcpmem_active = true; in memcg_update_tcp_max()
1550 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
1554 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
1557 ret = -EINVAL; in mem_cgroup_write()
1560 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
1570 "Please report your usecase to linux-[email protected] if you " in mem_cgroup_write()
1576 "Please report your usecase to linux-[email protected] if you " in mem_cgroup_write()
1584 ret = -EOPNOTSUPP; in mem_cgroup_write()
1587 "Please report your usecase to linux-[email protected] if you " in mem_cgroup_write()
1589 WRITE_ONCE(memcg->soft_limit, nr_pages); in mem_cgroup_write()
1603 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
1605 counter = &memcg->memory; in mem_cgroup_reset()
1608 counter = &memcg->memsw; in mem_cgroup_reset()
1611 counter = &memcg->kmem; in mem_cgroup_reset()
1614 counter = &memcg->tcpmem; in mem_cgroup_reset()
1620 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
1625 counter->failcnt = 0; in mem_cgroup_reset()
1638 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
1698 seq_printf(m, "%s=%lu", stat->name, in memcg_numa_stat_show()
1699 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
1704 stat->lru_mask, false)); in memcg_numa_stat_show()
1710 seq_printf(m, "hierarchical_%s=%lu", stat->name, in memcg_numa_stat_show()
1711 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
1716 stat->lru_mask, true)); in memcg_numa_stat_show()
1797 memory = min(memory, READ_ONCE(mi->memory.max)); in memcg1_stat_format()
1798 memsw = min(memsw, READ_ONCE(mi->memsw.max)); in memcg1_stat_format()
1831 mz = memcg->nodeinfo[pgdat->node_id]; in memcg1_stat_format()
1833 anon_cost += mz->lruvec.anon_cost; in memcg1_stat_format()
1834 file_cost += mz->lruvec.file_cost; in memcg1_stat_format()
1856 return -EINVAL; in mem_cgroup_swappiness_write()
1859 WRITE_ONCE(memcg->swappiness, val); in mem_cgroup_swappiness_write()
1870 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable)); in mem_cgroup_oom_control_read()
1871 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
1873 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
1883 "Please report your usecase to linux-mm-@kvack.org if you " in mem_cgroup_oom_control_write()
1888 return -EINVAL; in mem_cgroup_oom_control_write()
1890 WRITE_ONCE(memcg->oom_kill_disable, val); in mem_cgroup_oom_control_write()
2067 page_counter_charge(&memcg->kmem, nr_pages); in memcg1_account_kmem()
2069 page_counter_uncharge(&memcg->kmem, -nr_pages); in memcg1_account_kmem()
2078 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in memcg1_charge_skmem()
2079 memcg->tcpmem_pressure = 0; in memcg1_charge_skmem()
2082 memcg->tcpmem_pressure = 1; in memcg1_charge_skmem()
2084 page_counter_charge(&memcg->tcpmem, nr_pages); in memcg1_charge_skmem()
2092 memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu, in memcg1_alloc_events()
2094 return !!memcg->events_percpu; in memcg1_alloc_events()
2099 if (memcg->events_percpu) in memcg1_free_events()
2100 free_percpu(memcg->events_percpu); in memcg1_free_events()
2112 rtpn->rb_root = RB_ROOT; in memcg1_init()
2113 rtpn->rb_rightmost = NULL; in memcg1_init()
2114 spin_lock_init(&rtpn->lock); in memcg1_init()