Excerpts from mm/page_counter.c: the hierarchical page counter
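These excerpts come from the hierarchical page counter used by the memory
controller. They assume struct page_counter from
include/linux/page_counter.h; below is a simplified sketch of the fields
referenced in the excerpts (the real structure adds cacheline padding, and
the exact field set varies across kernel versions):

struct page_counter {
	atomic_long_t usage;		/* pages currently charged */
	unsigned long min;		/* declared hard protection */
	unsigned long low;		/* declared best-effort protection */
	unsigned long max;		/* limit enforced by try_charge */
	unsigned long emin, elow;	/* computed effective protection */
	atomic_long_t min_usage, low_usage;
	atomic_long_t children_min_usage, children_low_usage;
	unsigned long watermark;	/* historical maximum usage */
	unsigned long failcnt;		/* failed charge attempts */
	struct page_counter *parent;	/* NULL at the root */
};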
/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	/* More uncharges than charges? Clamp usage at zero. */
	if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
		      new, nr_pages)) {
		new = 0;
		atomic_long_set(&counter->usage, new);
	}
	if (track_protection(counter))
		propagate_protected_usage(counter, new);
}
/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;
	bool protection = track_protection(counter);

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		if (protection)
			propagate_protected_usage(c, new);
		/* Racy, but some inaccuracy in the watermark is fine. */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
}
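A minimal usage sketch: charges propagate to every ancestor, so a parent's
usage always covers its children's. This assumes the three-argument
page_counter_init() of recent kernels (the final argument enables
protection tracking); both counters here are hypothetical.

	struct page_counter parent, child;

	page_counter_init(&parent, NULL, true);
	page_counter_init(&child, &parent, true);

	page_counter_charge(&child, 32);
	/* page_counter_read(&child) == 32, page_counter_read(&parent) == 32 */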
/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;
	bool protection = track_protection(counter);

	for (c = counter; c; c = c->parent) {
		long new;

		/*
		 * Charge speculatively to avoid an expensive CAS.  The
		 * atomic_long_add_return() implies a full memory barrier
		 * between incrementing the count and reading the limit.
		 * When racing with page_counter_set_max(), we either see
		 * the new limit or the setter sees the counter has
		 * changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			/* Racy, but failcnt is only used for stats. */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		if (protection)
			propagate_protected_usage(c, new);
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}
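A sketch of the canonical caller pattern, loosely modeled on what the
memory controller does around these primitives; my_charge() and
my_reclaim() are hypothetical names, not kernel API:

static int my_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *fail;

	if (page_counter_try_charge(counter, nr_pages, &fail))
		return 0;
	/* @fail is the first ancestor over its limit: reclaim from its
	 * subtree, then retry once before giving up. */
	my_reclaim(fail, nr_pages);
	if (page_counter_try_charge(counter, nr_pages, &fail))
		return 0;
	return -ENOMEM;
}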
/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * coherent with page_counter_try_charge(): that
		 * function modifies the count before checking the
		 * limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = page_counter_read(counter);
		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (page_counter_read(counter) <= usage || nr_pages >= old)
			return 0;

		counter->max = old;
		cond_resched();
	}
}
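Sketch of the writer side, loosely modeled on the memory controller's
resize loop (simplified; my_set_limit() and my_reclaim() are the same
hypothetical names as above). On -EBUSY the caller must shrink usage or
reject the setting, since page_counter_set_max() refuses to publish a
limit below a usage it has observed:

static int my_set_limit(struct page_counter *counter, unsigned long limit)
{
	int retries = 5;

	do {
		int err = page_counter_set_max(counter, limit);

		if (err != -EBUSY)
			return err;
		/* Usage is above the requested limit: reclaim and retry. */
		my_reclaim(counter, page_counter_read(counter) - limit);
	} while (retries--);

	return -EBUSY;
}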
/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}
/**
 * page_counter_set_low - set the amount of best-effort protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}
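Sketch: mirroring writes of the memory.min/memory.low control files into
the counter (the byte values and their conversion to pages are assumed
context, not kernel API shown here):

	page_counter_set_min(&counter, min_bytes / PAGE_SIZE);
	page_counter_set_low(&counter, low_bytes / PAGE_SIZE);
	/* Both walk to the root so every ancestor's
	 * children_{min,low}_usage reflects the newly protected usage. */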
/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
 * rounded down to the nearest full page.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
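This is how cgroup control-file writes are typically parsed; a usage
sketch combining it with the limit setter above (buf and counter are
assumed context):

	unsigned long nr_pages;
	int err = page_counter_memparse(buf, "max", &nr_pages);

	if (!err)
		err = page_counter_set_max(&counter, nr_pages);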
/*
 * This function calculates an individual page counter's effective
 * protection, derived from its own memory.min/low settings, its
 * parent's and siblings' settings, and the actual memory distribution
 * in the tree.
 *
 * The declared protection at a given level may be overcommitted.  In
 * that case the parent's effective protection is distributed among
 * the children in proportion to how much protection each claims and
 * actually uses; if one counter claims much more protection than it
 * uses memory, the unused claim does not diminish its siblings'
 * shares.
 *
 * Conversely, when the declared protection is undercommitted at a
 * given level, the distribution of the larger parental protection
 * budget is NOT proportional.  A counter's protection from a sibling
 * is capped to its own memory.min/low setting.
 *
 * However, to allow protecting recursive subtrees from each other
 * without having to declare each individual counter's fixed share
 * of the ancestor's claim to protection, any unutilized - "floating" -
 * protection from up the tree is distributed in proportion to each
 * counter's *usage*.  This makes the protection neutral with respect
 * to siblings' declared shares.
 */
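A worked instance of the overcommitted case under the proportional rule
described above. The numbers and the helper are illustrative only; this
is a minimal sketch of the overcommit branch, not the kernel's full
effective_protection():

/*
 * Parent's effective low = 10G.  Two children each declare low = 8G
 * (16G claimed > 10G afforded) and each use 6G:
 *
 *	elow = parent_elow * min(usage, low) / siblings_protected
 *	     = 10G * 6G / (6G + 6G) = 5G per child
 */
static unsigned long proportional_low(unsigned long usage,
				      unsigned long setting,
				      unsigned long parent_elow,
				      unsigned long siblings_protected)
{
	u64 protected = min(usage, setting);

	return div64_u64(protected * parent_elow, siblings_protected);
}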
/**
 * page_counter_calculate_protection - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @counter: the page counter to update
 * @recursive_protection: whether to use memory_recursiveprot behavior
 *
 * Calculates the elow/emin thresholds for the given page counter.
 */
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection)
{
	unsigned long usage, parent_usage;
	struct page_counter *parent = counter->parent;

	if (root == counter)
		return;

	usage = page_counter_read(counter);
	if (!usage)
		return;

	/* Direct children of the root take their declared values. */
	if (parent == root) {
		counter->emin = READ_ONCE(counter->min);
		counter->elow = READ_ONCE(counter->low);
		return;
	}

	parent_usage = page_counter_read(parent);

	WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
			READ_ONCE(counter->min),
			READ_ONCE(parent->emin),
			atomic_long_read(&parent->children_min_usage),
			recursive_protection));

	WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
			READ_ONCE(counter->low),
			READ_ONCE(parent->elow),
			atomic_long_read(&parent->children_low_usage),
			recursive_protection));
}
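Finally, a sketch of a consumer: once the effective thresholds have been
computed, a reclaim path can test them in the spirit of
mem_cgroup_protection() (simplified; the helper name is hypothetical):

static bool below_low_protection(struct page_counter *counter)
{
	/* Usage at or below elow should be spared by reclaim. */
	return page_counter_read(counter) <= READ_ONCE(counter->elow);
}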