@@ -1871,14 +1871,22 @@ static void drain_local_stock(struct work_struct *dummy)
obj_cgroup_put(old);
}

-/*
- * Cache charges(val) to local per_cpu area.
- * This will be consumed by consume_stock() function, later.
- */
-static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
struct memcg_stock_pcp *stock;
unsigned int stock_pages;
+ unsigned long flags;
+
+ VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
+
+ if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+ /*
+ * In the unlikely case we fail to take the percpu stock_lock,
+ * uncharge the memcg directly.
+ */
+ memcg_uncharge(memcg, nr_pages);
+ return;
+ }

stock = this_cpu_ptr(&memcg_stock);
if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
@@ -1891,23 +1899,7 @@ static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
WRITE_ONCE(stock->nr_pages, stock_pages);

if (stock_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
-}
-
-static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
-{
- unsigned long flags;
-
- VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
-
- if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
- /*
- * In case of unlikely failure to lock percpu stock_lock
- * uncharge memcg directly.
- */
- memcg_uncharge(memcg, nr_pages);
- return;
- }
- __refill_stock(memcg, nr_pages);
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
}
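
The net effect of the diff: __refill_stock() is folded into refill_stock(), so the lock acquisition and the trylock fallback now live in one function. When local_trylock_irqsave() cannot take the per-CPU stock_lock, the pages are handed back through memcg_uncharge() instead of being cached, and the refill path never blocks. Below is a minimal userspace sketch of that trylock-or-bypass pattern, with pthread_mutex_trylock() standing in for local_trylock_irqsave(); the stock_lock, cached_pages and uncharge_direct names are illustrative stand-ins, not kernel APIs.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel objects; not real kernel APIs. */
static pthread_mutex_t stock_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cached_pages;

/* Fallback path, analogous to memcg_uncharge(): return the pages
 * immediately instead of caching them in the stock. */
static void uncharge_direct(unsigned int nr_pages)
{
        printf("uncharged %u pages directly\n", nr_pages);
}

/* Trylock-or-bypass: cache the pages only if the lock is free;
 * never block or spin on the refill path. */
static void refill_stock_sketch(unsigned int nr_pages)
{
        if (pthread_mutex_trylock(&stock_lock) != 0) {
                uncharge_direct(nr_pages);
                return;
        }
        cached_pages += nr_pages;
        pthread_mutex_unlock(&stock_lock);
}

int main(void)
{
        refill_stock_sketch(4);
        printf("%u pages cached\n", cached_pages);
        return 0;
}

The trade-off is the same in both: a failed trylock merely skips the per-CPU caching optimization for that one call, which is cheaper than blocking and stays safe whenever the lock cannot be taken.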