// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Swap reorganised 29.12.95, Stephen Tweedie.
* kswapd added: 7.1.96 sct
* Removed kswapd_ctl limits, and swap out as many pages as needed
* to bring the system back to freepages.high: 2.4.97, Rik van Riel.
* Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
* Multiqueue VM started 5.8.00, Rik van Riel.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h> /* for buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/migrate.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/khugepaged.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include <linux/swapops.h>
#include <linux/balloon_compaction.h>
#include <linux/sched/sysctl.h>
#include "internal.h"
#include "swap.h"
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
struct scan_control {
/* How many pages shrink_list() should reclaim */
unsigned long nr_to_reclaim;
/*
* Nodemask of nodes allowed by the caller. If NULL, all nodes
* are scanned.
*/
nodemask_t *nodemask;
/*
* The memory cgroup that hit its limit and as a result is the
* primary target of this reclaim invocation.
*/
struct mem_cgroup *target_mem_cgroup;
/*
* Scan pressure balancing between anon and file LRUs
*/
unsigned long anon_cost;
unsigned long file_cost;
#ifdef CONFIG_MEMCG
/* Swappiness value for proactive reclaim. Always use sc_swappiness()! */
int *proactive_swappiness;
#endif
/* Can active folios be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
unsigned int may_deactivate:2;
unsigned int force_deactivate:1;
unsigned int skipped_deactivate:1;
/* Writepage batching in laptop mode; RECLAIM_WRITE */
unsigned int may_writepage:1;
/* Can mapped folios be reclaimed? */
unsigned int may_unmap:1;
/* Can folios be swapped as part of reclaim? */
unsigned int may_swap:1;
/* Disallow cache_trim_mode from being turned on as part of reclaim? */
unsigned int no_cache_trim_mode:1;
/* Has cache_trim_mode failed at least once? */
unsigned int cache_trim_mode_failed:1;
/* Proactive reclaim invoked by userspace through memory.reclaim */
unsigned int proactive:1;
/*
* Cgroup memory below memory.low is protected as long as we
* don't threaten to OOM. If any cgroup is reclaimed at
* reduced force or passed over entirely due to its memory.low
* setting (memcg_low_skipped), and nothing is reclaimed as a
* result, then go back for one more cycle that reclaims the protected
* memory (memcg_low_reclaim) to avert OOM.
*/
unsigned int memcg_low_reclaim:1;
unsigned int memcg_low_skipped:1;
/* Shared cgroup tree walk failed, rescan the whole tree */
unsigned int memcg_full_walk:1;
unsigned int hibernation_mode:1;
/* One of the zones is ready for compaction */
unsigned int compaction_ready:1;
/* There is easily reclaimable cold cache in the current node */
unsigned int cache_trim_mode:1;
/* The file folios on the current node are dangerously low */
unsigned int file_is_tiny:1;
/* Always discard instead of demoting to lower tier memory */
unsigned int no_demotion:1;
/* Allocation order */
s8 order;
/* Scan (total_size >> priority) pages at once */
s8 priority;
/* The highest zone to isolate folios for reclaim from */
s8 reclaim_idx;
/* This context's GFP mask */
gfp_t gfp_mask;
/* Incremented by the number of inactive pages that were scanned */
unsigned long nr_scanned;
/* Number of pages freed so far during a call to shrink_zones() */
unsigned long nr_reclaimed;
struct {
unsigned int dirty;
unsigned int unqueued_dirty;
unsigned int congested;
unsigned int writeback;
unsigned int immediate;
unsigned int file_taken;
unsigned int taken;
} nr;
/* for recording the reclaimed slab by now */
struct reclaim_state reclaim_state;
};
#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_folio(_folio, _base, _field) \
do { \
if ((_folio)->lru.prev != _base) { \
struct folio *prev; \
\
prev = lru_to_folio(&(_folio->lru)); \
prefetchw(&prev->_field); \
} \
} while (0)
#else
#define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
#endif
/*
* From 0 .. MAX_SWAPPINESS. Higher means more swappy.
*/
int vm_swappiness = 60;
#ifdef CONFIG_MEMCG
/* Returns true for reclaim through cgroup limits or cgroup interfaces. */
static bool cgroup_reclaim(struct scan_control *sc)
{
return sc->target_mem_cgroup;
}
/*
* Returns true for reclaim on the root cgroup. This is true for direct
* allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
*/
static bool root_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
}
/**
* writeback_throttling_sane - is the usual dirty throttling mechanism available?
* @sc: scan_control in question
*
* The normal page dirty throttling mechanism in balance_dirty_pages() is
* completely broken with the legacy memcg and direct stalling in
* shrink_folio_list() is used for throttling instead, which lacks all the
* niceties such as fairness, adaptive pausing, bandwidth proportional
* allocation and configurability.
*
* This function tests whether the vmscan currently in progress can assume
* that the normal dirty throttling mechanism is operational.
*/
static bool writeback_throttling_sane(struct scan_control *sc)
{
if (!cgroup_reclaim(sc))
return true;
#ifdef CONFIG_CGROUP_WRITEBACK
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
return true;
#endif
return false;
}
static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
{
if (sc->proactive && sc->proactive_swappiness)
return *sc->proactive_swappiness;
return mem_cgroup_swappiness(memcg);
}
#else
static bool cgroup_reclaim(struct scan_control *sc)
{
return false;
}
static bool root_reclaim(struct scan_control *sc)
{
return true;
}
static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
}
static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
{
return READ_ONCE(vm_swappiness);
}
#endif
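/*
 * For example (an illustrative sketch; the exact values are up to the
 * caller), proactive reclaim can pin swappiness for a single invocation
 * via the cgroup v2 interface:
 *
 *      echo "512M swappiness=0" > memory.reclaim
 *
 * in which case sc_swappiness() returns the per-invocation value rather
 * than the memcg's configured swappiness.
 */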
static void set_task_reclaim_state(struct task_struct *task,
struct reclaim_state *rs)
{
/* Check for an overwrite */
WARN_ON_ONCE(rs && task->reclaim_state);
/* Check for the nulling of an already-nulled member */
WARN_ON_ONCE(!rs && !task->reclaim_state);
task->reclaim_state = rs;
}
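/*
 * Illustrative pairing sketch (hypothetical caller, not a function in
 * this file): reclaim entry points install the scan_control's embedded
 * reclaim_state on the way in and must clear it with NULL on the way
 * out, or the WARN_ON_ONCE()s above will fire:
 *
 *      set_task_reclaim_state(current, &sc.reclaim_state);
 *      nr_reclaimed = do_try_to_free_pages(...);
 *      set_task_reclaim_state(current, NULL);
 */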
/*
* flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
* scan_control->nr_reclaimed.
*/
static void flush_reclaim_state(struct scan_control *sc)
{
/*
* Currently, reclaim_state->reclaimed includes three types of pages
* freed outside of vmscan:
* (1) Slab pages.
* (2) Clean file pages from pruned inodes (on highmem systems).
* (3) XFS freed buffer pages.
*
* For all of these cases, we cannot universally link the pages to a
* single memcg. For example, a memcg-aware shrinker can free one object
* charged to the target memcg, causing an entire page to be freed.
* If we count the entire page as reclaimed from the memcg, we end up
* overestimating the reclaimed amount (potentially under-reclaiming).
*
* Only count such pages for global reclaim to prevent under-reclaiming
* from the target memcg; preventing unnecessary retries during memcg
* charging and false positives from proactive reclaim.
*
* For uncommon cases where the freed pages were actually mostly
* charged to the target memcg, we end up underestimating the reclaimed
* amount. This should be fine. The freed pages will be uncharged
* anyway, even if they are not counted here properly, and we will be
* able to make forward progress in charging (which is usually in a
* retry loop).
*
* We can go one step further, and report the uncharged objcg pages in
* memcg reclaim, to make reporting more accurate and reduce
* underestimation, but it's probably not worth the complexity for now.
*/
if (current->reclaim_state && root_reclaim(sc)) {
sc->nr_reclaimed += current->reclaim_state->reclaimed;
current->reclaim_state->reclaimed = 0;
}
}
static bool can_demote(int nid, struct scan_control *sc)
{
if (!numa_demotion_enabled)
return false;
if (sc && sc->no_demotion)
return false;
if (next_demotion_node(nid) == NUMA_NO_NODE)
return false;
return true;
}
static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
int nid,
struct scan_control *sc)
{
if (memcg == NULL) {
/*
* For non-memcg reclaim, is there
* space in any swap device?
*/
if (get_nr_swap_pages() > 0)
return true;
} else {
/* Is the memcg below its swap limit? */
if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
return true;
}
/*
* The page cannot be swapped.
*
* Can it be reclaimed from this node via demotion?
*/
return can_demote(nid, sc);
}
/*
* This misses isolated folios, which are not accounted for in order to
* save on counters.
* As the data only determines if reclaim or compaction continues, it is
* not expected that isolated folios will be a dominating factor.
*/
unsigned long zone_reclaimable_pages(struct zone *zone)
{
unsigned long nr;
nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
return nr;
}
/**
* lruvec_lru_size - Returns the number of pages on the given LRU list.
* @lruvec: lru vector
* @lru: lru to use
* @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
*/
static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zone_idx)
{
unsigned long size = 0;
int zid;
for (zid = 0; zid <= zone_idx; zid++) {
struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
if (!managed_zone(zone))
continue;
if (!mem_cgroup_disabled())
size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
else
size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
}
return size;
}
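/*
 * For example (illustrative), the size of the whole inactive file list
 * of a lruvec:
 *
 *      inactive = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE,
 *                                 MAX_NR_ZONES - 1);
 *
 * while passing a smaller zone_idx restricts the count to folios in
 * zones 0..zone_idx, matching a caller's sc->reclaim_idx.
 */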
static unsigned long drop_slab_node(int nid)
{
unsigned long freed = 0;
struct mem_cgroup *memcg = NULL;
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
return freed;
}
void drop_slab(void)
{
int nid;
int shift = 0;
unsigned long freed;
do {
freed = 0;
for_each_online_node(nid) {
if (fatal_signal_pending(current))
return;
freed += drop_slab_node(nid);
}
} while ((freed >> shift++) > 1);
}
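/*
 * drop_slab() is, for example, what serves "echo 2 > /proc/sys/vm/drop_caches".
 * The (freed >> shift++) > 1 termination condition doubles the required
 * amount freed on each pass, so the loop keeps going while shrinkers
 * make substantial progress but is guaranteed to stop once they only
 * trickle out objects.
 */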
static int reclaimer_offset(void)
{
BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
PGSCAN_DIRECT - PGSCAN_KSWAPD);
BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);
if (current_is_kswapd())
return 0;
if (current_is_khugepaged())
return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
}
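/*
 * The returned offset is added to a *_KSWAPD event base, e.g. in
 * shrink_inactive_list() below:
 *
 *      item = PGSTEAL_KSWAPD + reclaimer_offset();
 *
 * resolving to the KSWAPD, KHUGEPAGED or DIRECT variant depending on
 * the current task; the BUILD_BUG_ON()s guarantee the same offset is
 * valid for the PGDEMOTE_* and PGSCAN_* counters as well.
 */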
static inline int is_page_cache_freeable(struct folio *folio)
{
/*
* A freeable page cache folio is referenced only by the caller
* that isolated the folio, the page cache and optional filesystem
* private data at folio->private.
*/
return folio_ref_count(folio) - folio_test_private(folio) ==
1 + folio_nr_pages(folio);
}
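/*
 * Worked example (illustrative): a 4-page folio in the page cache with
 * filesystem private data at folio->private is freeable when
 *
 *      folio_ref_count() == 1 (isolator) + 4 (page cache) + 1 (private) == 6
 *
 * since any additional reference means another user could still reach
 * the folio.
 */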
/*
* We detected a synchronous write error writing a folio out. Probably
* -ENOSPC. We need to propagate that into the address_space for a subsequent
* fsync(), msync() or close().
*
* The tricky part is that after writepage we cannot touch the mapping: nothing
* prevents it from being freed up. But we have a ref on the folio and once
* that folio is locked, the mapping is pinned.
*
* We're allowed to run sleeping folio_lock() here because we know the caller has
* __GFP_FS.
*/
static void handle_write_error(struct address_space *mapping,
struct folio *folio, int error)
{
folio_lock(folio);
if (folio_mapping(folio) == mapping)
mapping_set_error(mapping, error);
folio_unlock(folio);
}
static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
int reclaimable = 0, write_pending = 0;
int i;
/*
* If kswapd is disabled, reschedule if necessary but do not
* throttle as the system is likely near OOM.
*/
if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
return true;
/*
* If there are a lot of dirty/writeback folios then do not
* throttle as throttling will occur when the folios cycle
* towards the end of the LRU if still under writeback.
*/
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
if (!managed_zone(zone))
continue;
reclaimable += zone_reclaimable_pages(zone);
write_pending += zone_page_state_snapshot(zone,
NR_ZONE_WRITE_PENDING);
}
if (2 * write_pending <= reclaimable)
return true;
return false;
}
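/*
 * With illustrative numbers: on a node with 1000 reclaimable pages, the
 * check above skips throttling unless more than 500 of them are dirty
 * or under writeback; below that, reclaim should still be able to make
 * progress on clean folios and stalling would be pointless.
 */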
void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
long timeout, ret;
DEFINE_WAIT(wait);
/*
* Do not throttle user workers, kthreads other than kswapd or
* workqueues. They may be required for reclaim to make
* forward progress (e.g. journalling workqueues or kthreads).
*/
if (!current_is_kswapd() &&
current->flags & (PF_USER_WORKER|PF_KTHREAD)) {
cond_resched();
return;
}
/*
* These figures are pulled out of thin air.
* VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many
* parallel reclaimers which is a short-lived event so the timeout is
* short. Failing to make progress or waiting on writeback are
* potentially long-lived events so use a longer timeout. This is shaky
* logic as a failure to make progress could be due to anything from
* writeback to a slow device to excessive referenced folios at the tail
* of the inactive LRU.
*/
switch(reason) {
case VMSCAN_THROTTLE_WRITEBACK:
timeout = HZ/10;
if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
WRITE_ONCE(pgdat->nr_reclaim_start,
node_page_state(pgdat, NR_THROTTLED_WRITTEN));
}
break;
case VMSCAN_THROTTLE_CONGESTED:
fallthrough;
case VMSCAN_THROTTLE_NOPROGRESS:
if (skip_throttle_noprogress(pgdat)) {
cond_resched();
return;
}
timeout = 1;
break;
case VMSCAN_THROTTLE_ISOLATED:
timeout = HZ/50;
break;
default:
WARN_ON_ONCE(1);
timeout = HZ;
break;
}
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
ret = schedule_timeout(timeout);
finish_wait(wqh, &wait);
if (reason == VMSCAN_THROTTLE_WRITEBACK)
atomic_dec(&pgdat->nr_writeback_throttled);
trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
jiffies_to_usecs(timeout - ret),
reason);
}
/*
* Account for folios written if tasks are throttled waiting on dirty
* folios to clean. If enough folios have been cleaned since throttling
* started then wakeup the throttled tasks.
*/
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
int nr_throttled)
{
unsigned long nr_written;
node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
/*
* This is an inaccurate read as the per-cpu deltas may not
* be synchronised. However, given that the system is
* writeback throttled, it is not worth taking the penalty
* of getting an accurate count. At worst, the throttle
* timeout guarantees forward progress.
*/
nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
READ_ONCE(pgdat->nr_reclaim_start);
if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
}
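/*
 * With illustrative numbers, assuming SWAP_CLUSTER_MAX is 32: if four
 * tasks are throttled, the wakeup above fires once more than 128
 * throttled-written pages have completed since throttling started.
 */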
/* possible outcome of pageout() */
typedef enum {
/* failed to write folio out, folio is locked */
PAGE_KEEP,
/* move folio to the active list, folio is locked */
PAGE_ACTIVATE,
/* folio has been sent to the disk successfully, folio is unlocked */
PAGE_SUCCESS,
/* folio is clean and locked */
PAGE_CLEAN,
} pageout_t;
/*
* pageout is called by shrink_folio_list() for each dirty folio.
* Calls ->writepage().
*/
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
struct swap_iocb **plug)
{
/*
* If the folio is dirty, only perform writeback if that write
* will be non-blocking. To prevent this allocation from being
* stalled by pagecache activity. But note that there may be
* stalls if we need to run get_block(). We could test
* PagePrivate for that.
*
* If this process is currently in __generic_file_write_iter() against
* this folio's queue, we can perform writeback even if that
* will block.
*
* If the folio is swapcache, write it back even if that would
* block, for some throttling. This happens by accident, because
* swap_backing_dev_info is bust: it doesn't reflect the
* congestion state of the swapdevs. Easy to fix, if needed.
*/
if (!is_page_cache_freeable(folio))
return PAGE_KEEP;
if (!mapping) {
/*
* Some data journaling orphaned folios can have
* folio->mapping == NULL while being dirty with clean buffers.
*/
if (folio_test_private(folio)) {
if (try_to_free_buffers(folio)) {
folio_clear_dirty(folio);
pr_info("%s: orphaned folio\n", __func__);
return PAGE_CLEAN;
}
}
return PAGE_KEEP;
}
if (mapping->a_ops->writepage == NULL)
return PAGE_ACTIVATE;
if (folio_clear_dirty_for_io(folio)) {
int res;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = SWAP_CLUSTER_MAX,
.range_start = 0,
.range_end = LLONG_MAX,
.for_reclaim = 1,
.swap_plug = plug,
};
folio_set_reclaim(folio);
res = mapping->a_ops->writepage(&folio->page, &wbc);
if (res < 0)
handle_write_error(mapping, folio, res);
if (res == AOP_WRITEPAGE_ACTIVATE) {
folio_clear_reclaim(folio);
return PAGE_ACTIVATE;
}
if (!folio_test_writeback(folio)) {
/* synchronous write or broken a_ops? */
folio_clear_reclaim(folio);
}
trace_mm_vmscan_write_folio(folio);
node_stat_add_folio(folio, NR_VMSCAN_WRITE);
return PAGE_SUCCESS;
}
return PAGE_CLEAN;
}
/*
* Same as remove_mapping, but if the folio is removed from the mapping, it
* gets returned with a refcount of 0.
*/
static int __remove_mapping(struct address_space *mapping, struct folio *folio,
bool reclaimed, struct mem_cgroup *target_memcg)
{
int refcount;
void *shadow = NULL;
BUG_ON(!folio_test_locked(folio));
BUG_ON(mapping != folio_mapping(folio));
if (!folio_test_swapcache(folio))
spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
/*
* The non racy check for a busy folio.
*
* Must be careful with the order of the tests. When someone has
* a ref to the folio, it may be possible that they dirty it then
* drop the reference. So if the dirty flag is tested before the
* refcount here, then the following race may occur:
*
* get_user_pages(&page);
* [user mapping goes away]
* write_to(page);
*                              !folio_test_dirty(folio)  [good]
* folio_set_dirty(folio);
* folio_put(folio);
*                              !refcount(folio)  [good, discard it]
*
* [oops, our write_to data is lost]
*
* Reversing the order of the tests ensures such a situation cannot
* escape unnoticed. The smp_rmb is needed to ensure the folio->flags
* load is not satisfied before that of folio->_refcount.
*
* Note that if the dirty flag is always set via folio_mark_dirty,
* and thus under the i_pages lock, then this ordering is not required.
*/
refcount = 1 + folio_nr_pages(folio);
if (!folio_ref_freeze(folio, refcount))
goto cannot_free;
/* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */
if (unlikely(folio_test_dirty(folio))) {
folio_ref_unfreeze(folio, refcount);
goto cannot_free;
}
if (folio_test_swapcache(folio)) {
swp_entry_t swap = folio->swap;
if (reclaimed && !mapping_exiting(mapping))
shadow = workingset_eviction(folio, target_memcg);
__delete_from_swap_cache(folio, swap, shadow);
mem_cgroup_swapout(folio, swap);
xa_unlock_irq(&mapping->i_pages);
put_swap_folio(folio, swap);
} else {
void (*free_folio)(struct folio *);
free_folio = mapping->a_ops->free_folio;
/*
* Remember a shadow entry for reclaimed file cache in
* order to detect refaults, thus thrashing, later on.
*
* But don't store shadows in an address space that is
* already exiting. This is not just an optimization,
* inode reclaim needs to empty out the radix tree or
* the nodes are lost. Don't plant shadows behind its
* back.
*
* We also don't store shadows for DAX mappings because the
* only page cache folios found in these are zero pages
* covering holes, and because we don't want to mix DAX
* exceptional entries and shadow exceptional entries in the
* same address_space.
*/
if (reclaimed && folio_is_file_lru(folio) &&
!mapping_exiting(mapping) && !dax_mapping(mapping))
shadow = workingset_eviction(folio, target_memcg);
__filemap_remove_folio(folio, shadow);
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock);
if (free_folio)
free_folio(folio);
}
return 1;
cannot_free:
xa_unlock_irq(&mapping->i_pages);
if (!folio_test_swapcache(folio))
spin_unlock(&mapping->host->i_lock);
return 0;
}
/**
* remove_mapping() - Attempt to remove a folio from its mapping.
* @mapping: The address space.
* @folio: The folio to remove.
*
* If the folio is dirty, under writeback or if someone else has a ref
* on it, removal will fail.
* Return: The number of pages removed from the mapping. 0 if the folio
* could not be removed.
* Context: The caller should have a single refcount on the folio and
* hold its lock.
*/
long remove_mapping(struct address_space *mapping, struct folio *folio)
{
if (__remove_mapping(mapping, folio, false, NULL)) {
/*
* Unfreezing the refcount with 1 effectively
* drops the pagecache ref for us without requiring another
* atomic operation.
*/
folio_ref_unfreeze(folio, 1);
return folio_nr_pages(folio);
}
return 0;
}
/**
* folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
* @folio: Folio to be returned to an LRU list.
*
* Add previously isolated @folio to appropriate LRU list.
* The folio may still be unevictable for other reasons.
*
* Context: lru_lock must not be held, interrupts must be enabled.
*/
void folio_putback_lru(struct folio *folio)
{
folio_add_lru(folio);
folio_put(folio); /* drop ref from isolate */
}
enum folio_references {
FOLIOREF_RECLAIM,
FOLIOREF_RECLAIM_CLEAN,
FOLIOREF_KEEP,
FOLIOREF_ACTIVATE,
};
static enum folio_references folio_check_references(struct folio *folio,
struct scan_control *sc)
{
int referenced_ptes, referenced_folio;
unsigned long vm_flags;
referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
&vm_flags);
referenced_folio = folio_test_clear_referenced(folio);
/*
* The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
* Let the folio, now marked Mlocked, be moved to the unevictable list.
*/
if (vm_flags & VM_LOCKED)
return FOLIOREF_ACTIVATE;
/* rmap lock contention: rotate */
if (referenced_ptes == -1)
return FOLIOREF_KEEP;
if (referenced_ptes) {
/*
* All mapped folios start out with page table
* references from the instantiating fault, so we need
* to look twice if a mapped file/anon folio is used more
* than once.
*
* Mark it and spare it for another trip around the
* inactive list. Another page table reference will
* lead to its activation.
*
* Note: the mark is set for activated folios as well
* so that recently deactivated but used folios are
* quickly recovered.
*/
folio_set_referenced(folio);
if (referenced_folio || referenced_ptes > 1)
return FOLIOREF_ACTIVATE;
/*
* Activate file-backed executable folios after first usage.
*/
if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
return FOLIOREF_ACTIVATE;
return FOLIOREF_KEEP;
}
/* Reclaim if clean, defer dirty folios to writeback */
if (referenced_folio && folio_is_file_lru(folio))
return FOLIOREF_RECLAIM_CLEAN;
return FOLIOREF_RECLAIM;
}
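/*
 * Informal summary of folio_check_references(): VM_LOCKED folios are
 * activated towards the unevictable list; rmap lock contention keeps
 * the folio for a later pass; more than one PTE reference, a PTE
 * reference plus the referenced flag, or any PTE reference on an
 * executable file folio activates it; a single fresh PTE reference
 * earns one more trip around the inactive list; everything else is
 * reclaimed, with previously referenced file folios only reclaimed if
 * clean (FOLIOREF_RECLAIM_CLEAN).
 */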
/* Check if a folio is dirty or under writeback */
static void folio_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct address_space *mapping;
/*
* Anonymous folios are not handled by flushers and must be written
* from reclaim context. Do not stall reclaim based on them.
* MADV_FREE anonymous folios are put on the inactive file list too,
* so they could be mistakenly treated as file LRU; a further anon
* test is needed.
*/
if (!folio_is_file_lru(folio) ||
(folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
*dirty = false;
*writeback = false;
return;
}
/* By default assume that the folio flags are accurate */
*dirty = folio_test_dirty(folio);
*writeback = folio_test_writeback(folio);
/* Verify dirty/writeback state if the filesystem supports it */
if (!folio_test_private(folio))
return;
mapping = folio_mapping(folio);
if (mapping && mapping->a_ops->is_dirty_writeback)
mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}
struct folio *alloc_migrate_folio(struct folio *src, unsigned long private)
{
struct folio *dst;
nodemask_t *allowed_mask;
struct migration_target_control *mtc;
mtc = (struct migration_target_control *)private;
allowed_mask = mtc->nmask;
/*
* Make sure we allocate from the target node first, also trying to
* demote or reclaim pages from the target node via kswapd if we are
* low on free memory there. If we don't do this, and there is free
* memory on a slower (lower) memory tier, we would start allocating
* pages from the slower tiers without even forcing a demotion of cold
* pages from the target tier. This can result in the kernel placing
* hot pages in slower (lower) memory tiers.
*/
mtc->nmask = NULL;
mtc->gfp_mask |= __GFP_THISNODE;
dst = alloc_migration_target(src, (unsigned long)mtc);
if (dst)
return dst;
mtc->gfp_mask &= ~__GFP_THISNODE;
mtc->nmask = allowed_mask;
return alloc_migration_target(src, (unsigned long)mtc);
}
/*
* Take folios on @demote_folios and attempt to demote them to another node.
* Folios which are not demoted are left on @demote_folios.
*/
static unsigned int demote_folio_list(struct list_head *demote_folios,
struct pglist_data *pgdat)
{
int target_nid = next_demotion_node(pgdat->node_id);
unsigned int nr_succeeded;
nodemask_t allowed_mask;
struct migration_target_control mtc = {
/*
* Allocate from 'node', or fail quickly and quietly.
* When this happens, 'page' will likely just be discarded
* instead of migrated.
*/
.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
__GFP_NOMEMALLOC | GFP_NOWAIT,
.nid = target_nid,
.nmask = &allowed_mask,
.reason = MR_DEMOTION,
};
if (list_empty(demote_folios))
return 0;
if (target_nid == NUMA_NO_NODE)
return 0;
node_get_allowed_targets(pgdat, &allowed_mask);
/* Demotion ignores all cpuset and mempolicy settings */
migrate_pages(demote_folios, alloc_migrate_folio, NULL,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
mod_node_page_state(pgdat, PGDEMOTE_KSWAPD + reclaimer_offset(),
nr_succeeded);
return nr_succeeded;
}
static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
{
if (gfp_mask & __GFP_FS)
return true;
if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
return false;
/*
* We can "enter_fs" for swap-cache with only __GFP_IO
* providing this isn't SWP_FS_OPS.
* ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
}
/*
* shrink_folio_list() returns the number of reclaimed pages
*/
static unsigned int shrink_folio_list(struct list_head *folio_list,
struct pglist_data *pgdat, struct scan_control *sc,
struct reclaim_stat *stat, bool ignore_references)
{
struct folio_batch free_folios;
LIST_HEAD(ret_folios);
LIST_HEAD(demote_folios);
unsigned int nr_reclaimed = 0;
unsigned int pgactivate = 0;
bool do_demote_pass;
struct swap_iocb *plug = NULL;
folio_batch_init(&free_folios);
memset(stat, 0, sizeof(*stat));
cond_resched();
do_demote_pass = can_demote(pgdat->node_id, sc);
retry:
while (!list_empty(folio_list)) {
struct address_space *mapping;
struct folio *folio;
enum folio_references references = FOLIOREF_RECLAIM;
bool dirty, writeback;
unsigned int nr_pages;
cond_resched();
folio = lru_to_folio(folio_list);
list_del(&folio->lru);
if (!folio_trylock(folio))
goto keep;
VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
nr_pages = folio_nr_pages(folio);
/* Account the number of base pages */
sc->nr_scanned += nr_pages;
if (unlikely(!folio_evictable(folio)))
goto activate_locked;
if (!sc->may_unmap && folio_mapped(folio))
goto keep_locked;
/* folio_update_gen() tried to promote this page? */
if (lru_gen_enabled() && !ignore_references &&
folio_mapped(folio) && folio_test_referenced(folio))
goto keep_locked;
/*
* The number of dirty pages determines if a node is marked
* reclaim_congested. kswapd will stall and start writing
* folios if the tail of the LRU is all dirty unqueued folios.
*/
folio_check_dirty_writeback(folio, &dirty, &writeback);
if (dirty || writeback)
stat->nr_dirty += nr_pages;
if (dirty && !writeback)
stat->nr_unqueued_dirty += nr_pages;
/*
* Treat this folio as congested if folios are cycling
* through the LRU so quickly that the folios marked
* for immediate reclaim are making it to the end of
* the LRU a second time.
*/
if (writeback && folio_test_reclaim(folio))
stat->nr_congested += nr_pages;
/*
* If a folio at the tail of the LRU is under writeback, there
* are three cases to consider.
*
* 1) If reclaim is encountering an excessive number
* of folios under writeback and this folio has both
* the writeback and reclaim flags set, then it
* indicates that folios are being queued for I/O but
* are being recycled through the LRU before the I/O
* can complete. Waiting on the folio itself risks an
* indefinite stall if it is impossible to writeback
* the folio due to I/O error or disconnected storage
* so instead note that the LRU is being scanned too
* quickly and the caller can stall after the folio
* list has been processed.
*
* 2) Global or new memcg reclaim encounters a folio that is
* not marked for immediate reclaim, or the caller does not
* have __GFP_FS (or __GFP_IO if it's simply going to swap,
* not to fs). In this case mark the folio for immediate
* reclaim and continue scanning.
*
* Require may_enter_fs() because we would wait on fs, which
* may not have submitted I/O yet. And the loop driver might
* enter reclaim, and deadlock if it waits on a folio for
* which it is needed to do the write (loop masks off
* __GFP_IO|__GFP_FS for this reason); but more thought
* would probably show more reasons.
*
* 3) Legacy memcg encounters a folio that already has the
* reclaim flag set. memcg does not have any dirty folio
* throttling so we could easily OOM just because too many
* folios are in writeback and there is nothing else to
* reclaim. Wait for the writeback to complete.
*
* In cases 1) and 2) we activate the folios to get them out of
* the way while we continue scanning for clean folios on the
* inactive list and refilling from the active list. The
* observation here is that waiting for disk writes is more
* expensive than potentially causing reloads down the line.
* Since they're marked for immediate reclaim, they won't put
* memory pressure on the cache working set any longer than it
* takes to write them to disk.
*/
if (folio_test_writeback(folio)) {
/* Case 1 above */
if (current_is_kswapd() &&
folio_test_reclaim(folio) &&
test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
stat->nr_immediate += nr_pages;
goto activate_locked;
/* Case 2 above */
} else if (writeback_throttling_sane(sc) ||
!folio_test_reclaim(folio) ||
!may_enter_fs(folio, sc->gfp_mask)) {
/*
* This is slightly racy -
* folio_end_writeback() might have
* just cleared the reclaim flag, then
* setting the reclaim flag here ends up
* interpreted as the readahead flag - but
* that does not matter enough to care.
* What we do want is for this folio to
* have the reclaim flag set next time
* memcg reclaim reaches the tests above,
* so it will then wait for writeback to
* avoid OOM; and it's also appropriate
* in global reclaim.
*/
folio_set_reclaim(folio);
stat->nr_writeback += nr_pages;
goto activate_locked;
/* Case 3 above */
} else {
folio_unlock(folio);
folio_wait_writeback(folio);
/* then go back and try same folio again */
list_add_tail(&folio->lru, folio_list);
continue;
}
}
if (!ignore_references)
references = folio_check_references(folio, sc);
switch (references) {
case FOLIOREF_ACTIVATE:
goto activate_locked;
case FOLIOREF_KEEP:
stat->nr_ref_keep += nr_pages;
goto keep_locked;
case FOLIOREF_RECLAIM:
case FOLIOREF_RECLAIM_CLEAN:
; /* try to reclaim the folio below */
}
/*
* Before reclaiming the folio, try to relocate
* its contents to another node.
*/
if (do_demote_pass &&
(thp_migration_supported() || !folio_test_large(folio))) {
list_add(&folio->lru, &demote_folios);
folio_unlock(folio);
continue;
}
/*
* Anonymous process memory has backing store?
* Try to allocate it some swap space here.
* Lazyfree folios can be freed directly.
*/
if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
if (!folio_test_swapcache(folio)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
if (folio_maybe_dma_pinned(folio))
goto keep_locked;
if (folio_test_large(folio)) {
/* cannot split folio, skip it */
if (!can_split_folio(folio, NULL))
goto activate_locked;
/*
* Split partially mapped folios right away.
* We can free the unmapped pages without IO.
*/
if (data_race(!list_empty(&folio->_deferred_list)) &&
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
if (!add_to_swap(folio)) {
int __maybe_unused order = folio_order(folio);
if (!folio_test_large(folio))
goto activate_locked_split;
/* Fallback to swap normal pages */
if (split_folio_to_list(folio, folio_list))
goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (nr_pages >= HPAGE_PMD_NR) {
count_memcg_folio_events(folio,
THP_SWPOUT_FALLBACK, 1);
count_vm_event(THP_SWPOUT_FALLBACK);
}
count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
#endif
if (!add_to_swap(folio))
goto activate_locked_split;
}
}
} else if (folio_test_swapbacked(folio) &&
folio_test_large(folio)) {
/* Split shmem folio */
if (split_folio_to_list(folio, folio_list))
goto keep_locked;
}
/*
* If the folio was split above, the tail pages will make
* their own pass through this function and be accounted
* then.
*/
if ((nr_pages > 1) && !folio_test_large(folio)) {
sc->nr_scanned -= (nr_pages - 1);
nr_pages = 1;
}
/*
* The folio is mapped into the page tables of one or more
* processes. Try to unmap it here.
*/
if (folio_mapped(folio)) {
enum ttu_flags flags = TTU_BATCH_FLUSH;
bool was_swapbacked = folio_test_swapbacked(folio);
if (folio_test_pmd_mappable(folio))
flags |= TTU_SPLIT_HUGE_PMD;
/*
* Without TTU_SYNC, try_to_unmap will only begin to
* hold PTL from the first present PTE within a large
* folio. Some initial PTEs might be skipped due to
* races with parallel PTE writes in which PTEs can be
* cleared temporarily before new present values are
* written. This can leave a large folio still mapped
* while some of its subpages have already been unmapped
* by try_to_unmap; TTU_SYNC makes try_to_unmap acquire
* the PTL from the first PTE, eliminating the influence
* of temporary PTE values.
*/
if (folio_test_large(folio))
flags |= TTU_SYNC;
try_to_unmap(folio, flags);
if (folio_mapped(folio)) {
stat->nr_unmap_fail += nr_pages;
if (!was_swapbacked &&
folio_test_swapbacked(folio))
stat->nr_lazyfree_fail += nr_pages;
goto activate_locked;
}
}
/*
* Folio is unmapped now so it cannot be newly pinned anymore.
* No point in trying to reclaim folio if it is pinned.
* Furthermore we don't want to reclaim underlying fs metadata
* if the folio is pinned and thus potentially modified by the
* pinning process as that may upset the filesystem.
*/
if (folio_maybe_dma_pinned(folio))
goto activate_locked;
mapping = folio_mapping(folio);
if (folio_test_dirty(folio)) {
/*
* Only kswapd can writeback filesystem folios
* to avoid risk of stack overflow. But avoid
* injecting inefficient single-folio I/O into
* flusher writeback as much as possible: only
* write folios when we've encountered many
* dirty folios, and when we've already scanned
* the rest of the LRU for clean folios and see
* the same dirty folios again (with the reclaim
* flag set).
*/
if (folio_is_file_lru(folio) &&
(!current_is_kswapd() ||
!folio_test_reclaim(folio) ||
!test_bit(PGDAT_DIRTY, &pgdat->flags))) {
/*
* Immediately reclaim when written back.
* Similar in principle to folio_deactivate()
* except we already have the folio isolated
* and know it's dirty
*/
node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
nr_pages);
folio_set_reclaim(folio);
goto activate_locked;
}
if (references == FOLIOREF_RECLAIM_CLEAN)
goto keep_locked;
if (!may_enter_fs(folio, sc->gfp_mask))
goto keep_locked;
if (!sc->may_writepage)
goto keep_locked;
/*
* Folio is dirty. Flush the TLB if a writable entry
* potentially exists to avoid CPU writes after I/O
* starts and then write it out here.
*/
try_to_unmap_flush_dirty();
switch (pageout(folio, mapping, &plug)) {
case PAGE_KEEP:
goto keep_locked;
case PAGE_ACTIVATE:
goto activate_locked;
case PAGE_SUCCESS:
stat->nr_pageout += nr_pages;
if (folio_test_writeback(folio))
goto keep;
if (folio_test_dirty(folio))
goto keep;
/*
* A synchronous write - probably a ramdisk. Go
* ahead and try to reclaim the folio.
*/
if (!folio_trylock(folio))
goto keep;
if (folio_test_dirty(folio) ||
folio_test_writeback(folio))
goto keep_locked;
mapping = folio_mapping(folio);
fallthrough;
case PAGE_CLEAN:
; /* try to free the folio below */
}
}
/*
* If the folio has buffers, try to free the buffer
* mappings associated with this folio. If we succeed
* we try to free the folio as well.
*
* We do this even if the folio is dirty.
* filemap_release_folio() does not perform I/O, but it
* is possible for a folio to have the dirty flag set,
* but it is actually clean (all its buffers are clean).
* This happens if the buffers were written out directly,
* with submit_bh(). ext3 will do this, as well as
* the blockdev mapping. filemap_release_folio() will
* discover that cleanness and will drop the buffers
* and mark the folio clean - it can be freed.
*
* Rarely, folios can have buffers and no ->mapping.
* These are the folios which were not successfully
* invalidated in truncate_cleanup_folio(). We try to
* drop those buffers here and if that worked, and the
* folio is no longer mapped into process address space
* (refcount == 1) it can be freed. Otherwise, leave
* the folio on the LRU so it is swappable.
*/
if (folio_needs_release(folio)) {
if (!filemap_release_folio(folio, sc->gfp_mask))
goto activate_locked;
if (!mapping && folio_ref_count(folio) == 1) {
folio_unlock(folio);
if (folio_put_testzero(folio))
goto free_it;
else {
/*
* rare race with speculative reference.
* the speculative reference will free
* this folio shortly, so we may
* increment nr_reclaimed here (and
* leave it off the LRU).
*/
nr_reclaimed += nr_pages;
continue;
}
}
}
if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
/* follow __remove_mapping for reference */
if (!folio_ref_freeze(folio, 1))
goto keep_locked;
/*
* The folio has only one reference left, which is
* from the isolation. After the caller puts the
* folio back on the lru and drops the reference, the
* folio will be freed anyway. It doesn't matter
* which lru it goes on. So we don't bother checking
* the dirty flag here.
*/
count_vm_events(PGLAZYFREED, nr_pages);
count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
} else if (!mapping || !__remove_mapping(mapping, folio, true,
sc->target_mem_cgroup))
goto keep_locked;
folio_unlock(folio);
free_it:
/*
* Folio may get swapped out as a whole, need to account
* all pages in it.
*/
nr_reclaimed += nr_pages;
folio_undo_large_rmappable(folio);
if (folio_batch_add(&free_folios, folio) == 0) {
mem_cgroup_uncharge_folios(&free_folios);
try_to_unmap_flush();
free_unref_folios(&free_folios);
}
continue;
activate_locked_split:
/*
* The tail pages that failed to be added to the swap cache
* reach here. Fix up nr_scanned and nr_pages.
*/
if (nr_pages > 1) {
sc->nr_scanned -= (nr_pages - 1);
nr_pages = 1;
}
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
if (folio_test_swapcache(folio) &&
(mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
folio_free_swap(folio);
VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
if (!folio_test_mlocked(folio)) {
int type = folio_is_file_lru(folio);
folio_set_active(folio);
stat->nr_activate[type] += nr_pages;
count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
}
keep_locked:
folio_unlock(folio);
keep:
list_add(&folio->lru, &ret_folios);
VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
folio_test_unevictable(folio), folio);
}
/* 'folio_list' is always empty here */
/* Migrate folios selected for demotion */
nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
/* Folios that could not be demoted are still in @demote_folios */
if (!list_empty(&demote_folios)) {
/* Folios which weren't demoted go back on @folio_list */
list_splice_init(&demote_folios, folio_list);
/*
* goto retry to reclaim the undemoted folios in folio_list if
* desired.
*
* Reclaiming directly from top tier nodes is not often desired
* due to it breaking the LRU ordering: in general memory
* should be reclaimed from lower tier nodes and demoted from
* top tier nodes.
*
* However, disabling reclaim from top tier nodes entirely
* would cause ooms in edge scenarios where lower tier memory
* is unreclaimable for whatever reason, eg memory being
* mlocked or too hot to reclaim. We can disable reclaim
* from top tier nodes in proactive reclaim though as that is
* not real memory pressure.
*/
if (!sc->proactive) {
do_demote_pass = false;
goto retry;
}
}
pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
mem_cgroup_uncharge_folios(&free_folios);
try_to_unmap_flush();
free_unref_folios(&free_folios);
list_splice(&ret_folios, folio_list);
count_vm_events(PGACTIVATE, pgactivate);
if (plug)
swap_write_unplug(plug);
return nr_reclaimed;
}
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
struct list_head *folio_list)
{
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
};
struct reclaim_stat stat;
unsigned int nr_reclaimed;
struct folio *folio, *next;
LIST_HEAD(clean_folios);
unsigned int noreclaim_flag;
list_for_each_entry_safe(folio, next, folio_list, lru) {
if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
!folio_test_dirty(folio) && !__folio_test_movable(folio) &&
!folio_test_unevictable(folio)) {
folio_clear_active(folio);
list_move(&folio->lru, &clean_folios);
}
}
/*
* We should be safe here since we are only dealing with file pages and
* we are not kswapd and therefore cannot write dirty file pages. But
* call memalloc_noreclaim_save() anyway, just in case these conditions
* change in the future.
*/
noreclaim_flag = memalloc_noreclaim_save();
nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
&stat, true);
memalloc_noreclaim_restore(noreclaim_flag);
list_splice(&clean_folios, folio_list);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-(long)nr_reclaimed);
/*
* Since lazyfree pages are isolated from the file LRU from the
* beginning, they rotate back to the anonymous LRU in the end if the
* discard failed, so the isolated counts end up mismatched.
* Compensate the isolated counts for both LRU lists.
*/
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
stat.nr_lazyfree_fail);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-(long)stat.nr_lazyfree_fail);
return nr_reclaimed;
}
/*
* Update LRU sizes after isolating pages. The LRU size updates must
* be complete before mem_cgroup_update_lru_size due to a sanity check.
*/
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
enum lru_list lru, unsigned long *nr_zone_taken)
{
int zid;
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
if (!nr_zone_taken[zid])
continue;
update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
}
}
#ifdef CONFIG_CMA
/*
* It is a waste of effort to scan and reclaim CMA pages if they are not
* available to the current allocation context. Kswapd cannot be enrolled
* because, with sc->gfp_mask = GFP_KERNEL, it cannot distinguish this scenario.
*/
static bool skip_cma(struct folio *folio, struct scan_control *sc)
{
return !current_is_kswapd() &&
gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
folio_migratetype(folio) == MIGRATE_CMA;
}
#else
static bool skip_cma(struct folio *folio, struct scan_control *sc)
{
return false;
}
#endif
/*
* Isolate folios from the lruvec to fill the @dst list, scanning up to
* nr_to_scan entries.
*
* lruvec->lru_lock is heavily contended. Some of the functions that
* shrink the lists perform better by taking out a batch of pages
* and working on them outside the LRU lock.
*
* For pagecache intensive workloads, this function is the hottest
* spot in the kernel (apart from copy_*_user functions).
*
* lruvec->lru_lock must be held before calling this function.
*
* @nr_to_scan: The number of eligible pages to look through on the list.
* @lruvec: The LRU vector to pull pages from.
* @dst: The temp list to put pages on to.
* @nr_scanned: The number of pages that were scanned.
* @sc: The scan_control struct for this reclaim session
* @lru: LRU list id for isolating
*
* returns how many pages were moved onto *@dst.
*/
static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
struct lruvec *lruvec, struct list_head *dst,
unsigned long *nr_scanned, struct scan_control *sc,
enum lru_list lru)
{
struct list_head *src = &lruvec->lists[lru];
unsigned long nr_taken = 0;
unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long skipped = 0;
unsigned long scan, total_scan, nr_pages;
LIST_HEAD(folios_skipped);
total_scan = 0;
scan = 0;
while (scan < nr_to_scan && !list_empty(src)) {
struct list_head *move_to = src;
struct folio *folio;
folio = lru_to_folio(src);
prefetchw_prev_lru_folio(folio, src, flags);
nr_pages = folio_nr_pages(folio);
total_scan += nr_pages;
if (folio_zonenum(folio) > sc->reclaim_idx ||
skip_cma(folio, sc)) {
nr_skipped[folio_zonenum(folio)] += nr_pages;
move_to = &folios_skipped;
goto move;
}
/*
* Do not count skipped folios because that makes the function
* return with no isolated folios if the LRU mostly contains
* ineligible folios. This causes the VM to not reclaim any
* folios, triggering a premature OOM.
* Account all pages in a folio.
*/
scan += nr_pages;
if (!folio_test_lru(folio))
goto move;
if (!sc->may_unmap && folio_mapped(folio))
goto move;
/*
* Be careful not to clear the lru flag until after we're
* sure the folio is not being freed elsewhere -- the
* folio release code relies on it.
*/
if (unlikely(!folio_try_get(folio)))
goto move;
if (!folio_test_clear_lru(folio)) {
/* Another thread is already isolating this folio */
folio_put(folio);
goto move;
}
nr_taken += nr_pages;
nr_zone_taken[folio_zonenum(folio)] += nr_pages;
move_to = dst;
move:
list_move(&folio->lru, move_to);
}
/*
* Splice any skipped folios to the start of the LRU list. Note that
* this disrupts the LRU order when reclaiming for lower zones but
* we cannot splice to the tail. If we did, the SWAP_CLUSTER_MAX
* scanning would soon rescan the same folios only to skip them again,
* wasting lots of CPU cycles.
*/
if (!list_empty(&folios_skipped)) {
int zid;
list_splice(&folios_skipped, src);
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
if (!nr_skipped[zid])
continue;
__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
skipped += nr_skipped[zid];
}
}
*nr_scanned = total_scan;
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
total_scan, skipped, nr_taken, lru);
update_lru_sizes(lruvec, lru, nr_zone_taken);
return nr_taken;
}
/**
* folio_isolate_lru() - Try to isolate a folio from its LRU list.
* @folio: Folio to isolate from its LRU list.
*
* Isolate a @folio from an LRU list and adjust the vmstat statistic
* corresponding to whatever LRU list the folio was on.
*
* The folio will have its LRU flag cleared. If it was found on the
* active list, it will have the Active flag set. If it was found on the
* unevictable list, it will have the Unevictable flag set. These flags
* may need to be cleared by the caller before letting the page go.
*
* Context:
*
* (1) Must be called with an elevated refcount on the folio. This is a
* fundamental difference from isolate_lru_folios() (which is called
* without a stable reference).
* (2) The lru_lock must not be held.
* (3) Interrupts must be enabled.
*
* Return: true if the folio was removed from an LRU list.
* false if the folio was not on an LRU list.
*/
bool folio_isolate_lru(struct folio *folio)
{
bool ret = false;
VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);
if (folio_test_clear_lru(folio)) {
struct lruvec *lruvec;
folio_get(folio);
lruvec = folio_lruvec_lock_irq(folio);
lruvec_del_folio(lruvec, folio);
unlock_page_lruvec_irq(lruvec);
ret = true;
}
return ret;
}
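/*
 * Usage sketch (hypothetical caller): with a reference already held,
 * e.g. from GUP or a page cache lookup,
 *
 *      if (folio_isolate_lru(folio)) {
 *              ... operate on the folio off-LRU ...
 *              folio_putback_lru(folio);
 *      }
 *
 * On success the folio carries an extra reference taken by the
 * isolation, which folio_putback_lru() drops.
 */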
/*
* A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
* then get rescheduled. When there is a massive number of tasks doing page
* allocation, such sleeping direct reclaimers may keep piling up on each CPU;
* the LRU list will go small and be scanned faster than necessary, leading to
* unnecessary swapping, thrashing and OOM.
*/
static bool too_many_isolated(struct pglist_data *pgdat, int file,
struct scan_control *sc)
{
unsigned long inactive, isolated;
bool too_many;
if (current_is_kswapd())
return false;
if (!writeback_throttling_sane(sc))
return false;
if (file) {
inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
} else {
inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
}
/*
* GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
* won't get blocked by normal direct-reclaimers, forming a circular
* deadlock.
*/
if (gfp_has_io_fs(sc->gfp_mask))
inactive >>= 3;
too_many = isolated > inactive;
/* Wake up tasks throttled due to too_many_isolated. */
if (!too_many)
wake_throttle_isolated(pgdat);
return too_many;
}
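/*
 * Worked example (illustrative): a GFP_KERNEL direct reclaimer (both
 * __GFP_IO and __GFP_FS set) scanning the file LRU is throttled once
 *
 *      NR_ISOLATED_FILE > NR_INACTIVE_FILE / 8
 *
 * whereas a GFP_NOIO/GFP_NOFS caller is only throttled when isolated
 * folios outnumber the entire inactive list.
 */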
/*
* move_folios_to_lru() moves folios from private @list to appropriate LRU list.
*
* Returns the number of pages moved to the given lruvec.
*/
static unsigned int move_folios_to_lru(struct lruvec *lruvec,
struct list_head *list)
{
int nr_pages, nr_moved = 0;
struct folio_batch free_folios;
folio_batch_init(&free_folios);
while (!list_empty(list)) {
struct folio *folio = lru_to_folio(list);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
list_del(&folio->lru);
if (unlikely(!folio_evictable(folio))) {
spin_unlock_irq(&lruvec->lru_lock);
folio_putback_lru(folio);
spin_lock_irq(&lruvec->lru_lock);
continue;
}
/*
* The folio_set_lru needs to be kept here for list integrity.
* Otherwise:
*   #0 move_folios_to_lru               #1 release_pages
*   if (!folio_put_testzero())
*                                       if (folio_put_testzero())
*                                         !lru //skip lru_lock
*   folio_set_lru()
*   list_add(&folio->lru,)
*                                         list_add(&folio->lru,)
*/
folio_set_lru(folio);
if (unlikely(folio_put_testzero(folio))) {
__folio_clear_lru_flags(folio);
folio_undo_large_rmappable(folio);
if (folio_batch_add(&free_folios, folio) == 0) {
spin_unlock_irq(&lruvec->lru_lock);
mem_cgroup_uncharge_folios(&free_folios);
free_unref_folios(&free_folios);
spin_lock_irq(&lruvec->lru_lock);
}
continue;
}
/*
* All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
lruvec_add_folio(lruvec, folio);
nr_pages = folio_nr_pages(folio);
nr_moved += nr_pages;
if (folio_test_active(folio))
workingset_age_nonresident(lruvec, nr_pages);
}
if (free_folios.nr) {
spin_unlock_irq(&lruvec->lru_lock);
mem_cgroup_uncharge_folios(&free_folios);
free_unref_folios(&free_folios);
spin_lock_irq(&lruvec->lru_lock);
}
return nr_moved;
}
/*
* If a kernel thread (such as nfsd for loop-back mounts) services a backing
* device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case
* we should not throttle. Otherwise it is safe to do so.
*/
static int current_may_throttle(void)
{
return !(current->flags & PF_LOCAL_THROTTLE);
}
/*
* shrink_inactive_list() is a helper for shrink_node(). It returns the number
* of reclaimed pages
*/
static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
struct lruvec *lruvec, struct scan_control *sc,
enum lru_list lru)
{
LIST_HEAD(folio_list);
unsigned long nr_scanned;
unsigned int nr_reclaimed = 0;
unsigned long nr_taken;
struct reclaim_stat stat;
bool file = is_file_lru(lru);
enum vm_event_item item;
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
bool stalled = false;
while (unlikely(too_many_isolated(pgdat, file, sc))) {
if (stalled)
return 0;
/* wait a bit for the reclaimer. */
stalled = true;
reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
/* We are about to die and free our memory. Return now. */
if (fatal_signal_pending(current))
return SWAP_CLUSTER_MAX;
}
lru_add_drain();
spin_lock_irq(&lruvec->lru_lock);
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
&nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
item = PGSCAN_KSWAPD + reclaimer_offset();
if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_scanned);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
__count_vm_events(PGSCAN_ANON + file, nr_scanned);
spin_unlock_irq(&lruvec->lru_lock);
if (nr_taken == 0)
return 0;
nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &folio_list);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
item = PGSTEAL_KSWAPD + reclaimer_offset();
if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_reclaimed);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
spin_unlock_irq(&lruvec->lru_lock);
lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
/*
* If dirty folios are scanned that are not queued for IO, it
* implies that flushers are not doing their job. This can
* happen when memory pressure pushes dirty folios to the end of
* the LRU before the dirty limits are breached and the dirty
* data has expired. It can also happen when the proportion of
* dirty folios grows not through writes but through memory
* pressure reclaiming all the clean cache. And in some cases,
* the flushers simply cannot keep up with the allocation
* rate. Nudge the flusher threads in case they are asleep.
*/
if (stat.nr_unqueued_dirty == nr_taken) {
wakeup_flusher_threads(WB_REASON_VMSCAN);
/*
* For cgroupv1 dirty throttling is achieved by waking up
* the kernel flusher here and later waiting on folios
* which are in writeback to finish (see shrink_folio_list()).
*
* Flusher may not be able to issue writeback quickly
* enough for cgroupv1 writeback throttling to work
* on a large system.
*/
if (!writeback_throttling_sane(sc))
reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
}
sc->nr.dirty += stat.nr_dirty;
sc->nr.congested += stat.nr_congested;
sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
sc->nr.writeback += stat.nr_writeback;
sc->nr.immediate += stat.nr_immediate;
sc->nr.taken += nr_taken;
if (file)
sc->nr.file_taken += nr_taken;
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
nr_scanned, nr_reclaimed, &stat, sc->priority, file);
return nr_reclaimed;
}
/*
* shrink_active_list() moves folios from the active LRU to the inactive LRU.
*
* We move them the other way if the folio is referenced by one or more
* processes.
*
* If the folios are mostly unmapped, the processing is fast and it is
* appropriate to hold lru_lock across the whole operation. But if
* the folios are mapped, the processing is slow (folio_referenced()), so
* we should drop lru_lock around each folio. It's impossible to balance
* this, so instead we remove the folios from the LRU while processing them.
* It is safe to rely on the active flag against the non-LRU folios in here
* because nobody will play with that bit on a non-LRU folio.
*
* The downside is that we have to touch folio->_refcount against each folio.
* But we had to alter folio->flags anyway.
*/
static void shrink_active_list(unsigned long nr_to_scan,
struct lruvec *lruvec,
struct scan_control *sc,
enum lru_list lru)
{
unsigned long nr_taken;
unsigned long nr_scanned;
unsigned long vm_flags;
LIST_HEAD(l_hold); /* The folios which were snipped off */
LIST_HEAD(l_active);
LIST_HEAD(l_inactive);
unsigned int nr_deactivate, nr_activate;
unsigned int nr_rotated = 0;
bool file = is_file_lru(lru);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
lru_add_drain();
spin_lock_irq(&lruvec->lru_lock);
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
&nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
if (!cgroup_reclaim(sc))
__count_vm_events(PGREFILL, nr_scanned);
__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
spin_unlock_irq(&lruvec->lru_lock);
while (!list_empty(&l_hold)) {
struct folio *folio;
cond_resched();
folio = lru_to_folio(&l_hold);
list_del(&folio->lru);
if (unlikely(!folio_evictable(folio))) {
folio_putback_lru(folio);
continue;
}
if (unlikely(buffer_heads_over_limit)) {
if (folio_needs_release(folio) &&
folio_trylock(folio)) {
filemap_release_folio(folio, 0);
folio_unlock(folio);
}
}
/* Referenced or rmap lock contention: rotate */
if (folio_referenced(folio, 0, sc->target_mem_cgroup,
&vm_flags) != 0) {
/*
* Identify referenced, file-backed active folios and
* give them one more trip around the active list, so
* that executable code gets a better chance to stay in
* memory under moderate memory pressure. Anon folios
* are not likely to be evicted by use-once streaming
* IO, plus the JVM can create lots of anon VM_EXEC folios,
* so we ignore them here.
*/
if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
nr_rotated += folio_nr_pages(folio);
list_add(&folio->lru, &l_active);
continue;
}
}
folio_clear_active(folio); /* we are de-activating */
folio_set_workingset(folio);
list_add(&folio->lru, &l_inactive);
}
/*
* Move folios back to the lru list.
*/
spin_lock_irq(&lruvec->lru_lock);
nr_activate = move_folios_to_lru(lruvec, &l_active);
nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
__count_vm_events(PGDEACTIVATE, nr_deactivate);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&lruvec->lru_lock);
if (nr_rotated)
lru_note_cost(lruvec, file, 0, nr_rotated);
trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
nr_deactivate, nr_rotated, sc->priority, file);
}
static unsigned int reclaim_folio_list(struct list_head *folio_list,
struct pglist_data *pgdat)
{
struct reclaim_stat dummy_stat;
unsigned int nr_reclaimed;
struct folio *folio;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
.no_demotion = 1,
};
nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, true);
while (!list_empty(folio_list)) {
folio = lru_to_folio(folio_list);
list_del(&folio->lru);
folio_putback_lru(folio);
}
return nr_reclaimed;
}
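/*
 * reclaim_pages() takes a list of isolated folios, e.g. from MADV_PAGEOUT,
 * that may span multiple NUMA nodes. A sketch of the batching below: folios
 * are spliced onto a per-node sublist and handed to reclaim_folio_list() one
 * node at a time, so that shrink_folio_list() always operates against a
 * single pgdat, with reclaim recursion suppressed via
 * memalloc_noreclaim_save().
 */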
unsigned long reclaim_pages(struct list_head *folio_list)
{
int nid;
unsigned int nr_reclaimed = 0;
LIST_HEAD(node_folio_list);
unsigned int noreclaim_flag;
if (list_empty(folio_list))
return nr_reclaimed;
noreclaim_flag = memalloc_noreclaim_save();
nid = folio_nid(lru_to_folio(folio_list));
do {
struct folio *folio = lru_to_folio(folio_list);
if (nid == folio_nid(folio)) {
folio_clear_active(folio);
list_move(&folio->lru, &node_folio_list);
continue;
}
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
nid = folio_nid(lru_to_folio(folio_list));
} while (!list_empty(folio_list));
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
memalloc_noreclaim_restore(noreclaim_flag);
return nr_reclaimed;
}
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct lruvec *lruvec, struct scan_control *sc)
{
if (is_active_lru(lru)) {
if (sc->may_deactivate & (1 << is_file_lru(lru)))
shrink_active_list(nr_to_scan, lruvec, sc, lru);
else
sc->skipped_deactivate = 1;
return 0;
}
return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}
/*
* The inactive anon list should be small enough that the VM never has
* to do too much work.
*
* The inactive file list should be small enough to leave most memory
* to the established workingset on the scan-resistant active list,
* but large enough to avoid thrashing the aggregate readahead window.
*
* Both inactive lists should also be large enough that each inactive
* folio has a chance to be referenced again before it is reclaimed.
*
* If that fails and refaulting is observed, the inactive list grows.
*
* The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios
* on this LRU, maintained by the pageout code. An inactive_ratio
* of 3 means 3:1 or 25% of the folios are kept on the inactive list.
*
*  total     target    max
*  memory    ratio     inactive
* -------------------------------------
*    10MB       1         5MB
*   100MB       1        50MB
*     1GB       3       250MB
*    10GB      10       0.9GB
*   100GB      31         3GB
*     1TB     101        10GB
*    10TB     320        32GB
*/
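/*
 * For example, with 1GB on an LRU (gb == 1 below), inactive_ratio is
 * int_sqrt(10 * 1) == 3, so the inactive list is considered low once
 * inactive * 3 < active, i.e. once less than a quarter of the folios
 * are inactive, matching the 250MB row in the table above.
 */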
static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
{
enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
unsigned long inactive, active;
unsigned long inactive_ratio;
unsigned long gb;
inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
inactive_ratio = int_sqrt(10 * gb);
else
inactive_ratio = 1;
return inactive * inactive_ratio < active;
}
enum scan_balance {
SCAN_EQUAL,
SCAN_FRACT,
SCAN_ANON,
SCAN_FILE,
};
static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
{
unsigned long file;
struct lruvec *target_lruvec;
if (lru_gen_enabled())
return;
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
/*
* Flush the memory cgroup stats, so that we read accurate per-memcg
* lruvec stats for heuristics.
*/
mem_cgroup_flush_stats(sc->target_mem_cgroup);
/*
* Determine the scan balance between anon and file LRUs.
*/
spin_lock_irq(&target_lruvec->lru_lock);
sc->anon_cost = target_lruvec->anon_cost;
sc->file_cost = target_lruvec->file_cost;
spin_unlock_irq(&target_lruvec->lru_lock);
/*
* Target desirable inactive:active list ratios for the anon
* and file LRU lists.
*/
if (!sc->force_deactivate) {
unsigned long refaults;
/*
* When refaults are being observed, it means a new
* workingset is being established. Deactivate to get
* rid of any stale active pages quickly.
*/
refaults = lruvec_page_state(target_lruvec,
WORKINGSET_ACTIVATE_ANON);
if (refaults != target_lruvec->refaults[WORKINGSET_ANON] ||
inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
sc->may_deactivate |= DEACTIVATE_ANON;
else
sc->may_deactivate &= ~DEACTIVATE_ANON;
refaults = lruvec_page_state(target_lruvec,
WORKINGSET_ACTIVATE_FILE);
if (refaults != target_lruvec->refaults[WORKINGSET_FILE] ||
inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
sc->may_deactivate |= DEACTIVATE_FILE;
else
sc->may_deactivate &= ~DEACTIVATE_FILE;
} else
sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
/*
* If we have plenty of inactive file pages that aren't
* thrashing, try to reclaim those first before touching
* anonymous pages.
*/
file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) &&
!sc->no_cache_trim_mode)
sc->cache_trim_mode = 1;
else
sc->cache_trim_mode = 0;
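/*
 * As a concrete data point, assuming DEF_PRIORITY == 12 and 4K pages:
 * on the first pass, "file >> sc->priority" is only non-zero when there
 * are at least 4096 inactive file pages, i.e. roughly 16MB of
 * non-thrashing cache available to trim.
 */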
/*
* Prevent the reclaimer from falling into the cache trap: as
* cache pages start out inactive, every cache fault will tip
* the scan balance towards the file LRU. And as the file LRU
* shrinks, so does the window for rotation from references.
* This means we have a runaway feedback loop where a tiny
* thrashing file LRU becomes infinitely more attractive than
* anon pages. Try to detect this based on file LRU size.
*/
if (!cgroup_reclaim(sc)) {
unsigned long total_high_wmark = 0;
unsigned long free, anon;
int z;
free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
file = node_page_state(pgdat, NR_ACTIVE_FILE) +
node_page_state(pgdat, NR_INACTIVE_FILE);
for (z = 0; z < MAX_NR_ZONES; z++) {
struct zone *zone = &pgdat->node_zones[z];
if (!managed_zone(zone))
continue;
total_high_wmark += high_wmark_pages(zone);
}
/*
* Consider anon: if that's low too, this isn't a
* runaway file reclaim problem, but rather just
* extreme pressure. Reclaim as usual then.
*/
anon = node_page_state(pgdat, NR_INACTIVE_ANON);
sc->file_is_tiny =
file + free <= total_high_wmark &&
!(sc->may_deactivate & DEACTIVATE_ANON) &&
anon >> sc->priority;
}
}
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned.
*
* nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan
* nr[2] = file inactive folios to scan; nr[3] = file active folios to scan
*/
static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
unsigned long *nr)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
unsigned long anon_cost, file_cost, total_cost;
int swappiness = sc_swappiness(sc, memcg);
u64 fraction[ANON_AND_FILE];
u64 denominator = 0;	/* gcc: silence a false "may be used uninitialized" warning */
enum scan_balance scan_balance;
unsigned long ap, fp;
enum lru_list lru;
/* If we have no swap space, do not bother scanning anon folios. */
if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
scan_balance = SCAN_FILE;
goto out;
}
/*
* Global reclaim will swap to prevent OOM even with no
* swappiness, but memcg users want to use this knob to
* disable swapping for individual groups completely when
* using the memory controller's swap limit feature would be
* too expensive.
*/
if (cgroup_reclaim(sc) && !swappiness) {
scan_balance = SCAN_FILE;
goto out;
}
/*
* Do not apply any pressure balancing cleverness when the
* system is close to OOM; scan both anon and file equally
* (unless the swappiness setting disagrees with swapping).
*/
if (!sc->priority && swappiness) {
scan_balance = SCAN_EQUAL;
goto out;
}
/*
* If the system is almost out of file pages, force-scan anon.
*/
if (sc->file_is_tiny) {
scan_balance = SCAN_ANON;
goto out;
}
/*
* If there is enough inactive page cache, we do not reclaim
* anything from the anonymous working set right now.
*/
if (sc->cache_trim_mode) {
scan_balance = SCAN_FILE;
goto out;
}
scan_balance = SCAN_FRACT;
/*
* Calculate the pressure balance between anon and file pages.
*
* The amount of pressure we put on each LRU is inversely
* proportional to the cost of reclaiming each list, as
* determined by the share of pages that are refaulting, times
* the relative IO cost of bringing back a swapped out
* anonymous page vs reloading a filesystem page (swappiness).
*
* We limit that influence, though, to ensure no list gets
* left behind completely: at least a third of the pressure is
* applied to each list, before swappiness.
*
* With swappiness at 100, anon and file have equal IO cost.
*/
total_cost = sc->anon_cost + sc->file_cost;
anon_cost = total_cost + sc->anon_cost;
file_cost = total_cost + sc->file_cost;
total_cost = anon_cost + file_cost;
ap = swappiness * (total_cost + 1);
ap /= anon_cost + 1;
fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1);
fp /= file_cost + 1;
fraction[0] = ap;
fraction[1] = fp;
denominator = ap + fp;
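/*
 * Worked example, assuming MAX_SWAPPINESS == 200: with swappiness == 60,
 * sc->anon_cost == 300 and sc->file_cost == 100, the doubled costs are
 * anon_cost == 700 and file_cost == 500 (total_cost == 1200), giving
 * ap == 60 * 1201 / 701 == 102 and fp == 140 * 1201 / 501 == 335,
 * i.e. roughly a 1:3 anon:file split of the scan pressure.
 */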
out:
for_each_evictable_lru(lru) {
bool file = is_file_lru(lru);
unsigned long lruvec_size;
unsigned long low, min;
unsigned long scan;
lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
mem_cgroup_protection(sc->target_mem_cgroup, memcg,
&min, &low);
if (min || low) {
/*
* Scale a cgroup's reclaim pressure by proportioning
* its current usage to its memory.low or memory.min
* setting.
*
* This is important, as otherwise scanning aggression
* becomes extremely binary -- from nothing as we
* approach the memory protection threshold, to totally
* nominal as we exceed it. This would force us to set
* extremely liberal protection thresholds. It
* also means we simply get no protection at all if we
* set it too low, which is not ideal.
*
* If there is any protection in place, we reduce scan
* pressure by how much of the total memory used is
* within protection thresholds.
*
* There is one special case: in the first reclaim pass,
* we skip over all groups that are within their low
* protection. If that fails to reclaim enough pages to
* satisfy the reclaim goal, we come back and override
* the best-effort low protection. However, we still
* ideally want to honor how well-behaved groups are in
* that case instead of simply punishing them all
* equally. As such, we reclaim them based on how much
* memory they are using, reducing the scan pressure
* again by how much of the total memory used is under
* hard protection.
*/
unsigned long cgroup_size = mem_cgroup_size(memcg);
unsigned long protection;
/* memory.low scaling, make sure we retry before OOM */
if (!sc->memcg_low_reclaim && low > min) {
protection = low;
sc->memcg_low_skipped = 1;
} else {
protection = min;
}
/* Avoid TOCTOU with earlier protection check */
cgroup_size = max(cgroup_size, protection);
scan = lruvec_size - lruvec_size * protection /
(cgroup_size + 1);
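/*
 * E.g. a cgroup using 100000 pages with 75000 pages of
 * protection would see its scan target reduced to roughly
 * a quarter of lruvec_size, in proportion to its
 * unprotected share.
 */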
/*
* Minimally target SWAP_CLUSTER_MAX pages to keep
* reclaim moving forwards, avoiding decrementing
* sc->priority further than desirable.
*/
scan = max(scan, SWAP_CLUSTER_MAX);
} else {
scan = lruvec_size;
}
scan >>= sc->priority;
/*
* If the cgroup's already been deleted, make sure to
* scrape out the remaining cache.
*/
if (!scan && !mem_cgroup_online(memcg))
scan = min(lruvec_size, SWAP_CLUSTER_MAX);
switch (scan_balance) {
case SCAN_EQUAL:
/* Scan lists relative to size */
break;
case SCAN_FRACT:
/*
* Scan types proportional to swappiness and
* their relative recent reclaim efficiency.
* Make sure we don't miss the last page on
* the offlined memory cgroups because of a
* round-off error.
*/
scan = mem_cgroup_online(memcg) ?
div64_u64(scan * fraction[file], denominator) :
DIV64_U64_ROUND_UP(scan * fraction[file],
denominator);
break;
case SCAN_FILE:
case SCAN_ANON:
/* Scan one type exclusively */
if ((scan_balance == SCAN_FILE) != file)
scan = 0;
break;
default:
/* Look ma, no brain */
BUG();
}
nr[lru] = scan;
}
}
/*
* Anonymous LRU management is a waste if there is
* ultimately no way to reclaim the memory.
*/
static bool can_age_anon_pages(struct pglist_data *pgdat,
struct scan_control *sc)
{
/* Aging the anon LRU is valuable if swap is present: */
if (total_swap_pages > 0)
return true;
/* Also valuable if anon pages can be demoted: */
return can_demote(pgdat->node_id, sc);
}
#ifdef CONFIG_LRU_GEN
#ifdef CONFIG_LRU_GEN_ENABLED
DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
#define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
#else
DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
#define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
#endif
static bool should_walk_mmu(void)
{
return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
}
static bool should_clear_pmd_young(void)
{
return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
}
/******************************************************************************
* shorthand helpers
******************************************************************************/
#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset))
#define DEFINE_MAX_SEQ(lruvec) \
unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
#define DEFINE_MIN_SEQ(lruvec) \
unsigned long min_seq[ANON_AND_FILE] = { \
READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
}
#define for_each_gen_type_zone(gen, type, zone) \
for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
#define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
#define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
#ifdef CONFIG_MEMCG
if (memcg) {
struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
/* see the comment in mem_cgroup_lruvec() */
if (!lruvec->pgdat)
lruvec->pgdat = pgdat;
return lruvec;
}
#endif
VM_WARN_ON_ONCE(!mem_cgroup_disabled());
return &pgdat->__lruvec;
}
static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
{
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
if (!sc->may_swap)
return 0;
if (!can_demote(pgdat->node_id, sc) &&
mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
return 0;
return sc_swappiness(sc, memcg);
}
static int get_nr_gens(struct lruvec *lruvec, int type)
{
return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
}
static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
{
/* see the comment on lru_gen_folio */
return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
}
/******************************************************************************
* Bloom filters
******************************************************************************/
/*
* Bloom filters with m = 1<<15, k = 2 and false positive rates of ~1/5 when
* n = 10,000 and ~1/2 when n = 20,000, where, conventionally, m is the number
* of bits in a bitmap, k is the number of hash functions and n is the number
* of inserted items.
*
* Page table walkers use one of the two filters to reduce their search space.
* To get rid of non-leaf entries that no longer have enough leaf entries, the
* aging uses the double-buffering technique to flip to the other filter each
* time it produces a new generation. For non-leaf entries that have enough
* leaf entries, the aging carries them over to the next generation in
* walk_pmd_range(); the eviction also reports them when walking the rmap
* in lru_gen_look_around().
*
* For future optimizations:
* 1. It's not necessary to keep both filters all the time. The spare one can
*    be freed after the RCU grace period and reallocated if needed again.
* 2. When reallocating, it's worth scaling its size according to the number
*    of inserted entries in the other filter, to reduce the memory overhead on
*    small systems and false positives on large systems.
* 3. Jenkins' hash function is an alternative to Knuth's.
*/
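/*
 * Those rates follow the standard approximation p = (1 - e^(-kn/m))^k:
 * with m = 32768 and k = 2, n = 10,000 gives (1 - e^(-0.61))^2 ~= 0.21
 * and n = 20,000 gives (1 - e^(-1.22))^2 ~= 0.50.
 */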
#define BLOOM_FILTER_SHIFT 15
static inline int filter_gen_from_seq(unsigned long seq)
{
return seq % NR_BLOOM_FILTERS;
}
static void get_item_key(void *item, int *key)
{
u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
key[1] = hash >> BLOOM_FILTER_SHIFT;
}
static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return true;
get_item_key(item, key);
return test_bit(key[0], filter) && test_bit(key[1], filter);
}
static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return;
get_item_key(item, key);
if (!test_bit(key[0], filter))
set_bit(key[0], filter);
if (!test_bit(key[1], filter))
set_bit(key[1], filter);
}
static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq)
{
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
filter = mm_state->filters[gen];
if (filter) {
bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
return;
}
filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
WRITE_ONCE(mm_state->filters[gen], filter);
}
/******************************************************************************
* mm_struct list
******************************************************************************/
#ifdef CONFIG_LRU_GEN_WALKS_MMU
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
{
static struct lru_gen_mm_list mm_list = {
.fifo = LIST_HEAD_INIT(mm_list.fifo),
.lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
};
#ifdef CONFIG_MEMCG
if (memcg)
return &memcg->mm_list;
#endif
VM_WARN_ON_ONCE(!mem_cgroup_disabled());
return &mm_list;
}
static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
{
return &lruvec->mm_state;
}
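/*
 * A note on the walker below: each node owns one bit in mm->lru_gen.bitmap
 * (key == node_id modulo the bitmap width), presumably set when the mm is
 * scheduled in (see lru_gen_use_mm()). A clear bit lets the walker skip an
 * mm_struct that has not run since this node's last walk, unless force_scan
 * overrides it.
 */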
static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
{
int key;
struct mm_struct *mm;
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
return NULL;
clear_bit(key, &mm->lru_gen.bitmap);
return mmget_not_zero(mm) ? mm : NULL;
}
void lru_gen_add_mm(struct mm_struct *mm)
{
int nid;
struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
#ifdef CONFIG_MEMCG
VM_WARN_ON_ONCE(mm->lru_gen.memcg);
mm->lru_gen.memcg = memcg;
#endif
spin_lock(&mm_list->lock);
for_each_node_state(nid, N_MEMORY) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/* the first addition since the last iteration */
if (mm_state->tail == &mm_list->fifo)
mm_state->tail = &mm->lru_gen.list;
}
list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
spin_unlock(&mm_list->lock);
}
void lru_gen_del_mm(struct mm_struct *mm)
{
int nid;
struct lru_gen_mm_list *mm_list;
struct mem_cgroup *memcg = NULL;
if (list_empty(&mm->lru_gen.list))
return;
#ifdef CONFIG_MEMCG
memcg = mm->lru_gen.memcg;
#endif
mm_list = get_mm_list(memcg);
spin_lock(&mm_list->lock);
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/* where the current iteration continues after */
if (mm_state->head == &mm->lru_gen.list)
mm_state->head = mm_state->head->prev;
/* where the last iteration ended before */
if (mm_state->tail == &mm->lru_gen.list)
mm_state->tail = mm_state->tail->next;
}
list_del_init(&mm->lru_gen.list);
spin_unlock(&mm_list->lock);
#ifdef CONFIG_MEMCG
mem_cgroup_put(mm->lru_gen.memcg);
mm->lru_gen.memcg = NULL;
#endif
}
#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm)
{
struct mem_cgroup *memcg;
struct task_struct *task = rcu_dereference_protected(mm->owner, true);
VM_WARN_ON_ONCE(task->mm != mm);
lockdep_assert_held(&task->alloc_lock);
/* for mm_update_next_owner() */
if (mem_cgroup_disabled())
return;
/* migration can happen before addition */
if (!mm->lru_gen.memcg)
return;
rcu_read_lock();
memcg = mem_cgroup_from_task(task);
rcu_read_unlock();
if (memcg == mm->lru_gen.memcg)
return;
VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
lru_gen_del_mm(mm);
lru_gen_add_mm(mm);
}
#endif
#else /* !CONFIG_LRU_GEN_WALKS_MMU */
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
{
return NULL;
}
static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
{
return NULL;
}
static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
{
return NULL;
}
#endif
static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
{
int i;
int hist;
struct lruvec *lruvec = walk->lruvec;
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
hist = lru_hist_from_seq(walk->seq);
for (i = 0; i < NR_MM_STATS; i++) {
WRITE_ONCE(mm_state->stats[hist][i],
mm_state->stats[hist][i] + walk->mm_stats[i]);
walk->mm_stats[i] = 0;
}
if (NR_HIST_GENS > 1 && last) {
hist = lru_hist_from_seq(walk->seq + 1);
for (i = 0; i < NR_MM_STATS; i++)
WRITE_ONCE(mm_state->stats[hist][i], 0);
}
}
static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
{
bool first = false;
bool last = false;
struct mm_struct *mm = NULL;
struct lruvec *lruvec = walk->lruvec;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/*
* mm_state->seq is incremented after each iteration of mm_list. There
* are three interesting cases for this page table walker:
* 1. It tries to start a new iteration with a stale max_seq: there is
* nothing left to do.
* 2. It started the next iteration: it needs to reset the Bloom filter
* so that a fresh set of PTE tables can be recorded.
* 3. It ended the current iteration: it needs to reset the mm stats
* counters and tell its caller to increment max_seq.
*/
spin_lock(&mm_list->lock);
VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq);
if (walk->seq <= mm_state->seq)
goto done;
if (!mm_state->head)
mm_state->head = &mm_list->fifo;
if (mm_state->head == &mm_list->fifo)
first = true;
do {
mm_state->head = mm_state->head->next;
if (mm_state->head == &mm_list->fifo) {
WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
last = true;
break;
}
/* force scan for those added after the last iteration */
if (!mm_state->tail || mm_state->tail == mm_state->head) {
mm_state->tail = mm_state->head->next;
walk->force_scan = true;
}
} while (!(mm = get_next_mm(walk)));
done:
if (*iter || last)
reset_mm_stats(walk, last);
spin_unlock(&mm_list->lock);
if (mm && first)
reset_bloom_filter(mm_state, walk->seq + 1);
if (*iter)
mmput_async(*iter);
*iter = mm;
return last;
}
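/*
 * A sketch of the intended caller loop, assuming a walk_mm() helper that
 * does the actual page table walk:
 *
 *	struct mm_struct *mm = NULL;
 *
 *	do {
 *		last = iterate_mm_list(walk, &mm);
 *		if (mm)
 *			walk_mm(mm, walk);
 *	} while (mm);
 *
 * iterate_mm_list() drops the previous mm, hands back the next one through
 * *iter and returns true once the tail of the FIFO is reached, so the loop
 * terminates after a full traversal of the mm_list.
 */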
static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq)
{
bool success = false;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
spin_lock(&mm_list->lock);
VM_WARN_ON_ONCE(mm_state->seq + 1 < seq);
if (seq > mm_state->seq) {
mm_state->head = NULL;
mm_state->tail = NULL;
WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
success = true;
}
spin_unlock(&mm_list->lock);
return success;
}
/******************************************************************************
* PID controller
******************************************************************************/
/*
* A feedback loop based on a Proportional-Integral-Derivative (PID)
* controller.
*
* The P term is refaulted/(evicted+protected) from a tier in the generation
* currently being evicted; the I term is the exponential moving average of the
* P term over the generations previously evicted, using the smoothing factor
* 1/2; the D term isn't supported.
*
* The setpoint (SP) is always the first tier of one type; the process variable
* (PV) is either any tier of the other type or any other tier of the same
* type.
*
* The error is the difference between the SP and the PV; the correction is to
* turn off protection when SP>PV or turn on protection when SP<PV.
*
* For future optimizations:
* 1. The D term may discount the other two terms over time so that long-lived
* generations can resist stale information.
*/
struct ctrl_pos {
unsigned long refaulted;
unsigned long total;
int gain;
};
static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
struct ctrl_pos *pos)
{
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
pos->refaulted = lrugen->avg_refaulted[type][tier] +
atomic_long_read(&lrugen->refaulted[hist][type][tier]);
pos->total = lrugen->avg_total[type][tier] +
atomic_long_read(&lrugen->evicted[hist][type][tier]);
if (tier)
pos->total += lrugen->protected[hist][type][tier - 1];
pos->gain = gain;
}
static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
{
int hist, tier;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
lockdep_assert_held(&lruvec->lru_lock);
if (!carryover && !clear)
return;
hist = lru_hist_from_seq(seq);
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
if (carryover) {
unsigned long sum;
sum = lrugen->avg_refaulted[type][tier] +
atomic_long_read(&lrugen->refaulted[hist][type][tier]);
WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
sum = lrugen->avg_total[type][tier] +
atomic_long_read(&lrugen->evicted[hist][type][tier]);
if (tier)
sum += lrugen->protected[hist][type][tier - 1];
WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
}
if (clear) {
atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
if (tier)
WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
}
}
}
static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
{
/*
* Return true if the PV has a limited number of refaults or a lower
* refaulted/total than the SP.
*/
return pv->refaulted < MIN_LRU_BATCH ||
pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
(sp->refaulted + 1) * pv->total * pv->gain;
}
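/*
 * For instance, assuming MIN_LRU_BATCH == 64 and unity gains: an SP of 10
 * refaults out of 100 evictions (10%) versus a PV of 200 refaults out of
 * 1000 evictions (20%) gives 200 * 164 > 11 * 1000, so the error is
 * negative and the PV keeps its protection.
 */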
/******************************************************************************
* the aging
******************************************************************************/
/* promote pages accessed through page tables */
static int folio_update_gen(struct folio *folio, int gen)
{
unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
VM_WARN_ON_ONCE(!rcu_read_lock_held());
do {
/* lru_gen_del_folio() has isolated this page? */
if (!(old_flags & LRU_GEN_MASK)) {
/* for shrink_folio_list() */
new_flags = old_flags | BIT(PG_referenced);
continue;
}
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
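/*
 * Both here and in folio_inc_gen() below, the generation is stored off by
 * one in folio->flags: gen + 1 goes into LRU_GEN_MASK so that a zero field
 * means "not on a multi-gen LRU list", and the decoded value comes back as
 * ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1.
 */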
/* protect pages accessed multiple times through file descriptors */
static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
int type = folio_is_file_lru(folio);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
do {
new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
/* folio_update_gen() has promoted this page? */
if (new_gen >= 0 && new_gen != old_gen)
return new_gen;
new_gen = (old_gen + 1) % MAX_NR_GENS;
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
/* for folio_end_writeback() */
if (reclaiming)
new_flags |= BIT(PG_reclaim);
} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
lru_gen_update_size(lruvec, folio, old_gen, new_gen);
return new_gen;
}
static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
int old_gen, int new_gen)
{
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
int delta = folio_nr_pages(folio);
VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
walk->batched++;
walk->nr_pages[old_gen][type][zone] -= delta;
walk->nr_pages[new_gen][type][zone] += delta;
}
static void reset_batch_size(struct lru_gen_mm_walk *walk)
{
int gen, type, zone;
struct lruvec *lruvec = walk->lruvec;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
walk->batched = 0;
for_each_gen_type_zone(gen, type, zone) {
enum lru_list lru = type * LRU_INACTIVE_FILE;
int delta = walk->nr_pages[gen][type][zone];
if (!delta)
continue;
walk->nr_pages[gen][type][zone] = 0;
WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
lrugen->nr_pages[gen][type][zone] + delta);
if (lru_gen_is_active(lruvec, gen))
lru += LRU_ACTIVE;
__update_lru_size(lruvec, lru, zone, delta);
}
}
static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
{
struct address_space *mapping;
struct vm_area_struct *vma = args->vma;
struct lru_gen_mm_walk *walk = args->private;
if (!vma_is_accessible(vma))
return true;
if (is_vm_hugetlb_page(vma))
return true;
if (!vma_has_recency(vma))
return true;
if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL))
return true;
if (vma == get_gate_vma(vma->vm_mm))
return true;
if (vma_is_anonymous(vma))
return !walk->can_swap;
if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
return true;
mapping = vma->vm_file->f_mapping;
if (mapping_unevictable(mapping))
return true;
if (shmem_mapping(mapping))
return !walk->can_swap;
/* to exclude special mappings like dax, etc. */
return !mapping->a_ops->read_folio;
}
/*
* Some userspace memory allocators map many single-page VMAs. Instead of
* going back to the PGD table for each such VMA, finish an entire PMD
* table to reduce zigzags and improve cache performance.
*/
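/*
 * For example, assuming 4K pages and 2MB PMDs, a caller passing
 * mask == PMD_MASK and size == PAGE_SIZE with *vm_end == 0x301000 gets
 * start == 0x301000 and end == 0x400000: the walk finishes out the current
 * PMD table before moving on.
 */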
static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
unsigned long *vm_start, unsigned long *vm_end)
{
unsigned long start = round_up(*vm_end, size);
unsigned long end = (start | ~mask) + 1;
VMA_ITERATOR(vmi, args->mm, start);
VM_WARN_ON_ONCE(mask & size);
VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
for_each_vma(vmi, args->vma) {
if (end && end <= args->vma->vm_start)
return false;