diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7ec0492da0de..e6a725564d91 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1348,7 +1348,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 	/*
 	 * See do_wp_page(): we can only reuse the folio exclusively if
 	 * there are no additional references. Note that we always drain
-	 * the LRU pagevecs immediately after adding a THP.
+	 * the LRU cache immediately after adding a THP.
 	 */
 	if (folio_ref_count(folio) >
 			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 7892fa657c81..02804a44eec7 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1051,7 +1051,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 	if (pte)
 		pte_unmap(pte);
 
-	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
+	/* Drain LRU cache to remove extra pin on the swapped in pages */
 	if (swapped_in)
 		lru_add_drain();
 
@@ -1969,7 +1969,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 				result = SCAN_FAIL;
 				goto xa_unlocked;
 			}
-			/* drain pagevecs to help isolate_lru_page() */
+			/* drain lru cache to help isolate_lru_page() */
 			lru_add_drain();
 			page = folio_file_page(folio, index);
 		} else if (trylock_page(page)) {
@@ -1985,7 +1985,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			page_cache_sync_readahead(mapping, &file->f_ra,
 						  file, index,
 						  end - index);
-			/* drain pagevecs to help isolate_lru_page() */
+			/* drain lru cache to help isolate_lru_page() */
 			lru_add_drain();
 			page = find_lock_page(mapping, index);
 			if (unlikely(page == NULL)) {
diff --git a/mm/ksm.c b/mm/ksm.c
index 272c4f51388b..647b2f594f3d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -939,7 +939,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
 	 * The stable node did not yet appear stale to get_ksm_page(),
 	 * since that allows for an unmapped ksm page to be recognized
 	 * right up until it is freed; but the node is safe to remove.
-	 * This page might be in a pagevec waiting to be freed,
+	 * This page might be in an LRU cache waiting to be freed,
 	 * or it might be PageSwapCache (perhaps under writeback),
 	 * or it might have been removed from swapcache a moment ago.
	 */
@@ -2310,8 +2310,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 		trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
 
 		/*
-		 * A number of pages can hang around indefinitely on per-cpu
-		 * pagevecs, raised page count preventing write_protect_page
+		 * A number of pages can hang around indefinitely in per-cpu
+		 * LRU cache, raised page count preventing write_protect_page
 		 * from merging them. Though it doesn't really matter much,
 		 * it is puzzling to see some stuck in pages_volatile until
 		 * other activity jostles them out, and they also prevented
diff --git a/mm/memory.c b/mm/memory.c
index 2f5bd0286e09..d971d390299e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3401,8 +3401,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 			goto copy;
 		if (!folio_test_lru(folio))
 			/*
-			 * Note: We cannot easily detect+handle references from
-			 * remote LRU pagevecs or references to LRU folios.
+			 * We cannot easily detect+handle references from
+			 * remote LRU caches or references to LRU folios.
 			 */
 			lru_add_drain();
 		if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
@@ -3895,7 +3895,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * If we want to map a page that's in the swapcache writable, we
 		 * have to detect via the refcount if we're really the exclusive
 		 * owner. Try removing the extra reference from the local LRU
-		 * pagevecs if required.
+		 * caches if required.
 		 */
 		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
 		    !folio_test_ksm(folio) && !folio_test_lru(folio))
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index fad976ed7112..17aa57dfa32d 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -376,7 +376,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
 		/* ZONE_DEVICE pages are not on LRU */
 		if (!is_zone_device_page(page)) {
 			if (!PageLRU(page) && allow_drain) {
-				/* Drain CPU's pagevec */
+				/* Drain CPU's lru cache */
 				lru_add_drain_all();
 				allow_drain = false;
 			}
diff --git a/mm/swap.c b/mm/swap.c
index f2627d97a094..304830126f4d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
 
 /*
  * This path almost never happens for VM activity - pages are normally freed
- * via pagevecs. But it gets used by networking - and for compound pages.
+ * in batches. But it gets used by networking - and for compound pages.
  */
 static void __page_cache_release(struct folio *folio)
 {
diff --git a/mm/truncate.c b/mm/truncate.c
index dee9c86ee2c0..36e6c8691ead 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -564,7 +564,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * refcount. We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave pages behind because
  * shrink_page_list() has a temp ref on them, or because they're transiently
- * sitting in the folio_add_lru() pagevecs.
+ * sitting in the folio_add_lru() caches.
  */
 static int invalidate_complete_folio2(struct address_space *mapping,
 					struct folio *folio)
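
For reference, the pattern the updated comments describe is the drain-then-recheck
sequence already visible in the do_wp_page() and do_huge_pmd_wp_page() hunks above:
a folio freshly added to the LRU may still sit in this CPU's per-CPU LRU cache (the
batching structure formerly called a pagevec), which pins it with an extra reference,
so the cache is drained before the refcount is used to decide exclusive ownership.
The sketch below is illustrative only and not part of this patch; can_reuse_anon_folio()
is a made-up helper, while folio_test_lru(), lru_add_drain(), folio_ref_count() and
folio_test_swapcache() are the kernel APIs used in the hunks above.

#include <linux/mm.h>
#include <linux/swap.h>

/*
 * Illustrative helper (not in this patch): decide whether @folio can be
 * reused exclusively, mirroring the checks in the do_wp_page() hunk.
 */
static bool can_reuse_anon_folio(struct folio *folio)
{
	if (!folio_test_lru(folio))
		/*
		 * The folio may still be held in this CPU's LRU cache,
		 * which accounts for one extra reference; flush it first.
		 */
		lru_add_drain();

	/* The swap cache, if the folio is in it, holds one more reference. */
	return folio_ref_count(folio) <= 1 + folio_test_swapcache(folio);
}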