Revert "BACKPORT: mm: move zone watermark accesses behind an accessor"

This reverts commit acfb1c608b.

Reason for revert: revert customized code
Bug: 140544941
Test: boot
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: Martin Liu <liumartin@google.com>
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I4988ddf1fc24579d4fc478de1de6e45b870f7bcd
Mark Salyzyn authored 2020-04-10 17:14:09 -07:00, committed by Tao Huang
parent 240610c486
commit e77ca8f9c9
3 changed files with 11 additions and 12 deletions
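For context, the reverted backport had renamed the field to _watermark[] and routed reads through a generic wmark_pages(z, i) accessor; with this revert the zone keeps a plain watermark[] array and call sites index it directly again. A minimal userspace sketch of the restored pattern follows; the stand-in struct, the example values and the main() harness are illustrative only, the real definitions live in include/linux/mmzone.h.

#include <stdio.h>

/* Minimal stand-ins for the kernel definitions touched by this revert
 * (illustrative only; the real ones live in include/linux/mmzone.h). */
enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

struct zone {
	unsigned long watermark[NR_WMARK];	/* field name restored by the revert */
};

/* The three accessor macros exactly as restored in the first hunk below. */
#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

int main(void)
{
	struct zone z = { .watermark = { 128, 160, 192 } };	/* example values */
	struct zone *zone = &z;

	/* Call sites such as __compaction_suitable() now index the array
	 * directly instead of going through a wmark_pages(zone, idx) macro. */
	unsigned long mark = zone->watermark[WMARK_LOW];

	printf("min=%lu low=%lu (direct low)=%lu high=%lu\n",
	       min_wmark_pages(zone), low_wmark_pages(zone), mark,
	       high_wmark_pages(zone));
	return 0;
}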

include/linux/mmzone.h

@@ -273,10 +273,9 @@ enum zone_watermarks {
 	NR_WMARK
 };

-#define min_wmark_pages(z) (z->_watermark[WMARK_MIN])
-#define low_wmark_pages(z) (z->_watermark[WMARK_LOW])
-#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH])
-#define wmark_pages(z, i) (z->_watermark[i])
+#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
+#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
+#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
@@ -367,7 +366,7 @@ struct zone {
 	/* Read-mostly fields */

 	/* zone watermarks, access with *_wmark_pages(zone) macros */
-	unsigned long _watermark[NR_WMARK];
+	unsigned long watermark[NR_WMARK];

 	unsigned long nr_reserved_highatomic;

mm/compaction.c

@@ -1431,7 +1431,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	if (is_via_compact_memory(order))
 		return COMPACT_CONTINUE;

-	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+	watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 	/*
 	 * If watermarks for high-order allocation are already met, there
 	 * should be no need for compaction at all.

mm/page_alloc.c

@@ -3537,7 +3537,7 @@ retry:
 			}
 		}

-		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 		if (!zone_watermark_fast(zone, order, mark,
 				       ac_classzone_idx(ac), alloc_flags)) {
 			int ret;
@@ -4971,7 +4971,7 @@ long si_mem_available(void)
 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

 	for_each_zone(zone)
-		wmark_low += low_wmark_pages(zone);
+		wmark_low += zone->watermark[WMARK_LOW];

 	/*
 	 * Estimate the amount of memory available for userspace allocations,
@@ -7545,13 +7545,13 @@ static void __setup_per_zone_wmarks(void)
 			min_pages = zone->managed_pages / 1024;
 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
-			zone->_watermark[WMARK_MIN] = min_pages;
+			zone->watermark[WMARK_MIN] = min_pages;
 		} else {
 			/*
 			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->_watermark[WMARK_MIN] = min;
+			zone->watermark[WMARK_MIN] = min;
 		}

 		/*
@@ -7563,9 +7563,9 @@ static void __setup_per_zone_wmarks(void)
 			    mult_frac(zone->managed_pages,
 				      watermark_scale_factor, 10000));

-		zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) +
+		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
 					low + min;
-		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
 					low + min * 2;

 		spin_unlock_irqrestore(&zone->lock, flags);
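As a rough companion to the __setup_per_zone_wmarks() hunks above: the low and high marks are restored to direct writes of watermark[WMARK_LOW] and watermark[WMARK_HIGH], computed as offsets above the minimum watermark, with the offset scaled by mult_frac(zone->managed_pages, watermark_scale_factor, 10000). The sketch below mirrors only the lines visible in this diff; the input values and the mult_frac() stand-in are assumptions for illustration, and the clamping of the offset that happens just above the visible context is omitted.

#include <stdio.h>

/* Userspace stand-in for the kernel's mult_frac(x, numer, denom), which
 * evaluates x * numer / denom while avoiding intermediate overflow. */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
			       unsigned long denom)
{
	unsigned long quot = x / denom;
	unsigned long rem = x % denom;

	return quot * numer + rem * numer / denom;
}

int main(void)
{
	/* Illustrative inputs: in the real function these come from
	 * min_free_kbytes and related tunables computed before the hunks
	 * shown above, not from this diff. */
	unsigned long managed_pages = 1UL << 20;	/* zone size in 4 KiB pages */
	unsigned long watermark_scale_factor = 10;	/* default sysctl value */
	unsigned long wmark_min = 1024;			/* zone->watermark[WMARK_MIN] */
	unsigned long low = 0;				/* extra offset computed earlier */

	/* Scale term from the visible context lines of the last hunk. */
	unsigned long min = mult_frac(managed_pages, watermark_scale_factor, 10000);

	/* The two restored assignments: offsets above the minimum watermark. */
	unsigned long wmark_low = wmark_min + low + min;
	unsigned long wmark_high = wmark_min + low + min * 2;

	printf("WMARK_MIN=%lu WMARK_LOW=%lu WMARK_HIGH=%lu (pages)\n",
	       wmark_min, wmark_low, wmark_high);
	return 0;
}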