ANDROID: implement wrapper for reverse migration

Reverse migration is used to balance the occupancy of memory zones within
a node when that occupancy has been left imbalanced by an operation that
migrates pages to other zones, e.g. hotremoving and then hotadding the
same memory. In that case the newly hotadded memory has a lot of free
space, which can be refilled with the pages that were previously migrated
away (as part of offline/hotremove), thereby relieving pressure on the
other zones of the node.

Upstream discussion: https://lore.kernel.org/all/ee78c83d-da9b-f6d1-4f66-934b7782acfb@codeaurora.org/

Port to ACK6.6: Fold in following simple fixes from ACK6.1:

commit d0652b3a8559 ("ANDROID: inline isolate_and_split_free_page")
commit 8a98feb47ec0 ("ANDROID: mm: compaction: fix isolate_and_split_free_page() redefinition")

Port to ACK6.12: Make below changes to patch:

1. Move isolate_and_split_free_page() into mm/page_alloc to be able to
   make use of split_free_pages() [1]. The @list argument is also
   now an array of lists based on page order on account of switching to the
   split_free_pages API.

2. isolate_lru_page() is also deprecated, so isolate_anon_lru_page() now
   uses folio_isolate_lru(), its folio equivalent.

[1] https://lore.kernel.org/all/20240814035451.773331-1-yuzhao@google.com/T/#u

Bug: 201263307
Signed-off-by: Charan Teja Reddy <quic_charante@quicinc.com>
Signed-off-by: Sukadev Bhattiprolu <quic_sukadev@quicinc.com>
Change-Id: Ib3137dab0db66ecf6858c4077dcadb9dfd0c6b1c
This commit is contained in:
Charan Teja Reddy
2024-02-12 20:07:51 -08:00
committed by Treehugger Robot
parent dfc83778aa
commit 2145149a38
3 changed files with 56 additions and 0 deletions

View File

@@ -1622,6 +1622,18 @@ static inline struct pglist_data *NODE_DATA(int nid)
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
extern int isolate_anon_lru_page(struct page *page);
#ifdef CONFIG_COMPACTION
extern unsigned long isolate_and_split_free_page(struct page *page,
struct list_head *list);
#else
/*
 * No-op stub for !CONFIG_COMPACTION builds: without compaction support no
 * free page can be isolated, so report 0 pages isolated.
 */
static inline unsigned long isolate_and_split_free_page(struct page *page,
		struct list_head *list)
{
	return 0;
}
#endif /* CONFIG_COMPACTION */
/**
* for_each_online_pgdat - helper macro to iterate over all online nodes

View File

@@ -174,6 +174,7 @@ void putback_movable_pages(struct list_head *l)
}
}
}
EXPORT_SYMBOL_GPL(putback_movable_pages);
/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
@@ -2127,6 +2128,7 @@ out:
return rc_gather;
}
EXPORT_SYMBOL_GPL(migrate_pages);
struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{

View File

@@ -452,6 +452,23 @@ unsigned long get_pfnblock_flags_mask(const struct page *page,
}
EXPORT_SYMBOL_GPL(get_pfnblock_flags_mask);
int isolate_anon_lru_page(struct page *page)
{
int ret;
if (!PageLRU(page) || !PageAnon(page))
return -EINVAL;
if (!get_page_unless_zero(page))
return -EINVAL;
ret = folio_isolate_lru(page_folio(page));
put_page(page);
return ret;
}
EXPORT_SYMBOL_GPL(isolate_anon_lru_page);
static __always_inline int get_pfnblock_migratetype(const struct page *page,
unsigned long pfn)
{
@@ -6919,6 +6936,31 @@ static void split_free_pages(struct list_head *list)
}
}
#ifdef CONFIG_COMPACTION
/**
 * isolate_and_split_free_page - isolate a free buddy page and split it
 * into order-0 pages for reverse migration.
 * @page: candidate page; must currently be a free buddy page.
 * @list: array of lists indexed by page order (see the split_free_pages()
 *        API this was ported to) onto which the isolated page is placed.
 *
 * Returns the number of base pages isolated (1 << order), or 0 if @page
 * is not a buddy page or isolation fails.
 *
 * NOTE(review): __isolate_free_page() manipulates the buddy free lists,
 * which normally requires zone->lock to be held by the caller — confirm
 * the locking context, since split_free_pages()/post_alloc_hook() are
 * typically called *outside* that lock in alloc_contig_range().
 */
unsigned long isolate_and_split_free_page(struct page *page,
		struct list_head *list)
{
	unsigned long isolated;
	unsigned int order;

	/* Only a page still sitting in the buddy allocator can be taken. */
	if (!PageBuddy(page))
		return 0;

	order = buddy_order(page);
	/* On success this returns the number of base pages (1 << order). */
	isolated = __isolate_free_page(page, order);
	if (!isolated)
		return 0;

	/*
	 * Record the order in page_private and queue the page on the
	 * per-order list; split_free_pages() walks @list by order and
	 * breaks each entry into order-0 pages.
	 */
	set_page_private(page, order);
	list_add(&page->lru, &list[order]);

	split_free_pages(list);

	return isolated;
}
EXPORT_SYMBOL_GPL(isolate_and_split_free_page);
#endif
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate