For CMA allocation, it's really critical to migrate a page but sometimes it fails. One of the reasons is that some driver holds a page refcount for a long time so the VM couldn't migrate the page at that time. The concern here is that there is no way to effectively find out who holds the refcount of the page. This patch introduces a feature to keep tracking a page's pinner. All get_page sites are vulnerable to pinning a page for a long time, but the cost to keep track of it would be significant since get_page is the most frequent kernel operation. Furthermore, the page might not be a user page but a kernel page, which is not related to the page migration failure. Thus, this patch keeps track of only migration-failed pages to reduce runtime cost. Once page migration fails in the CMA allocation path, those pages are marked as "migration failure" and on every put_page operation against those pages, the callstack of the put is recorded into the page_pinner buffer. Later, an admin can see which pages failed and who released the refcount since the failure. It really helps to effectively find the long-time refcount holder that prevents the page migration. note: page_pinner doesn't guarantee that attributing/unattributing are atomic if they happen at the same time. It's just best effort, so false positives could happen. Fix build issue during rebase, replace page_mapping() with folio_mapping() and page_mapcount() with folio_mapcount(). Bug: 183414571 Bug: 240196534 Signed-off-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Minchan Kim <minchan@google.com> Change-Id: I603d0c0122734c377db6b1eb95848a6f734173a0 (cherry picked from commit e6e6e1273db431fe03ae6b0bf48738970f6ccaea) Signed-off-by: Georgi Djakov <quic_c_gdjako@quicinc.com>
134 lines
3.0 KiB
C
134 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __LINUX_PAGE_EXT_H
|
|
#define __LINUX_PAGE_EXT_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/stacktrace.h>
|
|
|
|
struct pglist_data;
|
|
|
|
#ifdef CONFIG_PAGE_EXTENSION
|
|
/**
 * struct page_ext_operations - per page_ext client operations
 * @offset: Offset to the client's data within page_ext. Offset is returned to
 *          the client by page_ext_init.
 * @size: The size of the client data within page_ext.
 * @need: Function that returns true if client requires page_ext.
 * @init: (optional) Called to initialize client once page_exts are allocated.
 * @need_shared_flags: True when client is using shared page_ext->flags
 *                     field.
 *
 * Each Page Extension client must define page_ext_operations in
 * page_ext_ops array.
 */
struct page_ext_operations {
	size_t offset;
	size_t size;
	bool (*need)(void);
	void (*init)(void);
	bool need_shared_flags;
};
|
|
|
|
/*
 * The page_ext_flags users must set need_shared_flags to true.
 * These bits live in the shared page_ext->flags word.
 */
enum page_ext_flags {
	/* NOTE(review): presumably page_owner state bits — confirm in mm/page_owner.c */
	PAGE_EXT_OWNER,
	PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_PAGE_PINNER)
	/* page migration failed */
	PAGE_EXT_PINNER_MIGRATION_FAILED,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	/*
	 * On 32-bit, idle page tracking has no spare page flags, so the
	 * young/idle bits are kept in page_ext instead.
	 */
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,
#endif
};
|
|
|
|
/*
 * Page Extension can be considered as an extended mem_map.
 * A page_ext page is associated with every page descriptor. The
 * page_ext helps us add more information about the page.
 * All page_ext are allocated at boot or memory hotplug event,
 * then the page_ext for pfn always exists.
 */
struct page_ext {
	unsigned long flags;	/* shared flag bits, see enum page_ext_flags */
};
|
|
|
|
/* True when page_ext was set up early in boot — presumably via a boot
 * parameter; confirm in mm/page_ext.c. */
extern bool early_page_ext;
/* Stride between consecutive page_ext records: base struct plus all
 * clients' data (see page_ext_next()). */
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);

/* Report whether page_ext was initialized early in boot. */
static inline bool early_page_ext_enabled(void)
{
	return early_page_ext;
}
|
|
|
|
#ifdef CONFIG_SPARSEMEM
/* With sparsemem, the flatmem hooks are no-ops; all page_ext storage is
 * set up by page_ext_init(). */
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}
#else
/* With flatmem, initialization happens in the flatmem hooks instead and
 * page_ext_init() is a no-op. */
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
#endif
|
|
|
|
/* Look up the page_ext record for @page; pair each successful
 * page_ext_get() with a page_ext_put() when done with the record. */
extern struct page_ext *page_ext_get(const struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
|
|
|
|
static inline void *page_ext_data(struct page_ext *page_ext,
|
|
struct page_ext_operations *ops)
|
|
{
|
|
return (void *)(page_ext) + ops->offset;
|
|
}
|
|
|
|
static inline struct page_ext *page_ext_next(struct page_ext *curr)
|
|
{
|
|
void *next = curr;
|
|
next += page_ext_size;
|
|
return next;
|
|
}
|
|
|
|
#else /* !CONFIG_PAGE_EXTENSION */
|
|
struct page_ext;
|
|
|
|
/* Without CONFIG_PAGE_EXTENSION there is no early page_ext setup. */
static inline bool early_page_ext_enabled(void)
{
	return false;
}
|
|
|
|
/* No-op stub: page extensions are compiled out. */
static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}
|
|
|
|
/* No-op stub: page extensions are compiled out. */
static inline void page_ext_init(void)
{
}
|
|
|
|
/* No-op stub: page extensions are compiled out. */
static inline void page_ext_init_flatmem_late(void)
{
}
|
|
|
|
/* No-op stub: page extensions are compiled out. */
static inline void page_ext_init_flatmem(void)
{
}
|
|
|
|
/* Without CONFIG_PAGE_EXTENSION there is no extension record to look up. */
static inline struct page_ext *page_ext_get(const struct page *page)
{
	return NULL;
}
|
|
|
|
/* No-op counterpart of page_ext_get() when page extensions are disabled. */
static inline void page_ext_put(struct page_ext *page_ext)
{
}
|
|
#endif /* CONFIG_PAGE_EXTENSION */
|
|
#endif /* __LINUX_PAGE_EXT_H */
|