ANDROID: vendor_hooks: add hooks to modify pageflags

These hooks allow vendor (OEM) modules to set or clear OEM-reserved page flags at points where a page's memory state may change (anonymous fault, swap-in, write-protect fault, uprobe page replacement, and shmem swap-in).

Bug: 336964184

Change-Id: I9cb288ef6eef7a719d4f4748d6b71010645b7d50
Signed-off-by: Xiaofeng Yuan <yuanxiaofeng@vivo.com>
This commit is contained in:
Xiaofeng Yuan
2024-04-28 11:03:29 +08:00
committed by Hao Chen
parent 22cf53077b
commit 0142de08c8
5 changed files with 30 additions and 0 deletions

View File

@@ -545,3 +545,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_oom_swapmem_gather_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_oom_swapmem_gather_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_gfp_zone_flags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_readahead_gfp_mask);
/*
 * Pageflag vendor hooks: exported so GKI vendor (OEM) modules can register
 * handlers on the android_vh_* tracepoints declared in trace/hooks/mm.h.
 * Per the commit description, handlers use these to set or clear
 * OEM-reserved page flags when a page's state may change.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shmem_swapin_folio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_wp_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_anonymous_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_uprobes_replace_page);

View File

@@ -497,6 +497,22 @@ DECLARE_HOOK(android_vh_oom_swapmem_gather_init,
DECLARE_HOOK(android_vh_oom_swapmem_gather_finish,
TP_PROTO(struct mm_struct *mm),
TP_ARGS(mm));
/*
 * Pageflag vendor hooks: invoked at points where a folio's state changes,
 * so OEM handlers can set or clear OEM-reserved page flags (per the commit
 * description). All are normal (non-restricted) vendor hooks.
 */
/* Called from do_anonymous_page() after the new folio is marked uptodate. */
DECLARE_HOOK(android_vh_do_anonymous_page,
	TP_PROTO(struct vm_area_struct *vma, struct folio *folio),
	TP_ARGS(vma, folio));
/*
 * Called from do_swap_page() after soft-dirty/uffd-wp bits are applied.
 * Receives a pointer to the pte being assembled, so the handler can
 * presumably adjust it before it is installed — TODO confirm intended use.
 */
DECLARE_HOOK(android_vh_do_swap_page,
	TP_PROTO(struct folio *folio, pte_t *pte, struct vm_fault *vmf,
		swp_entry_t entry),
	TP_ARGS(folio, pte, vmf, entry));
/* Called from do_wp_page() just before the private-copy (CoW) path. */
DECLARE_HOOK(android_vh_do_wp_page,
	TP_PROTO(struct folio *folio),
	TP_ARGS(folio));
/*
 * Called from __replace_page() after the new anonymous folio has been
 * rmapped and added to the LRU; old_folio is the page being replaced.
 */
DECLARE_HOOK(android_vh_uprobes_replace_page,
	TP_PROTO(struct folio *new_folio, struct folio *old_folio),
	TP_ARGS(new_folio, old_folio));
/* Called from shmem_swapin_folio() with the folio locked. */
DECLARE_HOOK(android_vh_shmem_swapin_folio,
	TP_PROTO(struct folio *folio),
	TP_ARGS(folio));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */

View File

@@ -29,6 +29,9 @@
#include <linux/uprobes.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
@@ -202,6 +205,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
folio_get(new_folio);
folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, vma);
trace_android_vh_uprobes_replace_page(new_folio, old_folio);
} else
/* no new page, just dec_mm_counter for old_page */
dec_mm_counter(mm, MM_ANONPAGES);

View File

@@ -3739,6 +3739,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
return wp_page_shared(vmf, folio);
}
trace_android_vh_do_wp_page(folio);
/*
* Private mapping: create an exclusive anonymous page copy if reuse
* is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
@@ -4564,6 +4566,7 @@ check_folio:
pte = pte_mksoft_dirty(pte);
if (pte_swp_uffd_wp(vmf->orig_pte))
pte = pte_mkuffd_wp(pte);
trace_android_vh_do_swap_page(folio, &pte, vmf, entry);
/*
* Same logic as in do_wp_page(); however, optimize for pages that are
@@ -4828,6 +4831,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
*/
__folio_mark_uptodate(folio);
trace_android_vh_do_anonymous_page(vma, folio);
entry = mk_pte(&folio->page, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
if (vma->vm_flags & VM_WRITE)

View File

@@ -2178,6 +2178,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
/* We have to do this with folio locked to prevent races */
folio_lock(folio);
trace_android_vh_shmem_swapin_folio(folio);
if (!folio_test_swapcache(folio) ||
folio->swap.val != swap.val ||
!shmem_confirm_swap(mapping, index, swap)) {