From e7f83d4d4babb97047876e58941ae2615d738fd3 Mon Sep 17 00:00:00 2001
From: Kalesh Singh <kaleshsingh@google.com>
Date: Tue, 6 Feb 2024 22:21:33 -0800
Subject: [PATCH] ANDROID: 16K: Ensure unmapped_area returns a __PAGE_ALIGNED
 address

In Linux based systems, the kernel page size directly impacts the
minimum unit at which applications interact with memory regions. By
increasing the page size, we effectively decrease the granularity of
user-space operations on memory.

Given this, a larger-page-size (__PAGE_SIZE) can be emulated for the
purpose of testing user-space compatibility.

To achieve this emulation, the strategy involves enforcing a consistent
__PAGE_SIZE-alignment and __PAGE_SIZE-multiple-size for all memory
regions. This ensures that memory regions are structured and accessed
in a way that mirrors the behavior associated with a larger page-sized
system.

Any operations conducted on these memory regions are similarly required
to interact specifically with __PAGE_SIZE-aligned and
__PAGE_SIZE-multiple-sized areas. This will be handled in subsequent
patches.

Bug: 383389337
Bug: 315325080
Bug: 302403436
Change-Id: Ib9667d1df3e6ef90175b091c99a89c37ea734c79
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 include/linux/page_size_compat.h | 27 +++++++++++++++++++++++++++
 mm/mmap.c                        |  9 +++++----
 2 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/include/linux/page_size_compat.h b/include/linux/page_size_compat.h
index 4059902922b7..17450746414a 100644
--- a/include/linux/page_size_compat.h
+++ b/include/linux/page_size_compat.h
@@ -59,4 +59,31 @@ static __always_inline unsigned __page_shift(void)
 
 #define __PAGE_ALIGNED(addr) (!__offset_in_page_log(addr))
 
+/*
+ * Increases @size by an adequate amount to allow __PAGE_SIZE alignment
+ * by rounding up; given that @size is already a multiple of the
+ * base page size (PAGE_SIZE).
+ *
+ * Example:
+ *     If __PAGE_SHIFT == PAGE_SHIFT == 12
+ *         @size is increased by 0
+ *             ((1 << (0)) - 1) << PAGE_SHIFT
+ *             ((1       ) - 1) << PAGE_SHIFT
+ *             ((0            )) << PAGE_SHIFT
+ *
+ *     If __PAGE_SHIFT == 13 and PAGE_SHIFT == 12
+ *         @size is increased by PAGE_SIZE (4KB):
+ *             ((1 << (1)) - 1) << PAGE_SHIFT
+ *             ((2       ) - 1) << PAGE_SHIFT
+ *             ((1            )) << PAGE_SHIFT
+ *     If __PAGE_SHIFT == 14 and PAGE_SHIFT == 12
+ *         @size is increased by 3xPAGE_SIZE (12KB):
+ *             ((1 << (2)) - 1) << PAGE_SHIFT
+ *             ((4       ) - 1) << PAGE_SHIFT
+ *             ((3            )) << PAGE_SHIFT
+ *     ...
+ */
+#define __PAGE_SIZE_ROUND_UP_ADJ(size) \
+	((size) + (((1 << (__PAGE_SHIFT - PAGE_SHIFT)) - 1) << PAGE_SHIFT))
+
 #endif /* __LINUX_PAGE_SIZE_COMPAT_H */
diff --git a/mm/mmap.c b/mm/mmap.c
index 2f8383c61f5b..bc6948ca5f8b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <linux/page_size_compat.h>
 #include
 #include
 #include
@@ -617,7 +618,7 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 	VMA_ITERATOR(vmi, current->mm, 0);
 
 	/* Adjust search length to account for worst case alignment overhead */
-	length = info->length + info->align_mask + info->start_gap;
+	length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask + info->start_gap);
 	if (length < info->length)
 		return -ENOMEM;
 
@@ -653,7 +654,7 @@ retry:
 		}
 	}
 
-	return gap;
+	return __PAGE_ALIGN(gap);
 }
 
 /**
@@ -674,7 +675,7 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 	VMA_ITERATOR(vmi, current->mm, 0);
 
 	/* Adjust search length to account for worst case alignment overhead */
-	length = info->length + info->align_mask + info->start_gap;
+	length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask + info->start_gap);
 	if (length < info->length)
 		return -ENOMEM;
 
@@ -705,7 +706,7 @@ retry:
 		}
 	}
 
-	return gap;
+	return __PAGE_ALIGN(gap);
 }
 
 /*