ANDROID: 16K: Ensure unmapped_area returns a __PAGE_ALIGNED address

In Linux based systems, the kernel page size directly impacts the minimum
unit at which applications interact with memory regions. By increasing the
page size, we effectively decrease the granularity of user-space operations
on memory.

Given this, a larger page size (__PAGE_SIZE) can be emulated for the purpose
of testing user-space compatibility.

To achieve this emulation, the strategy involves enforcing a consistent
__PAGE_SIZE-alignment and __PAGE_SIZE-multiple-size for all memory regions.
This ensures that memory regions are structured and accessed in a way that
mirrors the behavior associated with a larger page-sized system.

Any operations conducted on these memory regions are similarly required to
interact specifically with __PAGE_SIZE-aligned and __PAGE_SIZE-multiple-sized
areas. This will be handled in subsequent patches.

Bug: 383389337
Bug: 315325080
Bug: 302403436
Change-Id: Ib9667d1df3e6ef90175b091c99a89c37ea734c79
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
This commit is contained in:
Kalesh Singh
2024-02-06 22:21:33 -08:00
committed by Carlos Llamas
parent 796be8fd27
commit e7f83d4d4b
2 changed files with 32 additions and 4 deletions

View File

@@ -59,4 +59,31 @@ static __always_inline unsigned __page_shift(void)
/* True iff @addr is aligned to the emulated page size (__PAGE_SIZE). */
#define __PAGE_ALIGNED(addr) (!__offset_in_page_log(addr))

/*
 * Increases @size by an adequate amount to allow __PAGE_SIZE alignment
 * by rounding up; given that @size is already a multiple of the
 * base page size (PAGE_SIZE).
 *
 * The added slack is the worst-case distance between a PAGE_SIZE-aligned
 * address and the next __PAGE_SIZE-aligned address, i.e.
 * __PAGE_SIZE - PAGE_SIZE.
 *
 * Example:
 *  If __PAGE_SHIFT == PAGE_SHIFT == 12
 *  @size is increased by 0:
 *      ((1 << (0)) - 1) << PAGE_SHIFT
 *      ((1       ) - 1) << PAGE_SHIFT
 *      ( 0            ) << PAGE_SHIFT
 *
 *  If __PAGE_SHIFT == 13 and PAGE_SHIFT == 12
 *  @size is increased by PAGE_SIZE (4KB):
 *      ((1 << (1)) - 1) << PAGE_SHIFT
 *      ((2       ) - 1) << PAGE_SHIFT
 *      ( 1            ) << PAGE_SHIFT
 *
 *  If __PAGE_SHIFT == 14 and PAGE_SHIFT == 12
 *  @size is increased by 3xPAGE_SIZE (12KB):
 *      ((1 << (2)) - 1) << PAGE_SHIFT
 *      ((4       ) - 1) << PAGE_SHIFT
 *      ( 3            ) << PAGE_SHIFT
 *  ...
 */
#define __PAGE_SIZE_ROUND_UP_ADJ(size) \
	((size) + (((1 << (__PAGE_SHIFT - PAGE_SHIFT)) - 1) << PAGE_SHIFT))
#endif /* __LINUX_PAGE_SIZE_COMPAT_H */

View File

@@ -17,6 +17,7 @@
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/page_size_compat.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
@@ -617,7 +618,7 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
VMA_ITERATOR(vmi, current->mm, 0);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask + info->start_gap;
length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask + info->start_gap);
if (length < info->length)
return -ENOMEM;
@@ -653,7 +654,7 @@ retry:
}
}
return gap;
return __PAGE_ALIGN(gap);
}
/**
@@ -674,7 +675,7 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
VMA_ITERATOR(vmi, current->mm, 0);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask + info->start_gap;
length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask + info->start_gap);
if (length < info->length)
return -ENOMEM;
@@ -705,7 +706,7 @@ retry:
}
}
return gap;
return __PAGE_ALIGN(gap);
}
/*