ANDROID: 16K: Emulate /proc/pid/pagemap

Collapse X kernel-page pagemap entries into a single emulated pagemap
entry.

PM_PFRAME_BITS are zeroed since there is no guarantee these are
contiguous in physical memory.

Swap-related bits are also zeroed since there is no guarantee that swap
offsets are contiguous.

UFFD WP bit is zeroed as it is not supported for x86_64 16K [1].

All other bits are set if any of the subpages' bits are set.

Also introduce __pagemap_lseek(), to adjust the file offset to compensate
for the fact that userspace believes the page size is larger than it
actually is.

[1] https://r.android.com/3424862

Bug: 383389337
Bug: 385167611
Test: atest vts_ltp_test_x86_64:syscalls.msync04_64bit#syscalls.msync04_64bit
Test: atest vts_ltp_test_x86_64:syscalls.mmap12_64bit#syscalls.mmap12_64bit
Test: atest libmeminfo_test
Test: atest bionic-unit-tests:DlExtRelroSharingTest#VerifyMemorySaving
Note: bionic-unit-tests must be run as root (add require_root: true)
Change-Id: Ifc159f63f4b18dc43799b104d6be7d3dcb4fca49
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
This commit is contained in:
Kalesh Singh
2025-01-23 23:18:35 +00:00
committed by Carlos Llamas
parent 3c9a39c770
commit 669f0c4355

View File

@@ -1936,6 +1936,30 @@ static const struct mm_walk_ops pagemap_ops = {
.walk_lock = PGWALK_RDLOCK,
};
/*
 * Fold runs of @nr_subpages native pagemap entries from @src_vec into a
 * single emulated entry each in @res_vec, so userspace sees one entry per
 * emulated (larger) page.
 *
 * Only flags that remain meaningful for the emulated page are kept:
 * SOFT_DIRTY, MMAP_EXCLUSIVE, FILE and PRESENT are OR-ed together across
 * the sub-entries. The PFN and all other bits (swap, UFFD WP) stay zero,
 * since neither physical frames nor swap offsets are guaranteed to be
 * contiguous across the sub-pages.
 *
 * @src_vec may alias @res_vec (a result slot is reset before any of its
 * sub-entries are read, and res index i/nr_subpages never exceeds i).
 */
static inline void __collapse_pagemap_result(pagemap_entry_t *src_vec,
					     pagemap_entry_t *res_vec,
					     unsigned int entries,
					     unsigned int nr_subpages)
{
	const u64 keep_mask = PM_SOFT_DIRTY | PM_MMAP_EXCLUSIVE |
			      PM_FILE | PM_PRESENT;
	unsigned int i;

	/* Native page size == emulated page size: nothing to collapse. */
	if (nr_subpages == 1)
		return;

	for (i = 0; i < entries; i++) {
		pagemap_entry_t *res = &res_vec[i / nr_subpages];

		/* Start each emulated entry from a clean slate. */
		if (!(i % nr_subpages))
			*res = make_pme(0, 0);

		res->pme |= src_vec[i].pme & keep_mask;
	}
}
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
@@ -1974,6 +1998,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
unsigned long start_vaddr;
unsigned long end_vaddr;
int ret = 0, copied = 0;
unsigned int nr_subpages = __PAGE_SIZE / PAGE_SIZE;
pagemap_entry_t *res = NULL;
if (!mm || !mmget_not_zero(mm))
goto out;
@@ -1996,6 +2022,21 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!pm.buffer)
goto out_mm;
if (unlikely(nr_subpages > 1)) {
/*
 * Userspace thinks the pages are larger than they actually are, so adjust
 * the count to compensate.
 */
count *= nr_subpages;
res = kcalloc(pm.len / nr_subpages, PM_ENTRY_BYTES, GFP_KERNEL);
if (!res) {
ret = -ENOMEM;
goto out_free;
}
} else
res = pm.buffer;
src = *ppos;
svpfn = src / PM_ENTRY_BYTES;
end_vaddr = mm->task_size;
@@ -2038,19 +2079,33 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
start_vaddr = end;
len = min(count, PM_ENTRY_BYTES * pm.pos);
if (copy_to_user(buf, pm.buffer, len)) {
__collapse_pagemap_result(pm.buffer, res, len / PM_ENTRY_BYTES, nr_subpages);
if (copy_to_user(buf, res, len / nr_subpages)) {
ret = -EFAULT;
goto out_free;
}
/*
* If emulating the page size, clear the old results, to avoid
* corrupting the next __collapse_pagemap_result()
*/
if (unlikely(nr_subpages > 1))
memset(res, 0, len / nr_subpages);
copied += len;
buf += len;
buf += len / nr_subpages;
count -= len;
}
*ppos += copied;
if (!ret || ret == PM_END_OF_BUFFER)
ret = copied;
ret = copied / nr_subpages;
out_free:
/* Avoid double free, as res = pm.buffer if nr_subpages == 1 */
if (unlikely(nr_subpages > 1))
kfree(res);
kfree(pm.buffer);
out_mm:
mmput(mm);
@@ -2841,8 +2896,27 @@ static long do_pagemap_cmd(struct file *file, unsigned int cmd,
}
}
/*
 * llseek for /proc/pid/pagemap when emulating a larger page size.
 *
 * Userspace computes pagemap offsets assuming __PAGE_SIZE pages, while the
 * kernel's file position is in native PAGE_SIZE entries. Scale the requested
 * offset up before seeking and scale the resulting position back down, so
 * both sides keep a consistent view.
 *
 * Returns the new position in userspace (emulated page size) units, or a
 * negative error code.
 */
loff_t __pagemap_lseek(struct file *file, loff_t offset, int orig)
{
	unsigned long nr_subpages = __PAGE_SIZE / PAGE_SIZE;
	loff_t ret;

	/*
	 * Userspace thinks the pages are larger than they actually are, so
	 * adjust the offset to compensate.
	 */
	offset *= nr_subpages;

	ret = mem_lseek(file, offset, orig); /* borrow this */
	if (ret < 0)
		return ret;	/* propagate -errno, not the scaled offset */

	/* Re-adjust the offset to reflect the larger userspace page size. */
	return ret / nr_subpages;
}
const struct file_operations proc_pagemap_operations = {
.llseek = mem_lseek, /* borrow this */
.llseek = __pagemap_lseek,
.read = pagemap_read,
.open = pagemap_open,
.release = pagemap_release,