iommufd/selftest: Fix iommufd_dirty_tracking with large hugepage sizes

commit 818625570558cd91082c9bafd6f2b59b73241a69 upstream.

The hugepage test cases of iommufd_dirty_tracking have the 64MB and 128MB
coverages. Both of them are smaller than the default hugepage size 512MB,
when CONFIG_PAGE_SIZE_64KB=y. However, these test cases have a variant of
using huge pages, which would mmap(MAP_HUGETLB) using these smaller sizes
than the system hugepage size. This results in the kernel aligning up the
smaller size to 512MB. If memory was located between the upper 64/128MB
size boundary and the hugepage 512MB boundary, it would get wiped out:
https://lore.kernel.org/all/aEoUhPYIAizTLADq@nvidia.com/

Given that this aligning up behavior is well documented, we have no choice
but to allocate a hugepage aligned size to avoid this unintended wipe out.
Instead of relying on the kernel's internal force alignment, pass the same
size to posix_memalign() and mmap().

Also, fix the FIXTURE_TEARDOWN() misusing munmap() to free the memory from
posix_memalign(), as munmap() doesn't destroy the allocator metadata. So,
call free() instead.

Fixes: a9af47e382 ("iommufd/selftest: Test IOMMU_HWPT_GET_DIRTY_BITMAP")
Link: https://patch.msgid.link/r/1ea8609ae6d523fdd4d8efb179ddee79c8582cb6.1750787928.git.nicolinc@nvidia.com
Cc: stable@vger.kernel.org
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Nicolin Chen
2025-06-24 11:00:45 -07:00
committed by Greg Kroah-Hartman
parent a99f80c88a
commit dae12bc688

View File

@@ -1748,6 +1748,7 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
FIXTURE_SETUP(iommufd_dirty_tracking)
{
size_t mmap_buffer_size;
unsigned long size;
int mmap_flags;
void *vrc;
@@ -1762,22 +1763,33 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
if (rc || !self->buffer) {
SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
variant->buffer_size, rc);
}
mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
mmap_buffer_size = variant->buffer_size;
if (variant->hugepages) {
/*
* MAP_POPULATE will cause the kernel to fail mmap if THPs are
* not available.
*/
mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
/*
* Allocation must be aligned to the HUGEPAGE_SIZE, because the
* following mmap() will automatically align the length to be a
* multiple of the underlying huge page size. Failing to do the
* same at this allocation will result in a memory overwrite by
* the mmap().
*/
if (mmap_buffer_size < HUGEPAGE_SIZE)
mmap_buffer_size = HUGEPAGE_SIZE;
}
rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
if (rc || !self->buffer) {
SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
mmap_buffer_size, rc);
}
assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
mmap_flags, -1, 0);
assert(vrc == self->buffer);
@@ -1806,8 +1818,8 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
munmap(self->buffer, variant->buffer_size);
munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
free(self->buffer);
free(self->bitmap);
teardown_iommufd(self->fd, _metadata);
}