ANDROID: KVM: arm64: Deprecate lazy pte mappings for hyp modules

Remove the lazy PTE mapping functionality for vendor modules
since we now support donating memory through the FF-A lend
interface.

Bug: 352480558
Bug: 278749606
Bug: 381963586
Change-Id: I632d937cda52d8869b1157063d24dc86ee53085f
Signed-off-by: Sebastian Ene <sebastianene@google.com>
This commit is contained in:
Sebastian Ene
2025-06-06 13:08:51 +00:00
parent 5d45bc0cb2
commit 9fc45b0aca
6 changed files with 12 additions and 125 deletions

View File

@@ -941,27 +941,4 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
*/
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
phys_addr_t addr, size_t size);
/**
* kvm_pgtable_stage2_get_pages() - Raise the refcount for each entry and unmap them.
*
* @pgt: Page-table structure initialised by kvm_pgtable_*_init()
* or a similar initialiser.
* @addr: Input address for the start of the walk.
* @size: Size of the range.
* @mc: Cache of pre-allocated and zeroed memory from which to allocate
* page-table pages.
*/
int kvm_pgtable_stage2_get_pages(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc);
/**
* kvm_pgtable_stage2_put_pages() - Drop the refcount for each entry. This is the
* opposite of kvm_pgtable_stage2_get_pages().
*
* @pgt: Page-table structure initialised by kvm_pgtable_*_init()
* or a similar initialiser.
* @addr: Input address for the start of the walk.
* @size: Size of the range.
*/
int kvm_pgtable_stage2_put_pages(struct kvm_pgtable *pgt, u64 addr, u64 size);
#endif /* __ARM64_KVM_PGTABLE_H__ */

View File

@@ -95,12 +95,14 @@ struct pkvm_sglist_page {
* allows to apply this prot on a range of
* contiguous memory.
* @host_stage2_enable_lazy_pte:
* DEPRECATED
* Unmap a range of memory from the host stage-2,
leaving the pages' host ownership intact. The
* pages will be remapped lazily (subject to the
* usual ownership checks) in response to a
* faulting access from the host.
* @host_stage2_disable_lazy_pte:
* DEPRECATED
* This is the opposite function of
* host_stage2_enable_lazy_pte. Must be called once
* the module is done with the region.

View File

@@ -78,7 +78,6 @@ int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
u64 ipa, u64 *ppa);
int __pkvm_use_dma(u64 phys_addr, size_t size, struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_unuse_dma(u64 phys_addr, size_t size, struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_host_lazy_pte(u64 pfn, u64 nr_pages, bool enable);
u64 __pkvm_ptdump_get_config(pkvm_handle_t handle, enum pkvm_ptdump_ops op);
u64 __pkvm_ptdump_walk_range(pkvm_handle_t handle, struct pkvm_ptdump_log_hdr *log_hva);

View File

@@ -1779,45 +1779,6 @@ unlock:
return ret;
}
/*
 * Toggle lazy PTE handling for a range of host-owned memory pages.
 *
 * @pfn:      first page frame of the range
 * @nr_pages: number of pages in the range
 * @enable:   true to unmap the range (refcounts raised, remapped lazily on
 *            a host fault), false to drop the refcounts and restore the
 *            identity mapping
 *
 * Returns 0 on success, -EINVAL on arithmetic overflow, -EPERM for MMIO
 * ranges, or the error from the underlying page-table operation.
 */
int __pkvm_host_lazy_pte(u64 pfn, u64 nr_pages, bool enable)
{
	u64 start = hyp_pfn_to_phys(pfn);
	u64 len, end;
	struct memblock_region *reg;
	struct kvm_mem_range range;
	int ret;

	/* Refuse ranges whose size or end address overflows u64. */
	if (check_shl_overflow(nr_pages, PAGE_SHIFT, &len) ||
	    check_add_overflow(start, len, &end))
		return -EINVAL;

	/* Only normal memory may be toggled; reject MMIO regions. */
	reg = find_mem_range(start, &range);
	if (!reg || !is_in_mem_range(end - 1, &range))
		return -EPERM;

	host_lock_component();

	/* The whole range must currently be owned by the host. */
	ret = ___host_check_page_state_range(start, len, PKVM_PAGE_OWNED, reg, true);
	if (ret)
		goto unlock;

	if (!enable) {
		ret = kvm_pgtable_stage2_put_pages(&host_mmu.pgt, start, len);
		if (ret)
			goto unlock;
		/* Restore the host identity mapping now that refs are dropped. */
		WARN_ON(host_stage2_idmap_locked(start, len, PKVM_HOST_MEM_PROT, false));
	} else {
		ret = kvm_pgtable_stage2_get_pages(&host_mmu.pgt, start, len,
						   &host_s2_pool);
	}

unlock:
	host_unlock_component();
	return ret;
}
int hyp_pin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);

View File

@@ -116,12 +116,20 @@ static void tracing_mod_hyp_printk(u8 fmt_id, u64 a, u64 b, u64 c, u64 d)
/*
 * Module hypercall stub for the deprecated lazy PTE enable path.
 *
 * The lazy PTE mechanism is no longer needed now that the host can unmap
 * pages on FF-A lend, so warn any module that still calls this and refuse
 * the request. The previous unreachable call into __pkvm_host_lazy_pte()
 * after the return has been removed.
 *
 * Always returns -EPERM.
 */
static int host_stage2_enable_lazy_pte(u64 pfn, u64 nr_pages)
{
	WARN_ON(1);
	return -EPERM;
}
/*
 * Module hypercall stub for the deprecated lazy PTE disable path.
 *
 * Counterpart of host_stage2_enable_lazy_pte(); deprecated for the same
 * reason. The previous unreachable call into __pkvm_host_lazy_pte() after
 * the return has been removed.
 *
 * Always returns -EPERM.
 */
static int host_stage2_disable_lazy_pte(u64 pfn, u64 nr_pages)
{
	WARN_ON(1);
	return -EPERM;
}
static int __hyp_smp_processor_id(void)

View File

@@ -1263,66 +1263,6 @@ int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
return ret;
}
/*
 * Leaf walker for kvm_pgtable_stage2_get_pages(): install the (invalid)
 * mapping for this entry, then raise the refcount on last-level entries.
 */
static int stage2_get_pages_walker(const struct kvm_pgtable_visit_ctx *ctx,
				   enum kvm_pgtable_walk_flags visit)
{
	struct stage2_map_data *data = ctx->arg;
	int err;

	err = stage2_map_walk_leaf(ctx, data);
	if (err)
		return err;

	/* force_pte guarantees leaves at the last level; count only those. */
	if (ctx->level == KVM_PGTABLE_LAST_LEVEL)
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}
/*
 * Raise the refcount on each last-level entry in [addr, addr + size) and
 * replace the mappings with invalid entries (KVM_PHYS_INVALID). @mc supplies
 * pre-allocated zeroed pages for any table levels the walk must create.
 */
int kvm_pgtable_stage2_get_pages(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc)
{
	/* force_pte so refcounts land on last-level entries only. */
	struct stage2_map_data data = {
		.phys		= KVM_PHYS_INVALID,
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.force_pte	= true,
	};
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_get_pages_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &data,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}
/*
 * Leaf walker for kvm_pgtable_stage2_put_pages(): drop one reference on
 * each last-level entry previously taken by stage2_get_pages_walker().
 */
static int stage2_put_pages_walker(const struct kvm_pgtable_visit_ctx *ctx,
				   enum kvm_pgtable_walk_flags visit)
{
	/* get_pages maps with force_pte, so only last-level leaves exist. */
	if (WARN_ON(ctx->level != KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	ctx->mm_ops->put_page(ctx->ptep);
	return 0;
}
/*
 * Drop the refcount on each last-level entry in [addr, addr + size).
 * Counterpart of kvm_pgtable_stage2_get_pages().
 */
int kvm_pgtable_stage2_put_pages(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_put_pages_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		/*
		 * stage2_put_pages_walker() never reads ctx->arg, so don't
		 * pass pgt->mmu here; the old value was a misleading
		 * leftover from the unmap-walker pattern.
		 */
		.arg	= NULL,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}
static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{