diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4943732e07cd..4607610ef062 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7616,32 +7616,6 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 }
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
-bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
-					 struct kvm_gfn_range *range)
-{
-	/*
-	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
-	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
-	 * can simply ignore such slots.  But if userspace is making memory
-	 * PRIVATE, then KVM must prevent the guest from accessing the memory
-	 * as shared.  And if userspace is making memory SHARED and this point
-	 * is reached, then at least one page within the range was previously
-	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
-	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
-	 * a hugepage can be used for affected ranges.
-	 */
-	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
-		return false;
-
-	/* Unmap the old attribute page. */
-	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
-		range->attr_filter = KVM_FILTER_SHARED;
-	else
-		range->attr_filter = KVM_FILTER_PRIVATE;
-
-	return kvm_unmap_gfn_range(kvm, range);
-}
-
 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
 				int level)
 {
@@ -7660,6 +7634,69 @@ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
 	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
 }
 
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+					 struct kvm_gfn_range *range)
+{
+	struct kvm_memory_slot *slot = range->slot;
+	int level;
+
+	/*
+	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
+	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
+	 * can simply ignore such slots.  But if userspace is making memory
+	 * PRIVATE, then KVM must prevent the guest from accessing the memory
+	 * as shared.  And if userspace is making memory SHARED and this point
+	 * is reached, then at least one page within the range was previously
+	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
+	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
+	 * a hugepage can be used for affected ranges.
+	 */
+	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+		return false;
+
+	if (WARN_ON_ONCE(range->end <= range->start))
+		return false;
+
+	/*
+	 * If the head and tail pages of the range currently allow a hugepage,
+	 * i.e. reside fully in the slot and don't have mixed attributes, then
+	 * add each corresponding hugepage range to the ongoing invalidation,
+	 * e.g. to prevent KVM from creating a hugepage in response to a fault
+	 * for a gfn whose attributes aren't changing.  Note, only the range
+	 * of gfns whose attributes are being modified needs to be explicitly
+	 * unmapped, as that will unmap any existing hugepages.
+	 */
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		gfn_t start = gfn_round_for_level(range->start, level);
+		gfn_t end = gfn_round_for_level(range->end - 1, level);
+		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+		if ((start != range->start || start + nr_pages > range->end) &&
+		    start >= slot->base_gfn &&
+		    start + nr_pages <= slot->base_gfn + slot->npages &&
+		    !hugepage_test_mixed(slot, start, level))
+			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+		if (end == start)
+			continue;
+
+		if ((end + nr_pages) > range->end &&
+		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+		    !hugepage_test_mixed(slot, end, level))
+			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+	}
+
+	/* Unmap the old attribute page. */
+	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
+		range->attr_filter = KVM_FILTER_SHARED;
+	else
+		range->attr_filter = KVM_FILTER_PRIVATE;
+
+	return kvm_unmap_gfn_range(kvm, range);
+}
+
+
+
 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       gfn_t gfn, int level, unsigned long attrs)
 {
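Illustrative note (not part of the patch): the head/tail loop added above rounds range->start down, and range->end - 1 down, to the hugepage boundary for each level, and adds those hugepage-sized ranges to the ongoing invalidation only when they spill outside the attribute-change range (and, in the real code, only when they sit fully inside the slot and are not already mixed). The toy program below is a standalone sketch of just that rounding/overlap check; the 512-GFN "2M" page size and round_down_2m() helper are simplified stand-ins for the kernel's KVM_PAGES_PER_HPAGE() and gfn_round_for_level(), and the slot-bounds and mixed-attributes checks are omitted.

/*
 * Standalone sketch (userspace, not kernel code) of the head/tail hugepage
 * rounding performed by kvm_arch_pre_set_memory_attributes() above.
 */
#include <stdio.h>

typedef unsigned long long gfn_t;

#define NR_PAGES_2M	512ULL	/* GFNs per 2M hugepage, assuming 4K base pages */

static gfn_t round_down_2m(gfn_t gfn)
{
	return gfn & ~(NR_PAGES_2M - 1);
}

int main(void)
{
	/* Hypothetical attribute change covering GFNs [0x805, 0x1003). */
	gfn_t range_start = 0x805, range_end = 0x1003;

	gfn_t head = round_down_2m(range_start);
	gfn_t tail = round_down_2m(range_end - 1);

	/* Head hugepage overlaps the range but isn't fully covered by it. */
	if (head != range_start || head + NR_PAGES_2M > range_end)
		printf("invalidate head [%#llx, %#llx)\n", head, head + NR_PAGES_2M);

	/* Tail hugepage, if distinct from the head, extends past range_end. */
	if (tail != head && tail + NR_PAGES_2M > range_end)
		printf("invalidate tail [%#llx, %#llx)\n", tail, tail + NR_PAGES_2M);

	return 0;
}

For this example the head hugepage [0x800, 0xa00) and the tail hugepage [0x1000, 0x1200) both extend beyond the range being converted, so both would be added to the invalidation; the GFNs strictly inside the range need no such treatment because they are unmapped explicitly by kvm_unmap_gfn_range().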