// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst-case buffer size needed to hold an integer: sign, ten decimal digits, and a NUL. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* The default (0) resets per-vcpu halt_poll_ns when shrinking. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
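
/*
 * Illustrative arithmetic only (see grow_halt_poll_ns() and
 * shrink_halt_poll_ns() for the authoritative logic): with the defaults
 * above, a vCPU's halt_poll_ns grows 0 -> 10000 -> 20000 -> 40000 -> ...
 * up to the per-VM cap, while a shrink divisor of 0 resets it straight
 * back to 0 on a failed poll.
 */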

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static struct file_operations kvm_chardev_ops;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open was done by a 64-bit task and the KVM fd is then
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
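
/*
 * Illustrative sketch of the pairing discipline (not a real call site):
 * anything that touches loaded vCPU state is bracketed by vcpu_load() and
 * vcpu_put(), under the vCPU mutex.  kvm_vcpu_ioctl() does roughly:
 *
 *	if (mutex_lock_killable(&vcpu->mutex))
 *		return -EINTR;
 *	vcpu_load(vcpu);
 *	r = ...;		// act on the loaded vCPU state
 *	vcpu_put(vcpu);
 *	mutex_unlock(&vcpu->mutex);
 */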

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
				  struct cpumask *tmp, int current_cpu)
{
	int cpu;

	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
		__kvm_make_request(req, vcpu);

	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_request_needs_ipi(), which could result in sending an IPI
	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
	 * satisfied if the vCPU migrates.  Entering READING_SHADOW_PAGE_TABLES
	 * after this point is also OK, as the requirement is only that KVM wait
	 * for vCPUs that were reading SPTEs _before_ any changes were
	 * finalized.  See kvm_vcpu_kick() for more details on handling requests.
	 */
	if (kvm_request_needs_ipi(vcpu, req)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != -1 && cpu != current_cpu)
			__cpumask_set_cpu(cpu, tmp);
	}
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	int i, me;
	bool called;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	unsigned long i;
	bool called;
	int me;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == except)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
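
/*
 * Illustrative usage sketch: broadcast a request to every vCPU and, because
 * KVM_REQ_TLB_FLUSH carries KVM_REQUEST_WAIT, wait for in-guest vCPUs to
 * acknowledge the IPI before returning:
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 *
 * kvm_flush_remote_tlbs() below is a real caller of this pattern.
 */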

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

static void kvm_flush_shadow_all(struct kvm *kvm)
{
	kvm_arch_flush_shadow_all(kvm);
	kvm_arch_guest_memory_reclaimed(kvm);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
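
/*
 * Illustrative usage of the cache API above (names are hypothetical, see the
 * x86 MMU for real call sites): top up the cache outside of mmu_lock, where
 * GFP_KERNEL_ACCOUNT allocations may sleep, then consume objects atomically
 * while holding mmu_lock:
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, min);
 *	if (r)
 *		return r;
 *	spin_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	...
 *	spin_unlock(&kvm->mmu_lock);
 */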
#endif

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
	rcuwait_init(&vcpu->wait);
#endif
	kvm_async_pf_vcpu_init(vcpu);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = NULL;
}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_destroy(vcpu);
	kvm_dirty_ring_free(&vcpu->dirty_ring);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_destroy_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
		xa_erase(&kvm->vcpu_array, i);
	}

	atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start, unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
	srcu_read_unlock(&kvm->srcu, idx);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
			     unsigned long end);

typedef void (*on_unlock_fn_t)(struct kvm *kvm);

struct kvm_hva_range {
	unsigned long start;
	unsigned long end;
	pte_t pte;
	hva_handler_t handler;
	on_lock_fn_t on_lock;
	on_unlock_fn_t on_unlock;
	bool flush_on_ret;
	bool may_block;
};

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
	     node;							     \
	     node = interval_tree_iter_next(node, start, last))	     \

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	if (WARN_ON_ONCE(range->end <= range->start))
		return 0;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct interval_tree_node *node;

		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot_in_hva_range(node, slots,
						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm, range->start, range->end);
				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && ret)
		kvm_flush_remote_tlbs(kvm);

	if (locked) {
		KVM_MMU_UNLOCK(kvm);
		if (!IS_KVM_NULL_FN(range->on_unlock))
			range->on_unlock(kvm);
	}

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_notifier_count is zero, then no in-progress invalidations,
	 * including this one, found a relevant memslot at start(); rechecking
	 * memslots here is unnecessary.  Note, a false positive (count elevated
	 * by a different invalidation) is sub-optimal but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_notifier_count))
		return;

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_notifier_range_start =
			min(kvm->mmu_notifier_range_start, start);
		kvm->mmu_notifier_range_end =
			max(kvm->mmu_notifier_range_end, end);
	}
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_inc_notifier_count,
		.on_unlock	= kvm_arch_guest_memory_reclaimed,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_notifier_count
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
					  hva_range.may_block);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * This sequence increase will notify the KVM page fault handler
	 * that the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_dec_notifier_count,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	__kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	wake = (--kvm->mn_active_invalidate_count == 0);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access bit to clear, so we have to tear down the EPT
	 * tables instead.  If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence.  If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else  /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	kfree(slot);
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct hlist_node *idnode;
	struct kvm_memory_slot *memslot;
	int bkt;

	/*
	 * The same memslot objects live in both active and inactive sets;
	 * arbitrarily free using index '1' so that the second invocation of
	 * this function isn't operating over a structure with dangling
	 * pointers (even though this function isn't actually touching them).
	 */
	if (!slots->node_idx)
		return;

	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
		kvm_free_memslot(kvm, memslot);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}


static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (IS_ERR(kvm->debugfs_dentry))
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	static DEFINE_MUTEX(kvm_debugfs_lock);
	struct dentry *dent;
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	const struct _kvm_stats_desc *pdesc;
	int i, ret;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	mutex_lock(&kvm_debugfs_lock);
	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
	if (dent) {
		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
		dput(dent);
		mutex_unlock(&kvm_debugfs_lock);
		return 0;
	}
	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
	mutex_unlock(&kvm_debugfs_lock);
	if (IS_ERR(dent))
		return 0;

	kvm->debugfs_dentry = dent;
	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	ret = kvm_arch_create_vm_debugfs(kvm);
	if (ret) {
		kvm_destroy_vm_debugfs(kvm);
		return ret;
	}

	return 0;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-VM debugfs directory has been created.  When called,
 * kvm->debugfs_dentry should already be set up, so arch-specific debugfs
 * entries can be created under it.  Cleanup happens automatically and
 * recursively in kvm_destroy_vm_debugfs(), so a per-arch destroy interface
 * is not needed.
 */
int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	struct kvm_memslots *slots;
	int r = -ENOMEM;
	int i, j;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
	xa_init(&kvm->vcpu_array);

	INIT_LIST_HEAD(&kvm->gpc_list);
	spin_lock_init(&kvm->gpc_lock);

	INIT_LIST_HEAD(&kvm->devices);
	kvm->max_vcpus = KVM_MAX_VCPUS;

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	/*
	 * Force subsequent debugfs file creations to fail if the VM directory
	 * is not created (by kvm_create_vm_debugfs()).
	 */
	kvm->debugfs_dentry = ERR_PTR(-ENOENT);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for (j = 0; j < 2; j++) {
			slots = &kvm->__memslots[i][j];

			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
			slots->hva_tree = RB_ROOT_CACHED;
			slots->gfn_tree = RB_ROOT;
			hash_init(slots->id_hash);
			slots->node_idx = j;

			/* Generations must be different for each address space. */
			slots->generation = i;
		}

		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	kvm->max_halt_poll_ns = halt_poll_ns;

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	/*
	 * When the fd passed to this ioctl() is opened it pins the module,
	 * but try_module_get() also prevents getting a reference if the module
	 * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
	 */
	if (!try_module_get(kvm_chardev_ops.owner)) {
		r = -ENODEV;
		goto out_err;
	}

	return kvm;

out_err:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in install_new_memslots as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	kvm->mn_active_invalidate_count = 0;
#else
	kvm_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
	module_put(kvm_chardev_ops.owner);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the vm is not in the middle of destruction; this is a safe
 * version of kvm_get_kvm().  Returns true if kvm was referenced
 * successfully, false otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size; the
 * second half is used as a scratch buffer when fetching the dirty log.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
	int node_idx_inactive = active->node_idx ^ 1;

	return &kvm->__memslots[as_id][node_idx_inactive];
}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
 */
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
				  struct kvm_memory_slot *b)
{
	if (WARN_ON_ONCE(!a && !b))
		return 0;

	if (!a)
		return b->as_id;
	if (!b)
		return a->as_id;

	WARN_ON_ONCE(a->as_id != b->as_id);
	return a->as_id;
}

static void kvm_insert_gfn_node(struct kvm_memslots *slots,
				struct kvm_memory_slot *slot)
{
	struct rb_root *gfn_tree = &slots->gfn_tree;
	struct rb_node **node, *parent;
	int idx = slots->node_idx;

	parent = NULL;
	for (node = &gfn_tree->rb_node; *node; ) {
		struct kvm_memory_slot *tmp;

		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
		parent = *node;
		if (slot->base_gfn < tmp->base_gfn)
			node = &(*node)->rb_left;
		else if (slot->base_gfn > tmp->base_gfn)
			node = &(*node)->rb_right;
		else
			BUG();
	}

	rb_link_node(&slot->gfn_node[idx], parent, node);
	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
}

static void kvm_erase_gfn_node(struct kvm_memslots *slots,
			       struct kvm_memory_slot *slot)
{
	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
}

static void kvm_replace_gfn_node(struct kvm_memslots *slots,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int idx = slots->node_idx;

	WARN_ON_ONCE(old->base_gfn != new->base_gfn);

	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
			&slots->gfn_tree);
}

/*
 * Replace @old with @new in the inactive memslots.
 *
 * With NULL @old this simply adds @new.
 * With NULL @new this simply removes @old.
 *
 * If @new is non-NULL its hva_node[slots_idx] range has to be set
 * appropriately.
 */
static void kvm_replace_memslot(struct kvm *kvm,
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
	int idx = slots->node_idx;

	if (old) {
		hash_del(&old->id_node[idx]);
		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);

		if ((long)old == atomic_long_read(&slots->last_used_slot))
			atomic_long_set(&slots->last_used_slot, (long)new);

		if (!new) {
			kvm_erase_gfn_node(slots, old);
			return;
		}
	}

	/*
	 * Initialize @new's hva range.  Do this even when replacing an @old
	 * slot; kvm_copy_memslot() deliberately does not touch node data.
	 */
	new->hva_node[idx].start = new->userspace_addr;
	new->hva_node[idx].last = new->userspace_addr +
				  (new->npages << PAGE_SHIFT) - 1;

	/*
	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
	 * so hva_node needs to be swapped with remove+insert even though the
	 * hva can't change when replacing an existing slot.
	 */
	hash_add(slots->id_hash, &new->id_node[idx], new->id);
	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);

	/*
	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
	 * switch the node in the gfn tree instead of removing the old and
	 * inserting the new as two separate operations.  Replacement is a
	 * single O(1) operation versus two O(log(n)) operations for
	 * remove+insert.
	 */
	if (old && old->base_gfn == new->base_gfn) {
		kvm_replace_gfn_node(slots, old, new);
	} else {
		if (old)
			kvm_erase_gfn_node(slots, old);
		kvm_insert_gfn_node(slots, new);
	}
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);

	/* Grab the generation from the active memslots. */
	u64 gen = __kvm_memslots(kvm, as_id)->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Do not store the new memslots while there are invalidations in
	 * progress, otherwise the locking in invalidate_range_start and
	 * invalidate_range_end will be unbalanced.
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Acquired in kvm_set_memslot().  Must be released before the SRCU
	 * synchronization below in order to avoid deadlock with another
	 * thread acquiring the slots_arch_lock in an srcu critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;
}

static int kvm_prepare_memory_region(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	int r;

	/*
	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
	 * will be freed on "commit".  If logging is enabled in both old and
	 * new, reuse the existing bitmap.  If logging is enabled only in the
	 * new and KVM isn't using a ring buffer, allocate and initialize a
	 * new bitmap.
	 */
	if (change != KVM_MR_DELETE) {
		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
			new->dirty_bitmap = NULL;
		else if (old && old->dirty_bitmap)
			new->dirty_bitmap = old->dirty_bitmap;
		else if (!kvm->dirty_ring_size) {
			r = kvm_alloc_dirty_bitmap(new);
			if (r)
				return r;

			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
				bitmap_set(new->dirty_bitmap, 0, new->npages);
		}
	}

	r = kvm_arch_prepare_memory_region(kvm, old, new, change);

	/* Free the bitmap on failure if it was allocated above. */
	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
		kvm_destroy_dirty_bitmap(new);

	return r;
}

static void kvm_commit_memory_region(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	/*
	 * Update the total number of memslot pages before calling the arch
	 * hook so that architectures can consume the result directly.
	 */
	if (change == KVM_MR_DELETE)
		kvm->nr_memslot_pages -= old->npages;
	else if (change == KVM_MR_CREATE)
		kvm->nr_memslot_pages += new->npages;

	kvm_arch_commit_memory_region(kvm, old, new, change);

	switch (change) {
	case KVM_MR_CREATE:
		/* Nothing more to do. */
		break;
	case KVM_MR_DELETE:
		/* Free the old memslot and all its metadata. */
		kvm_free_memslot(kvm, old);
		break;
	case KVM_MR_MOVE:
	case KVM_MR_FLAGS_ONLY:
		/*
		 * Free the dirty bitmap as needed; the below check encompasses
		 * both the flags and whether a ring buffer is being used.
		 */
		if (old->dirty_bitmap && !new->dirty_bitmap)
			kvm_destroy_dirty_bitmap(old);

		/*
		 * The final quirk.  Free the detached, old slot, but only its
		 * memory, not any metadata.  Metadata, including arch specific
		 * data, may be reused by @new.
		 */
		kfree(old);
		break;
	default:
		BUG();
	}
}

/*
 * Activate @new, which must be installed in the inactive slots by the caller,
 * by swapping the active slots and then propagating @new to @old once @old is
 * unreachable and can be safely modified.
 *
 * With NULL @old this simply adds @new to @active (while swapping the sets).
 * With NULL @new this simply removes @old from @active and frees it
 * (while also swapping the sets).
 */
static void kvm_activate_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);

	kvm_swap_active_memslots(kvm, as_id);

	/* Propagate the new memslot to the now inactive memslots. */
	kvm_replace_memslot(kvm, old, new);
}

static void kvm_copy_memslot(struct kvm_memory_slot *dest,
			     const struct kvm_memory_slot *src)
{
	dest->base_gfn = src->base_gfn;
	dest->npages = src->npages;
	dest->dirty_bitmap = src->dirty_bitmap;
	dest->arch = src->arch;
	dest->userspace_addr = src->userspace_addr;
	dest->flags = src->flags;
	dest->id = src->id;
	dest->as_id = src->as_id;
}

static void kvm_invalidate_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Mark the current slot INVALID.  As with all memslot modifications,
	 * this must be done on an unreachable slot to avoid modifying the
	 * current slot in the active tree.
	 */
	kvm_copy_memslot(invalid_slot, old);
	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
	kvm_replace_memslot(kvm, old, invalid_slot);

	/*
	 * Activate the slot that is now marked INVALID, but don't propagate
	 * the slot to the now inactive slots. The slot is either going to be
	 * deleted or recreated as a new slot.
	 */
	kvm_swap_active_memslots(kvm, old->as_id);

	/*
	 * From this point no new shadow pages pointing to a deleted, or moved,
	 * memslot will be created.  Validation of sp->gfn happens in:
	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
	 *	- kvm_is_visible_gfn (mmu_check_root)
	 */
	kvm_arch_flush_shadow_memslot(kvm, old);
	kvm_arch_guest_memory_reclaimed(kvm);

	/* Was released by kvm_swap_active_memslots, reacquire. */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Copy the arch-specific field of the newly-installed slot back to the
	 * old slot as the arch data could have changed between releasing
	 * slots_arch_lock in install_new_memslots() and re-acquiring the lock
	 * above.  Writers are required to retrieve memslots *after* acquiring
	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
	 */
	old->arch = invalid_slot->arch;
}

static void kvm_create_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *new)
{
	/* Add the new memslot to the inactive set and activate. */
	kvm_replace_memslot(kvm, NULL, new);
	kvm_activate_memslot(kvm, NULL, new);
}

static void kvm_delete_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *old,
			       struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Remove the old memslot (in the inactive memslots) by passing NULL as
	 * the "new" slot, and do the same for the invalid version in the
	 * active slots.
	 */
	kvm_replace_memslot(kvm, old, NULL);
	kvm_activate_memslot(kvm, invalid_slot, NULL);
}

static void kvm_move_memslot(struct kvm *kvm,
			     struct kvm_memory_slot *old,
			     struct kvm_memory_slot *new,
			     struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Replace the old memslot in the inactive slots, and then swap slots
	 * and replace the current INVALID with the new as well.
	 */
	kvm_replace_memslot(kvm, old, new);
	kvm_activate_memslot(kvm, invalid_slot, new);
}

static void kvm_update_flags_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new)
{
	/*
	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
	 * an intermediate step.  Instead, the old memslot is simply replaced
	 * with a new, updated copy in both memslot sets.
	 */
	kvm_replace_memslot(kvm, old, new);
	kvm_activate_memslot(kvm, old, new);
}

static int kvm_set_memslot(struct kvm *kvm,
			   struct kvm_memory_slot *old,
			   struct kvm_memory_slot *new,
			   enum kvm_mr_change change)
{
	struct kvm_memory_slot *invalid_slot;
	int r;

	/*
	 * Released in kvm_swap_active_memslots.
	 *
	 * Must be held from before the current memslots are copied until
	 * after the new memslots are installed with rcu_assign_pointer,
	 * then released before the synchronize srcu in kvm_swap_active_memslots.
	 *
	 * When modifying memslots outside of the slots_lock, must be held
	 * before reading the pointer to the current memslots until after all
	 * changes to those memslots are complete.
	 *
	 * These rules ensure that installing new memslots does not lose
	 * changes made to the previous memslots.
	 */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Invalidate the old slot if it's being deleted or moved.  This is
	 * done prior to actually deleting/moving the memslot to allow vCPUs to
	 * continue running by ensuring there are no mappings or shadow pages
	 * for the memslot when it is deleted/moved.  Without pre-invalidation
	 * (and without a lock), a window would exist between effecting the
	 * delete/move and committing the changes in arch code where KVM or a
	 * guest could access a non-existent memslot.
	 *
	 * Modifications are done on a temporary, unreachable slot.  The old
	 * slot needs to be preserved in case a later step fails and the
	 * invalidation needs to be reverted.
	 */
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
		if (!invalid_slot) {
			mutex_unlock(&kvm->slots_arch_lock);
			return -ENOMEM;
		}
		kvm_invalidate_memslot(kvm, old, invalid_slot);
	}

	r = kvm_prepare_memory_region(kvm, old, new, change);
	if (r) {
		/*
		 * For DELETE/MOVE, revert the above INVALID change.  No
		 * modifications required since the original slot was preserved
		 * in the inactive slots.  Changing the active memslots also
		 * releases slots_arch_lock.
		 */
		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
			kvm_activate_memslot(kvm, invalid_slot, old);
			kfree(invalid_slot);
		} else {
			mutex_unlock(&kvm->slots_arch_lock);
		}
		return r;
	}

	/*
	 * For DELETE and MOVE, the working slot is now active as the INVALID
	 * version of the old slot.  MOVE is particularly special as it reuses
	 * the old slot and returns a copy of the old slot (in invalid_slot).
	 * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
	 * old slot is detached but otherwise preserved.
	 */
| if (change == KVM_MR_CREATE) |
| kvm_create_memslot(kvm, new); |
| else if (change == KVM_MR_DELETE) |
| kvm_delete_memslot(kvm, old, invalid_slot); |
| else if (change == KVM_MR_MOVE) |
| kvm_move_memslot(kvm, old, new, invalid_slot); |
| else if (change == KVM_MR_FLAGS_ONLY) |
| kvm_update_flags_memslot(kvm, old, new); |
| else |
| BUG(); |
| |
| /* Free the temporary INVALID slot used for DELETE and MOVE. */ |
| if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) |
| kfree(invalid_slot); |
| |
| /* |
| * No need to refresh new->arch, changes after dropping slots_arch_lock |
| * will directly hit the final, active memslot. Architectures are |
| * responsible for knowing that new->arch may be stale. |
| */ |
| kvm_commit_memory_region(kvm, old, new, change); |
| |
| return 0; |
| } |
| |
| static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, |
| gfn_t start, gfn_t end) |
| { |
| struct kvm_memslot_iter iter; |
| |
| kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { |
| if (iter.slot->id != id) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* |
| * Allocate some memory and give it an address in the guest physical address |
| * space. |
| * |
| * Discontiguous memory is allowed, mostly for framebuffers. |
| * |
| * Must be called holding kvm->slots_lock for write. |
| */ |
| int __kvm_set_memory_region(struct kvm *kvm, |
| const struct kvm_userspace_memory_region *mem) |
| { |
| struct kvm_memory_slot *old, *new; |
| struct kvm_memslots *slots; |
| enum kvm_mr_change change; |
| unsigned long npages; |
| gfn_t base_gfn; |
| int as_id, id; |
| int r; |
| |
| r = check_memory_region_flags(mem); |
| if (r) |
| return r; |
| |
| as_id = mem->slot >> 16; |
| id = (u16)mem->slot; |
| |
| /* General sanity checks */ |
| if ((mem->memory_size & (PAGE_SIZE - 1)) || |
| (mem->memory_size != (unsigned long)mem->memory_size)) |
| return -EINVAL; |
| if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
| return -EINVAL; |
| /* We can read the guest memory with __xxx_user() later on. */ |
| if ((mem->userspace_addr & (PAGE_SIZE - 1)) || |
| (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || |
| !access_ok((void __user *)(unsigned long)mem->userspace_addr, |
| mem->memory_size)) |
| return -EINVAL; |
| if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) |
| return -EINVAL; |
| if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
| return -EINVAL; |
| if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) |
| return -EINVAL; |
| |
| slots = __kvm_memslots(kvm, as_id); |
| |
| /* |
| * Note, the old memslot (and the pointer itself!) may be invalidated |
| * and/or destroyed by kvm_set_memslot(). |
| */ |
| old = id_to_memslot(slots, id); |
| |
| if (!mem->memory_size) { |
| if (!old || !old->npages) |
| return -EINVAL; |
| |
| if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) |
| return -EIO; |
| |
| return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); |
| } |
| |
| base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); |
| npages = (mem->memory_size >> PAGE_SHIFT); |
| |
| if (!old || !old->npages) { |
| change = KVM_MR_CREATE; |
| |
| /* |
| * To simplify KVM internals, the total number of pages across |
| * all memslots must fit in an unsigned long. |
| */ |
| if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) |
| return -EINVAL; |
| } else { /* Modify an existing slot. */ |
| if ((mem->userspace_addr != old->userspace_addr) || |
| (npages != old->npages) || |
| ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) |
| return -EINVAL; |
| |
| if (base_gfn != old->base_gfn) |
| change = KVM_MR_MOVE; |
| else if (mem->flags != old->flags) |
| change = KVM_MR_FLAGS_ONLY; |
| else /* Nothing to change. */ |
| return 0; |
| } |
| |
| if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && |
| kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) |
| return -EEXIST; |
| |
| /* Allocate a slot that will persist in the memslots. */ |
| new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); |
| if (!new) |
| return -ENOMEM; |
| |
| new->as_id = as_id; |
| new->id = id; |
| new->base_gfn = base_gfn; |
| new->npages = npages; |
| new->flags = mem->flags; |
| new->userspace_addr = mem->userspace_addr; |
| |
| r = kvm_set_memslot(kvm, old, new, change); |
| if (r) |
| kfree(new); |
| return r; |
| } |
| EXPORT_SYMBOL_GPL(__kvm_set_memory_region); |
| |
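| /* |
| * Like __kvm_set_memory_region(), but takes and releases kvm->slots_lock |
| * on behalf of the caller. |
| */ |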
| int kvm_set_memory_region(struct kvm *kvm, |
| const struct kvm_userspace_memory_region *mem) |
| { |
| int r; |
| |
| mutex_lock(&kvm->slots_lock); |
| r = __kvm_set_memory_region(kvm, mem); |
| mutex_unlock(&kvm->slots_lock); |
| return r; |
| } |
| EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
| |
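| /* |
| * ioctl entry point; only user-visible slot ids (below |
| * KVM_USER_MEM_SLOTS) may be modified from userspace, internal slots are |
| * owned by the kernel. |
| */ |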
| static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
| struct kvm_userspace_memory_region *mem) |
| { |
| if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) |
| return -EINVAL; |
| |
| return kvm_set_memory_region(kvm, mem); |
| } |
| |
| #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| /** |
| * kvm_get_dirty_log - get a snapshot of dirty pages |
| * @kvm: pointer to kvm instance |
| * @log: slot id and address to which we copy the log |
| * @is_dirty: set to '1' if any dirty pages were found |
| * @memslot: set to the associated memslot, always valid on success |
| */ |
| int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, |
| int *is_dirty, struct kvm_memory_slot **memslot) |
| { |
| struct kvm_memslots *slots; |
| int i, as_id, id; |
| unsigned long n; |
| unsigned long any = 0; |
| |
| /* Dirty ring tracking is mutually exclusive with dirty log tracking */ |
| if (kvm->dirty_ring_size) |
| return -ENXIO; |
| |
| *memslot = NULL; |
| *is_dirty = 0; |
| |
| as_id = log->slot >> 16; |
| id = (u16)log->slot; |
| if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
| return -EINVAL; |
| |
| slots = __kvm_memslots(kvm, as_id); |
| *memslot = id_to_memslot(slots, id); |
| if (!(*memslot) || !(*memslot)->dirty_bitmap) |
| return -ENOENT; |
| |
| kvm_arch_sync_dirty_log(kvm, *memslot); |
| |
| n = kvm_dirty_bitmap_bytes(*memslot); |
| |
| for (i = 0; !any && i < n/sizeof(long); ++i) |
| any = (*memslot)->dirty_bitmap[i]; |
| |
| if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) |
| return -EFAULT; |
| |
| if (any) |
| *is_dirty = 1; |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_get_dirty_log); |
| |
| #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
| /** |
| * kvm_get_dirty_log_protect - get a snapshot of dirty pages |
| * and reenable dirty page tracking for the corresponding pages. |
| * @kvm: pointer to kvm instance |
| * @log: slot id and address to which we copy the log |
| * |
| * Keep in mind that vCPU threads can write to the bitmap concurrently. |
| * So, to avoid losing track of dirty pages, we keep the following order: |
| * |
| * 1. Take a snapshot of the bit and clear it if needed. |
| * 2. Write protect the corresponding page. |
| * 3. Copy the snapshot to userspace. |
| * 4. Upon return, the caller flushes the TLBs if needed. |
| * |
| * Between 2 and 4, the guest may write to the page using a remaining TLB |
| * entry. This is not a problem because the page is reported dirty using |
| * the snapshot taken before, and step 4 ensures that writes done after |
| * exiting to userspace will be logged for the next call. |
| */ |
| static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) |
| { |
| struct kvm_memslots *slots; |
| struct kvm_memory_slot *memslot; |
| int i, as_id, id; |
| unsigned long n; |
| unsigned long *dirty_bitmap; |
| unsigned long *dirty_bitmap_buffer; |
| bool flush; |
| |
| /* Dirty ring tracking is mutually exclusive with dirty log tracking */ |
| if (kvm->dirty_ring_size) |
| return -ENXIO; |
| |
| as_id = log->slot >> 16; |
| id = (u16)log->slot; |
| if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
| return -EINVAL; |
| |
| slots = __kvm_memslots(kvm, as_id); |
| memslot = id_to_memslot(slots, id); |
| if (!memslot || !memslot->dirty_bitmap) |
| return -ENOENT; |
| |
| dirty_bitmap = memslot->dirty_bitmap; |
| |
| kvm_arch_sync_dirty_log(kvm, memslot); |
| |
| n = kvm_dirty_bitmap_bytes(memslot); |
| flush = false; |
| if (kvm->manual_dirty_log_protect) { |
| /* |
| * Unlike kvm_get_dirty_log, flush is never set here, because |
| * no flush is needed until KVM_CLEAR_DIRTY_LOG. There is |
| * some code duplication between this function and |
| * kvm_get_dirty_log, but hopefully all architectures will |
| * transition to kvm_get_dirty_log_protect so that |
| * kvm_get_dirty_log can be eliminated. |
| */ |
| dirty_bitmap_buffer = dirty_bitmap; |
| } else { |
| dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| memset(dirty_bitmap_buffer, 0, n); |
| |
| KVM_MMU_LOCK(kvm); |
| for (i = 0; i < n / sizeof(long); i++) { |
| unsigned long mask; |
| gfn_t offset; |
| |
| if (!dirty_bitmap[i]) |
| continue; |
| |
| flush = true; |
| mask = xchg(&dirty_bitmap[i], 0); |
| dirty_bitmap_buffer[i] = mask; |
| |
| offset = i * BITS_PER_LONG; |
| kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| offset, mask); |
| } |
| KVM_MMU_UNLOCK(kvm); |
| } |
| |
| if (flush) |
| kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| |
| if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) |
| return -EFAULT; |
| return 0; |
| } |
| |
| /** |
| * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot |
| * @kvm: kvm instance |
| * @log: slot id and address to which we copy the log |
| * |
| * Steps 1-4 below provide a general overview of dirty page logging. See |
| * kvm_get_dirty_log_protect() function description for additional details. |
| * |
| * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we |
| * always flush the TLB (step 4) even if a previous step failed and the |
| * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM |
| * logging API does not preclude subsequent dirty log reads by userspace. |
| * Flushing the TLB ensures writes will be marked dirty for the next log read. |
| * |
| * 1. Take a snapshot of the bit and clear it if needed. |
| * 2. Write protect the corresponding page. |
| * 3. Copy the snapshot to the userspace. |
| * 4. Flush TLB's if needed. |
| */ |
| static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
| struct kvm_dirty_log *log) |
| { |
| int r; |
| |
| mutex_lock(&kvm->slots_lock); |
| |
| r = kvm_get_dirty_log_protect(kvm, log); |
| |
| mutex_unlock(&kvm->slots_lock); |
| return r; |
| } |
| |
| /** |
| * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap |
| * and reenable dirty page tracking for the corresponding pages. |
| * @kvm: pointer to kvm instance |
| * @log: slot id and address from which to fetch the bitmap of dirty pages |
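| * |
| * log->first_page must be a multiple of 64, and log->num_pages must also |
| * be a multiple of 64 unless the range ends exactly at the final page of |
| * the memslot. For example (illustrative values only), a 1000-page slot |
| * allows first_page = 960 with num_pages = 40, because the range reaches |
| * the end of the slot. |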
| */ |
| static int kvm_clear_dirty_log_protect(struct kvm *kvm, |
| struct kvm_clear_dirty_log *log) |
| { |
| struct kvm_memslots *slots; |
| struct kvm_memory_slot *memslot; |
| int as_id, id; |
| gfn_t offset; |
| unsigned long i, n; |
| unsigned long *dirty_bitmap; |
| unsigned long *dirty_bitmap_buffer; |
| bool flush; |
| |
| /* Dirty ring tracking is mutually exclusive with dirty log tracking */ |
| if (kvm->dirty_ring_size) |
| return -ENXIO; |
| |
| as_id = log->slot >> 16; |
| id = (u16)log->slot; |
| if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
| return -EINVAL; |
| |
| if (log->first_page & 63) |
| return -EINVAL; |
| |
| slots = __kvm_memslots(kvm, as_id); |
| memslot = id_to_memslot(slots, id); |
| if (!memslot || !memslot->dirty_bitmap) |
| return -ENOENT; |
| |
| dirty_bitmap = memslot->dirty_bitmap; |
| |
| n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; |
| |
| if (log->first_page > memslot->npages || |
| log->num_pages > memslot->npages - log->first_page || |
| (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) |
| return -EINVAL; |
| |
| kvm_arch_sync_dirty_log(kvm, memslot); |
| |
| flush = false; |
| dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) |
| return -EFAULT; |
| |
| KVM_MMU_LOCK(kvm); |
| for (offset = log->first_page, i = offset / BITS_PER_LONG, |
| n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; |
| i++, offset += BITS_PER_LONG) { |
| unsigned long mask = *dirty_bitmap_buffer++; |
| atomic_long_t *p = (atomic_long_t *)&dirty_bitmap[i]; |
| |
| if (!mask) |
| continue; |
| |
| mask &= atomic_long_fetch_andnot(mask, p); |
| |
| /* |
| * mask contains the bits that really have been cleared. This |
| * never includes any bits beyond the length of the memslot (if |
| * the length is not aligned to 64 pages), therefore it is not |
| * a problem if userspace sets them in log->dirty_bitmap. |
| */ |
| if (mask) { |
| flush = true; |
| kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| offset, mask); |
| } |
| } |
| KVM_MMU_UNLOCK(kvm); |
| |
| if (flush) |
| kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| |
| return 0; |
| } |
| |
| static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, |
| struct kvm_clear_dirty_log *log) |
| { |
| int r; |
| |
| mutex_lock(&kvm->slots_lock); |
| |
| r = kvm_clear_dirty_log_protect(kvm, log); |
| |
| mutex_unlock(&kvm->slots_lock); |
| return r; |
| } |
| #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
| |
| struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) |
| { |
| return __gfn_to_memslot(kvm_memslots(kvm), gfn); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_memslot); |
| |
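| /* |
| * Per-vCPU variant of gfn_to_memslot() that first consults the vCPU's |
| * cached last-used memslot before falling back to a full search. |
| */ |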
| struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) |
| { |
| struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); |
| u64 gen = slots->generation; |
| struct kvm_memory_slot *slot; |
| |
| /* |
| * This also protects against using a memslot from a different address space, |
| * since different address spaces have different generation numbers. |
| */ |
| if (unlikely(gen != vcpu->last_used_slot_gen)) { |
| vcpu->last_used_slot = NULL; |
| vcpu->last_used_slot_gen = gen; |
| } |
| |
| slot = try_get_memslot(vcpu->last_used_slot, gfn); |
| if (slot) |
| return slot; |
| |
| /* |
| * Fall back to searching all memslots. We purposely use |
| * search_memslots() instead of __gfn_to_memslot() to avoid |
| * thrashing the VM-wide last_used_slot in kvm_memslots. |
| */ |
| slot = search_memslots(slots, gfn, false); |
| if (slot) { |
| vcpu->last_used_slot = slot; |
| return slot; |
| } |
| |
| return NULL; |
| } |
| |
| bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
| { |
| struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); |
| |
| return kvm_is_visible_memslot(memslot); |
| } |
| EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); |
| |
| bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
| { |
| struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| |
| return kvm_is_visible_memslot(memslot); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); |
| |
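| /* |
| * Returns the size of the host page backing @gfn's mapping, falling back |
| * to PAGE_SIZE if the hva is invalid or no VMA covers it. |
| */ |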
| unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) |
| { |
| struct vm_area_struct *vma; |
| unsigned long addr, size; |
| |
| size = PAGE_SIZE; |
| |
| addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); |
| if (kvm_is_error_hva(addr)) |
| return PAGE_SIZE; |
| |
| mmap_read_lock(current->mm); |
| vma = find_vma(current->mm, addr); |
| if (!vma) |
| goto out; |
| |
| size = vma_kernel_pagesize(vma); |
| |
| out: |
| mmap_read_unlock(current->mm); |
| |
| return size; |
| } |
| |
| static bool memslot_is_readonly(const struct kvm_memory_slot *slot) |
| { |
| return slot->flags & KVM_MEM_READONLY; |
| } |
| |
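| /* |
| * Translate @gfn into a host virtual address within @slot. If @nr_pages |
| * is non-NULL, it is set to the number of pages remaining in the slot |
| * starting at @gfn. Write access to a read-only slot fails with |
| * KVM_HVA_ERR_RO_BAD. |
| */ |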
| static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, |
| gfn_t *nr_pages, bool write) |
| { |
| if (!slot || slot->flags & KVM_MEMSLOT_INVALID) |
| return KVM_HVA_ERR_BAD; |
| |
| if (memslot_is_readonly(slot) && write) |
| return KVM_HVA_ERR_RO_BAD; |
| |
| if (nr_pages) |
| *nr_pages = slot->npages - (gfn - slot->base_gfn); |
| |
| return __gfn_to_hva_memslot(slot, gfn); |
| } |
| |
| static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, |
| gfn_t *nr_pages) |
| { |
| return __gfn_to_hva_many(slot, gfn, nr_pages, true); |
| } |
| |
| unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, |
| gfn_t gfn) |
| { |
| return gfn_to_hva_many(slot, gfn, NULL); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); |
| |
| unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) |
| { |
| return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_hva); |
| |
| unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) |
| { |
| return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); |
| |
| /* |
| * Return the hva of a @gfn and the R/W attribute if possible. |
| * |
| * @slot: the kvm_memory_slot which contains @gfn |
| * @gfn: the gfn to be translated |
| * @writable: used to return the read/write attribute of the @slot if the hva |
| * is valid and @writable is not NULL |
| */ |
| unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, |
| gfn_t gfn, bool *writable) |
| { |
| unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); |
| |
| if (!kvm_is_error_hva(hva) && writable) |
| *writable = !memslot_is_readonly(slot); |
| |
| return hva; |
| } |
| |
| unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) |
| { |
| struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| |
| return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| } |
| |
| unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) |
| { |
| struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| |
| return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| } |
| |
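| /* |
| * Probe @addr with FOLL_HWPOISON to detect whether the backing page has |
| * been poisoned by a hardware memory error. |
| */ |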
| static inline int check_user_page_hwpoison(unsigned long addr) |
| { |
| int rc, flags = FOLL_HWPOISON | FOLL_WRITE; |
| |
| rc = get_user_pages(addr, 1, flags, NULL, NULL); |
| return rc == -EHWPOISON; |
| } |
| |
| /* |
| * The fast path to get the writable pfn which will be stored in @pfn. |
| * Returns true on success, false otherwise. This is also the only part |
| * that can run in atomic context. |
| */ |
| static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, |
| bool *writable, kvm_pfn_t *pfn) |
| { |
| struct page *page[1]; |
| |
| /* |
| * Fast pin a writable pfn only if it is a write fault request |
| * or the caller allows to map a writable pfn for a read fault |
| * request. |
| */ |
| if (!(write_fault || writable)) |
| return false; |
| |
| if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { |
| *pfn = page_to_pfn(page[0]); |
| |
| if (writable) |
| *writable = true; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* |
| * The slow path to get the pfn of the specified host virtual address. |
| * Returns 1 on success, or -errno if an error is detected. |
| */ |
| static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, |
| bool *writable, kvm_pfn_t *pfn) |
| { |
| unsigned int flags = FOLL_HWPOISON; |
| struct page *page; |
| int npages = 0; |
| |
| might_sleep(); |
| |
| if (writable) |
| *writable = write_fault; |
| |
| if (write_fault) |
| flags |= FOLL_WRITE; |
| if (async) |
| flags |= FOLL_NOWAIT; |
| |
| npages = get_user_pages_unlocked(addr, 1, &page, flags); |
| if (npages != 1) |
| return npages; |
| |
| /* map read fault as writable if possible */ |
| if (unlikely(!write_fault) && writable) { |
| struct page *wpage; |
| |
| if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { |
| *writable = true; |
| put_page(page); |
| page = wpage; |
| } |
| } |
| *pfn = page_to_pfn(page); |
| return npages; |
| } |
| |
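| /* |
| * Check whether the vma's protection bits permit the access, i.e. whether |
| * faulting the page in asynchronously could succeed. |
| */ |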
| static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) |
| { |
| if (unlikely(!(vma->vm_flags & VM_READ))) |
| return false; |
| |
| if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) |
| return false; |
| |
| return true; |
| } |
| |
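| /* |
| * Try to grab a reference to the page backing @pfn. Reserved pfns are |
| * not refcounted, so treat them as an unconditional success. |
| */ |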
| static int kvm_try_get_pfn(kvm_pfn_t pfn) |
| { |
| if (kvm_is_reserved_pfn(pfn)) |
| return 1; |
| return get_page_unless_zero(pfn_to_page(pfn)); |
| } |
| |
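| /* |
| * Resolve an address in a VM_IO or VM_PFNMAP vma by walking the page |
| * tables directly, since get_user_pages() cannot be used on such vmas. |
| */ |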
| static int hva_to_pfn_remapped(struct vm_area_struct *vma, |
| unsigned long addr, bool write_fault, |
| bool *writable, kvm_pfn_t *p_pfn) |
| { |
| kvm_pfn_t pfn; |
| pte_t *ptep; |
| spinlock_t *ptl; |
| int r; |
| |
| r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); |
| if (r) { |
| /* |
| * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does |
| * not call the fault handler, so do it here. |
| */ |
| bool unlocked = false; |
| r = fixup_user_fault(current->mm, addr, |
| (write_fault ? FAULT_FLAG_WRITE : 0), |
| &unlocked); |
| if (unlocked) |
| return -EAGAIN; |
| if (r) |
| return r; |
| |
| r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); |
| if (r) |
| return r; |
| } |
| |
| if (write_fault && !pte_write(*ptep)) { |
| pfn = KVM_PFN_ERR_RO_FAULT; |
| goto out; |
| } |
| |
| if (writable) |
| *writable = pte_write(*ptep); |
| pfn = pte_pfn(*ptep); |
| |
| /* |
| * Get a reference here because callers of *hva_to_pfn* and |
| * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the |
| * returned pfn. This is only needed if the VMA has VM_MIXEDMAP |
| * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will |
| * simply do nothing for reserved pfns. |
| * |
| * Whoever called remap_pfn_range is also going to call e.g. |
| * unmap_mapping_range before the underlying pages are freed, |
| * causing a call to our MMU notifier. |
| * |
| * Certain IO or PFNMAP mappings can be backed with valid |
| * struct pages, but be allocated without refcounting e.g., |
| * tail pages of non-compound higher order allocations, which |
| * would then underflow the refcount when the caller does the |
| * required put_page. Don't allow those pages here. |
| */ |
| if (!kvm_try_get_pfn(pfn)) |
| r = -EFAULT; |
| |
| out: |
| pte_unmap_unlock(ptep, ptl); |
| *p_pfn = pfn; |
| |
| return r; |
| } |
| |
| /* |
| * Pin guest page in memory and return its pfn. |
| * @addr: host virtual address which maps memory to the guest |
| * @atomic: if true, the function must not sleep; only the fast path is |
| *          tried |
| * @async: if non-NULL, don't wait for IO to complete when the host page |
| *         is not in memory; *@async is set to true if the fault could be |
| *         handled asynchronously |
| * @write_fault: whether we should get a writable host page |
| * @writable: whether it allows to map a writable host page for !@write_fault |
| * |
| * The function will map a writable host page for these two cases: |
| * 1): @write_fault = true |
| * 2): @write_fault = false && @writable is non-NULL; *@writable will tell |
| *     the caller whether the mapping is writable. |
| */ |
| kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, |
| bool write_fault, bool *writable) |
| { |
| struct vm_area_struct *vma; |
| kvm_pfn_t pfn = 0; |
| int npages, r; |
| |
| /* we can do it either atomically or asynchronously, not both */ |
| BUG_ON(atomic && async); |
| |
| if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) |
| return pfn; |
| |
| if (atomic) |
| return KVM_PFN_ERR_FAULT; |
| |
| npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); |
| if (npages == 1) |
| return pfn; |
| |
| mmap_read_lock(current->mm); |
| if (npages == -EHWPOISON || |
| (!async && check_user_page_hwpoison(addr))) { |
| pfn = KVM_PFN_ERR_HWPOISON; |
| goto exit; |
| } |
| |
| retry: |
| vma = vma_lookup(current->mm, addr); |
| |
| if (vma == NULL) |
| pfn = KVM_PFN_ERR_FAULT; |
| else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { |
| r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn); |
| if (r == -EAGAIN) |
| goto retry; |
| if (r < 0) |
| pfn = KVM_PFN_ERR_FAULT; |
| } else { |
| if (async && vma_is_valid(vma, write_fault)) |
| *async = true; |
| pfn = KVM_PFN_ERR_FAULT; |
| } |
| exit: |
| mmap_read_unlock(current->mm); |
| return pfn; |
| } |
| |
| kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, |
| bool atomic, bool *async, bool write_fault, |
| bool *writable, hva_t *hva) |
| { |
| unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); |
| |
| if (hva) |
| *hva = addr; |
| |
| if (addr == KVM_HVA_ERR_RO_BAD) { |
| if (writable) |
| *writable = false; |
| return KVM_PFN_ERR_RO_FAULT; |
| } |
| |
| if (kvm_is_error_hva(addr)) { |
| if (writable) |
| *writable = false; |
| return KVM_PFN_NOSLOT; |
| } |
| |
| /* Do not map a writable pfn for a read-only memslot. */ |
| if (writable && memslot_is_readonly(slot)) { |
| *writable = false; |
| writable = NULL; |
| } |
| |
| return hva_to_pfn(addr, atomic, async, write_fault, |
| writable); |
| } |
| EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); |
| |
| kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
| bool *writable) |
| { |
| return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, |
| write_fault, writable, NULL); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); |
| |
| kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) |
| { |
| return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); |
| |
| kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) |
| { |
| return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); |
| |
| kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) |
| { |
| return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); |
| |
| kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) |
| { |
| return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_pfn); |
| |
| kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
| { |
| return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); |
| |
| int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| struct page **pages, int nr_pages) |
| { |
| unsigned long addr; |
| gfn_t entry = 0; |
| |
| addr = gfn_to_hva_many(slot, gfn, &entry); |
| if (kvm_is_error_hva(addr)) |
| return -1; |
| |
| if (entry < nr_pages) |
| return 0; |
| |
| return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); |
| |
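| /* |
| * Convert @pfn to its struct page, returning KVM_ERR_PTR_BAD_PAGE for |
| * error pfns and for reserved pfns that have no refcounted page. |
| */ |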
| static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) |
| { |
| if (is_error_noslot_pfn(pfn)) |
| return KVM_ERR_PTR_BAD_PAGE; |
| |
| if (kvm_is_reserved_pfn(pfn)) { |
| WARN_ON(1); |
| return KVM_ERR_PTR_BAD_PAGE; |
| } |
| |
| return pfn_to_page(pfn); |
| } |
| |
| struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) |
| { |
| kvm_pfn_t pfn; |
| |
| pfn = gfn_to_pfn(kvm, gfn); |
| |
| return kvm_pfn_to_page(pfn); |
| } |
| EXPORT_SYMBOL_GPL(gfn_to_page); |
| |
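| /* |
| * Drop a pfn reference, marking the backing page dirty first if @dirty. |
| * A pfn of 0 is silently ignored. |
| */ |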
| void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) |
| { |
| if (pfn == 0) |
| return; |
| |
| if (dirty) |
| kvm_release_pfn_dirty(pfn); |
| else |
| kvm_release_pfn_clean(pfn); |
| } |
| |
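| /* |
| * Example usage (hypothetical caller, for illustration only): map a guest |
| * page, modify it through the host mapping, then unmap it as dirty: |
| * |
| *	struct kvm_host_map map; |
| * |
| *	if (!kvm_vcpu_map(vcpu, gfn, &map)) { |
| *		memset(map.hva, 0, PAGE_SIZE); |
| *		kvm_vcpu_unmap(vcpu, &map, true); |
| *	} |
| */ |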
| int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) |
| { |
| kvm_pfn_t pfn; |
| void *hva = NULL; |
| struct page *page = KVM_UNMAPPED_PAGE; |
| |
| if (!map) |
| return -EINVAL; |
| |
| pfn = gfn_to_pfn(vcpu->kvm, gfn); |
| if (is_error_noslot_pfn(pfn)) |
| return -EINVAL; |
| |
| if (pfn_valid(pfn)) { |
| page = pfn_to_page(pfn); |
| hva = kmap(page); |
| #ifdef CONFIG_HAS_IOMEM |
| } else { |
| hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); |
| #endif |
| } |
| |
| if (!hva) |
| return -EFAULT; |
| |
| map->page = page; |
| map->hva = hva; |
| map->pfn = pfn; |
| map->gfn = gfn; |
| |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_map); |
| |
| void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) |
| { |
| if (!map) |
| return; |
| |
| if (!map->hva) |
| return; |
| |
| if (map->page != KVM_UNMAPPED_PAGE) |
| kunmap(map->page); |
| #ifdef CONFIG_HAS_IOMEM |
| else |
| memunmap(map->hva); |
| #endif |
| |
| if (dirty) |
| kvm_vcpu_mark_page_dirty(vcpu, map->gfn); |
| |
| kvm_release_pfn(map->pfn, dirty); |
| |
| map->hva = NULL; |
| map->page = NULL; |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); |
| |
| struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) |
| { |
| kvm_pfn_t pfn; |
| |
| pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); |
| |
| return kvm_pfn_to_page(pfn); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); |
| |
| void kvm_release_page_clean(struct page *page) |
| { |
| WARN_ON(is_error_page(page)); |
| |
| kvm_release_pfn_clean(page_to_pfn(page)); |
| } |
| EXPORT_SYMBOL_GPL(kvm_release_page_clean); |
| |
| void kvm_release_pfn_clean(kvm_pfn_t pfn) |
| { |
| if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) |
| put_page(pfn_to_page(pfn)); |
| } |
| EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); |
| |
| void kvm_release_page_dirty(struct page *page) |
| { |
| WARN_ON(is_error_page(page)); |
| |
| kvm_release_pfn_dirty(page_to_pfn(page)); |
| } |
| EXPORT_SYMBOL_GPL(kvm_release_page_dirty); |
| |
| void kvm_release_pfn_dirty(kvm_pfn_t pfn) |
| { |
| kvm_set_pfn_dirty(pfn); |
| kvm_release_pfn_clean(pfn); |
| } |
| EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); |
| |
| void kvm_set_pfn_dirty(kvm_pfn_t pfn) |
| { |
| if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
| SetPageDirty(pfn_to_page(pfn)); |
| } |
| EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); |
| |
| void kvm_set_pfn_accessed(kvm_pfn_t pfn) |
| { |
| if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
| mark_page_accessed(pfn_to_page(pfn)); |
| } |
| EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); |
| |
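| /* |
| * Returns the number of bytes that can be copied starting at @offset |
| * within a page without crossing into the next page, capped at @len. |
| */ |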
| static int next_segment(unsigned long len, int offset) |
| { |
| if (len > PAGE_SIZE - offset) |
| return PAGE_SIZE - offset; |
| else |
| return len; |
| } |
| |
| static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, |
| void *data, int offset, int len) |
| { |
| int r; |
| unsigned long addr; |
| |
| addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
| if (kvm_is_error_hva(addr)) |
| return -EFAULT; |
| r = __copy_from_user(data, (void __user *)addr + offset, len); |
| if (r) |
| return -EFAULT; |
| return 0; |
| } |
| |
| int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
| int len) |
| { |
| struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| |
| return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_read_guest_page); |
| |
| int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, |
| int offset, int len) |
| { |
| struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| |
| return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); |
| |
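| /* |
| * Example usage (hypothetical caller, for illustration only): read a |
| * guest value that may straddle a page boundary: |
| * |
| *	u64 val; |
| * |
| *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val))) |
| *		return -EFAULT; |
| */ |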
| int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) |
| { |
| gfn_t gfn = gpa >> PAGE_SHIFT; |
| int seg; |
| int offset = offset_in_page(gpa); |
| int ret; |
| |
| while ((seg = next_segment(len, offset)) != 0) { |
| ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); |
| if (ret < 0) |
| return ret; |
| offset = 0; |
| len -= seg; |
| data += seg; |
| ++gfn; |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_read_guest); |
| |
| int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) |
| { |
| gfn_t gfn = gpa >> PAGE_SHIFT; |
| int seg; |
| int offset = offset_in_page(gpa); |
| int ret; |
| |
| while ((seg = next_segment(len, offset)) != 0) { |
| ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); |
| if (ret < 0) |
| return ret; |
| offset = 0; |
| len -= seg; |
| data += seg; |
| ++gfn; |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); |
| |
| static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| void *data, int offset, unsigned long len) |
| { |
| int r; |
| unsigned long addr; |
| |
| addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
| if (kvm_is_error_hva(addr)) |
| return -EFAULT; |
| pagefault_disable(); |
| r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); |
| pagefault_enable(); |
| if (r) |
| return -EFAULT; |
| return 0; |
| } |
| |
| int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, |
| void *data, unsigned long len) |
| { |
| gfn_t gfn = gpa >> PAGE_SHIFT; |
| struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| int offset = offset_in_page(gpa); |
| |
| return __kvm_read_guest_atomic(slot, gfn, data, offset, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); |
| |
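| /* |
| * Write counterpart of __kvm_read_guest_page(); in addition to copying |
| * the data, the gfn is marked dirty in the memslot so that dirty logging |
| * observes the write. |
| */ |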
| static int __kvm_write_guest_page(struct kvm *kvm, |
| struct kvm_memory_slot *memslot, gfn_t gfn, |
| const void *data, int offset, int len) |
| { |
| int r; |
| unsigned long addr; |
| |
| addr = gfn_to_hva_memslot(memslot, gfn); |
| if (kvm_is_error_hva(addr)) |
| return -EFAULT; |
| r = __copy_to_user((void __user *)addr + offset, data, len); |
| if (r) |
| return -EFAULT; |
| mark_page_dirty_in_slot(kvm, memslot, gfn); |
| return 0; |
| } |
| |
| int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, |
| const void *data, int offset, int len) |
| { |
| struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| |
| return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_write_guest_page); |
| |
| int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, |
| const void *data, int offset, int len) |
| { |
| struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| |
| return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); |
| |
| int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, |
| unsigned long len) |
| { |
| gfn_t gfn = gpa >> PAGE_SHIFT; |
| int seg; |
| int offset = offset_in_page(gpa); |
| int ret; |
| |
| while ((seg = next_segment(len, offset)) != 0) { |
| ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); |
| if (ret < 0) |
| return ret; |
| offset = 0; |
| len -= seg; |
| data += seg; |
| ++gfn; |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_write_guest); |
| |
| int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, |
| unsigned long len) |
| { |
| gfn_t gfn = gpa >> PAGE_SHIFT; |
| int seg; |
| int offset = offset_in_page(gpa); |
| int ret; |
| |
| while ((seg = next_segment(len, offset)) != 0) { |
| ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); |
| if (ret < 0) |
| return ret; |
| offset = 0; |
| len -= seg; |
| data += seg; |
| ++gfn; |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); |
| |
| static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, |
| struct gfn_to_hva_cache *ghc, |
| gpa_t gpa, unsigned long len) |
| { |
| int offset = offset_in_page(gpa); |
| gfn_t start_gfn = gpa >> PAGE_SHIFT; |
| gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; |
| gfn_t nr_pages_needed = end_gfn - start_gfn + 1; |
| gfn_t nr_pages_avail; |
| |
| /* Update ghc->generation before performing any error checks. */ |
| ghc->generation = slots->generation; |
| |
| if (start_gfn > end_gfn) { |
| ghc->hva = KVM_HVA_ERR_BAD; |
| return -EINVAL; |
| } |
| |
| /* |
| * Even if the requested region spans multiple memslots, verify |
| * that the entire region is valid here. |
| */ |
| for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { |
| ghc->memslot = __gfn_to_memslot(slots, start_gfn); |
| ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, |
| &nr_pages_avail); |
| if (kvm_is_error_hva(ghc->hva)) |
| return -EFAULT; |
| } |
| |
| /* Use the slow path for cross-page reads and writes. */ |
| if (nr_pages_needed == 1) |
| ghc->hva += offset; |
| else |
| ghc->memslot = NULL; |
| |
| ghc->gpa = gpa; |
| ghc->len = len; |
| return 0; |
| } |
| |
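| /* |
| * Example usage (hypothetical caller, for illustration only): initialize |
| * a cache once, then perform repeated writes to the same guest address |
| * without re-translating the gpa each time: |
| * |
| *	struct gfn_to_hva_cache ghc; |
| *	u64 val = 42; |
| * |
| *	if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val))) |
| *		kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)); |
| */ |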
| int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| gpa_t gpa, unsigned long len) |
| { |
| struct kvm_memslots *slots = kvm_memslots(kvm); |
| return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); |
| |
| int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| void *data, unsigned int offset, |
| unsigned long len) |
| { |
| struct kvm_memslots *slots = kvm_memslots(kvm); |
| int r; |
| gpa_t gpa = ghc->gpa + offset; |
| |
| if (WARN_ON_ONCE(len + offset > ghc->len)) |
| return -EINVAL; |
| |
| if (slots->generation != ghc->generation) { |
| if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| return -EFAULT; |
| } |
| |
| if (kvm_is_error_hva(ghc->hva)) |
| return -EFAULT; |
| |
| if (unlikely(!ghc->memslot)) |
| return kvm_write_guest(kvm, gpa, data, len); |
| |
| r = __copy_to_user((void __user *)ghc->hva + offset, data, len); |
| if (r) |
| return -EFAULT; |
| mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); |
| |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); |
| |
| int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| void *data, unsigned long len) |
| { |
| return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_write_guest_cached); |
| |
| int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| void *data, unsigned int offset, |
| unsigned long len) |
| { |
| struct kvm_memslots *slots = kvm_memslots(kvm); |
| int r; |
| gpa_t gpa = ghc->gpa + offset; |
| |
| if (WARN_ON_ONCE(len + offset > ghc->len)) |
| return -EINVAL; |
| |
| if (slots->generation != ghc->generation) { |
| if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| return -EFAULT; |
| } |
| |
| if (kvm_is_error_hva(ghc->hva)) |
| return -EFAULT; |
| |
| if (unlikely(!ghc->memslot)) |
| return kvm_read_guest(kvm, gpa, data, len); |
| |
| r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); |
| if (r) |
| return -EFAULT; |
| |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); |
| |
| int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| void *data, unsigned long len) |
| { |
| return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); |
| } |
| EXPORT_SYMBOL_GPL(kvm_read_guest_cached); |
| |
| int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) |
| { |
| const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); |
|