// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
* Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
*
* Authors:
* Paul Mackerras <paulus@au1.ibm.com>
* Alexander Graf <agraf@suse.de>
* Kevin Wolf <mail@kevin-wolf.de>
*
* Description: KVM functions specific to running on Book 3S
* processors in hypervisor mode (specifically POWER7 and later).
*
* This file is derived from arch/powerpc/kvm/book3s.c,
* by Alexander Graf <agraf@suse.de>.
*/
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <asm/ftrace.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
#include <asm/archrandom.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/pmc.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
#include <asm/plpar_wrappers.h>
#include "book3s.h"
#include "book3s_hv.h"
#define CREATE_TRACE_POINTS
#include "trace_hv.h"
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH (RESUME_GUEST | RESUME_FLAG_ARCH2)
/* Used as a "null" value for timebase values */
#define TB_NIL (~(u64)0)
static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, 0644);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");
#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
.set = param_set_int,
.get = param_get_int,
};
module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");
module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
/* If set, guests are allowed to create and control nested guests */
static bool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
* RWMR values for POWER8. These control the rate at which PURR
* and SPURR count and should be set according to the number of
* online threads in the vcore being run.
*/
#define RWMR_RPA_P8_1THREAD 0x164520C62609AECAUL
#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9UL
#define RWMR_RPA_P8_3THREAD 0x164520C62609AECAUL
#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9UL
#define RWMR_RPA_P8_5THREAD 0x164520C62609AECAUL
#define RWMR_RPA_P8_6THREAD 0x164520C62609AECAUL
#define RWMR_RPA_P8_7THREAD 0x164520C62609AECAUL
#define RWMR_RPA_P8_8THREAD 0x164520C62609AECAUL
static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
RWMR_RPA_P8_1THREAD,
RWMR_RPA_P8_1THREAD,
RWMR_RPA_P8_2THREAD,
RWMR_RPA_P8_3THREAD,
RWMR_RPA_P8_4THREAD,
RWMR_RPA_P8_5THREAD,
RWMR_RPA_P8_6THREAD,
RWMR_RPA_P8_7THREAD,
RWMR_RPA_P8_8THREAD,
};
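/*
 * Find the next runnable vcpu thread in the vcore, starting just after
 * index *ip.  Updates *ip and returns the vcpu, or NULL if none remain.
 */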
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
int *ip)
{
int i = *ip;
struct kvm_vcpu *vcpu;
while (++i < MAX_SMT_THREADS) {
vcpu = READ_ONCE(vc->runnable_threads[i]);
if (vcpu) {
*ip = i;
return vcpu;
}
}
return NULL;
}
/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
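/*
 * Try to interrupt the given physical cpu thread directly, using msgsnd
 * doorbells (POWER8 same-core, POWER9 any cpu) or an XICS/OPAL IPI where
 * available.  Returns false if the caller should fall back to an
 * ordinary reschedule IPI.
 */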
static bool kvmppc_ipi_thread(int cpu)
{
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
/* If we're a nested hypervisor, fall back to ordinary IPIs for now */
if (kvmhv_on_pseries())
return false;
/* On POWER9 we can use msgsnd to IPI any cpu */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
smp_mb();
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
return true;
}
/* On POWER8 for IPIs to threads in the same core, use msgsnd */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
preempt_disable();
if (cpu_first_thread_sibling(cpu) ==
cpu_first_thread_sibling(smp_processor_id())) {
msg |= cpu_thread_in_core(cpu);
smp_mb();
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
preempt_enable();
return true;
}
preempt_enable();
}
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (cpu >= 0 && cpu < nr_cpu_ids) {
if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
xics_wake_cpu(cpu);
return true;
}
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
return true;
}
#endif
return false;
}
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
int cpu;
struct rcuwait *waitp;
/*
* rcuwait_wake_up contains smp_mb() which orders prior stores that
* create pending work vs below loads of cpu fields. The other side
* is the barrier in vcpu run that orders setting the cpu fields vs
* testing for pending work.
*/
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp))
++vcpu->stat.generic.halt_wakeup;
cpu = READ_ONCE(vcpu->arch.thread_cpu);
if (cpu >= 0 && kvmppc_ipi_thread(cpu))
return;
/* CPU points to the first thread of the core */
cpu = vcpu->cpu;
if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
}
/*
* We use the vcpu_load/put functions to measure stolen time.
* Stolen time is counted as time when either the vcpu is able to
* run as part of a virtual core, but the task running the vcore
* is preempted or sleeping, or when the vcpu needs something done
* in the kernel by the task running the vcpu, but that task is
* preempted or sleeping. Those two things have to be counted
* separately, since one of the vcpu tasks will take on the job
* of running the core, and the other vcpu tasks in the vcore will
* sleep waiting for it to do that, but that sleep shouldn't count
* as stolen time.
*
* Hence we accumulate stolen time when the vcpu can run as part of
* a vcore using vc->stolen_tb, and the stolen time when the vcpu
* needs its task to do other things in the kernel (for example,
* service a page fault) in busy_stolen. We don't accumulate
* stolen time for a vcore when it is inactive, or for a vcpu
* when it is in state RUNNING or NOTREADY. NOTREADY is a bit of
* a misnomer; it means that the vcpu task is not executing in
* the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
* the kernel. We don't have any way of dividing up that time
* between time that the vcpu is genuinely stopped, time that
* the task is actively working on behalf of the vcpu, and time
* that the task is preempted, so we don't count any of it as
* stolen.
*
* Updates to busy_stolen are protected by arch.tbacct_lock;
* updates to vc->stolen_tb are protected by the vcore->stoltb_lock
* lock. The stolen times are measured in units of timebase ticks.
* (Note that the != TB_NIL checks below are purely defensive;
* they should never fail.)
*/
static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
spin_lock_irqsave(&vc->stoltb_lock, flags);
vc->preempt_tb = tb;
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
spin_lock_irqsave(&vc->stoltb_lock, flags);
if (vc->preempt_tb != TB_NIL) {
vc->stolen_tb += tb - vc->preempt_tb;
vc->preempt_tb = TB_NIL;
}
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
u64 now;
if (cpu_has_feature(CPU_FTR_ARCH_300))
return;
now = mftb();
/*
* We can test vc->runner without taking the vcore lock,
* because only this task ever sets vc->runner to this
* vcpu, and once it is set to this vcpu, only this task
* ever sets it to NULL.
*/
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
kvmppc_core_end_stolen(vc, now);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu->arch.busy_preempt != TB_NIL) {
vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt;
vcpu->arch.busy_preempt = TB_NIL;
}
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
u64 now;
if (cpu_has_feature(CPU_FTR_ARCH_300))
return;
now = mftb();
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
kvmppc_core_start_stolen(vc, now);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = now;
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
vcpu->arch.pvr = pvr;
}
/* Dummy value used in computing PCR value below */
#define PCR_ARCH_31 (PCR_ARCH_300 << 1)
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
/* We can (emulate) our own architecture version and anything older */
if (cpu_has_feature(CPU_FTR_ARCH_31))
host_pcr_bit = PCR_ARCH_31;
else if (cpu_has_feature(CPU_FTR_ARCH_300))
host_pcr_bit = PCR_ARCH_300;
else if (cpu_has_feature(CPU_FTR_ARCH_207S))
host_pcr_bit = PCR_ARCH_207;
else if (cpu_has_feature(CPU_FTR_ARCH_206))
host_pcr_bit = PCR_ARCH_206;
else
host_pcr_bit = PCR_ARCH_205;
/* Determine lowest PCR bit needed to run guest in given PVR level */
guest_pcr_bit = host_pcr_bit;
if (arch_compat) {
switch (arch_compat) {
case PVR_ARCH_205:
guest_pcr_bit = PCR_ARCH_205;
break;
case PVR_ARCH_206:
case PVR_ARCH_206p:
guest_pcr_bit = PCR_ARCH_206;
break;
case PVR_ARCH_207:
guest_pcr_bit = PCR_ARCH_207;
break;
case PVR_ARCH_300:
guest_pcr_bit = PCR_ARCH_300;
break;
case PVR_ARCH_31:
guest_pcr_bit = PCR_ARCH_31;
break;
default:
return -EINVAL;
}
}
/* Check requested PCR bits don't exceed our capabilities */
if (guest_pcr_bit > host_pcr_bit)
return -EINVAL;
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
/*
* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
* Also set all reserved PCR bits
*/
vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
spin_unlock(&vc->lock);
return 0;
}
static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
int r;
pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
for (r = 0; r < 16; ++r)
pr_err("r%2d = %.16lx r%d = %.16lx\n",
r, kvmppc_get_gpr(vcpu, r),
r+16, kvmppc_get_gpr(vcpu, r+16));
pr_err("ctr = %.16lx lr = %.16lx\n",
vcpu->arch.regs.ctr, vcpu->arch.regs.link);
pr_err("srr0 = %.16llx srr1 = %.16llx\n",
vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n",
vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
pr_err("fault dar = %.16lx dsisr = %.8x\n",
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
for (r = 0; r < vcpu->arch.slb_max; ++r)
pr_err(" ESID = %.16llx VSID = %.16llx\n",
vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
vcpu->arch.last_inst);
}
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
return kvm_get_vcpu_by_id(kvm, id);
}
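/*
 * Initialise a freshly registered VPA: mark shared-processor mode and
 * start the yield count at 1.
 */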
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
vpa->yield_count = cpu_to_be32(1);
}
static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
unsigned long addr, unsigned long len)
{
/* check address is cacheline aligned */
if (addr & (L1_CACHE_BYTES - 1))
return -EINVAL;
spin_lock(&vcpu->arch.vpa_update_lock);
if (v->next_gpa != addr || v->len != len) {
v->next_gpa = addr;
v->len = addr ? len : 0;
v->update_pending = 1;
}
spin_unlock(&vcpu->arch.vpa_update_lock);
return 0;
}
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
u32 dummy;
union {
__be16 hword;
__be32 word;
} length;
};
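/*
 * A VPA counts as registered if an update to a non-zero address is
 * pending, or if it is already pinned in host memory.
 */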
static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
if (vpap->update_pending)
return vpap->next_gpa != 0;
return vpap->pinned_addr != NULL;
}
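/*
 * Handle the H_REGISTER_VPA hcall: register or deregister the VPA, DTL
 * or SLB shadow buffer for the target vcpu.  The actual (un)pinning is
 * deferred to kvmppc_update_vpas() via update_pending.
 */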
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
unsigned long flags,
unsigned long vcpuid, unsigned long vpa)
{
struct kvm *kvm = vcpu->kvm;
unsigned long len, nb;
void *va;
struct kvm_vcpu *tvcpu;
int err;
int subfunc;
struct kvmppc_vpa *vpap;
tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
if (!tvcpu)
return H_PARAMETER;
subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
subfunc == H_VPA_REG_SLB) {
/* Registering new area - address must be cache-line aligned */
if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
return H_PARAMETER;
/* convert logical addr to kernel addr and read length */
va = kvmppc_pin_guest_page(kvm, vpa, &nb);
if (va == NULL)
return H_PARAMETER;
if (subfunc == H_VPA_REG_VPA)
len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
else
len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
kvmppc_unpin_guest_page(kvm, va, vpa, false);
/* Check length */
if (len > nb || len < sizeof(struct reg_vpa))
return H_PARAMETER;
} else {
vpa = 0;
len = 0;
}
err = H_PARAMETER;
vpap = NULL;
spin_lock(&tvcpu->arch.vpa_update_lock);
switch (subfunc) {
case H_VPA_REG_VPA: /* register VPA */
/*
* The size of our lppaca is 1kB because of the way we align
* it for the guest to avoid crossing a 4kB boundary. We only
* use 640 bytes of the structure though, so we should accept
* clients that set a size of 640.
*/
BUILD_BUG_ON(sizeof(struct lppaca) != 640);
if (len < sizeof(struct lppaca))
break;
vpap = &tvcpu->arch.vpa;
err = 0;
break;
case H_VPA_REG_DTL: /* register DTL */
if (len < sizeof(struct dtl_entry))
break;
len -= len % sizeof(struct dtl_entry);
/* Check that they have previously registered a VPA */
err = H_RESOURCE;
if (!vpa_is_registered(&tvcpu->arch.vpa))
break;
vpap = &tvcpu->arch.dtl;
err = 0;
break;
case H_VPA_REG_SLB: /* register SLB shadow buffer */
/* Check that they have previously registered a VPA */
err = H_RESOURCE;
if (!vpa_is_registered(&tvcpu->arch.vpa))
break;
vpap = &tvcpu->arch.slb_shadow;
err = 0;
break;
case H_VPA_DEREG_VPA: /* deregister VPA */
/* Check they don't still have a DTL or SLB buf registered */
err = H_RESOURCE;
if (vpa_is_registered(&tvcpu->arch.dtl) ||
vpa_is_registered(&tvcpu->arch.slb_shadow))
break;
vpap = &tvcpu->arch.vpa;
err = 0;
break;
case H_VPA_DEREG_DTL: /* deregister DTL */
vpap = &tvcpu->arch.dtl;
err = 0;
break;
case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */
vpap = &tvcpu->arch.slb_shadow;
err = 0;
break;
}
if (vpap) {
vpap->next_gpa = vpa;
vpap->len = len;
vpap->update_pending = 1;
}
spin_unlock(&tvcpu->arch.vpa_update_lock);
return err;
}
static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
struct kvm *kvm = vcpu->kvm;
void *va;
unsigned long nb;
unsigned long gpa;
/*
* We need to pin the page pointed to by vpap->next_gpa,
* but we can't call kvmppc_pin_guest_page under the lock
* as it does get_user_pages() and down_read(). So we
* have to drop the lock, pin the page, then get the lock
* again and check that a new area didn't get registered
* in the meantime.
*/
for (;;) {
gpa = vpap->next_gpa;
spin_unlock(&vcpu->arch.vpa_update_lock);
va = NULL;
nb = 0;
if (gpa)
va = kvmppc_pin_guest_page(kvm, gpa, &nb);
spin_lock(&vcpu->arch.vpa_update_lock);
if (gpa == vpap->next_gpa)
break;
/* sigh... unpin that one and try again */
if (va)
kvmppc_unpin_guest_page(kvm, va, gpa, false);
}
vpap->update_pending = 0;
if (va && nb < vpap->len) {
/*
* If it's now too short, it must be that userspace
* has changed the mappings underlying guest memory,
* so unregister the region.
*/
kvmppc_unpin_guest_page(kvm, va, gpa, false);
va = NULL;
}
if (vpap->pinned_addr)
kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
vpap->dirty);
vpap->gpa = gpa;
vpap->pinned_addr = va;
vpap->dirty = false;
if (va)
vpap->pinned_end = va + vpap->len;
}
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.vpa.update_pending ||
vcpu->arch.slb_shadow.update_pending ||
vcpu->arch.dtl.update_pending))
return;
spin_lock(&vcpu->arch.vpa_update_lock);
if (vcpu->arch.vpa.update_pending) {
kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
if (vcpu->arch.vpa.pinned_addr)
init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
}
if (vcpu->arch.dtl.update_pending) {
kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
vcpu->arch.dtl_index = 0;
}
if (vcpu->arch.slb_shadow.update_pending)
kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
spin_unlock(&vcpu->arch.vpa_update_lock);
}
/*
* Return the accumulated stolen time for the vcore up until `now'.
* The caller should hold the vcore lock.
*/
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
u64 p;
unsigned long flags;
WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
spin_lock_irqsave(&vc->stoltb_lock, flags);
p = vc->stolen_tb;
if (vc->vcore_state != VCORE_INACTIVE &&
vc->preempt_tb != TB_NIL)
p += now - vc->preempt_tb;
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
return p;
}
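/*
 * Write one dispatch trace log (DTL) entry into the guest's pinned DTL
 * ring buffer and advance the DTL index in the VPA.
 */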
static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
unsigned int pcpu, u64 now,
unsigned long stolen)
{
struct dtl_entry *dt;
struct lppaca *vpa;
dt = vcpu->arch.dtl_ptr;
vpa = vcpu->arch.vpa.pinned_addr;
if (!dt || !vpa)
return;
dt->dispatch_reason = 7;
dt->preempt_reason = 0;
dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid);
dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
dt->ready_to_enqueue_time = 0;
dt->waiting_to_ready_time = 0;
dt->timebase = cpu_to_be64(now);
dt->fault_addr = 0;
dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
++dt;
if (dt == vcpu->arch.dtl.pinned_end)
dt = vcpu->arch.dtl.pinned_addr;
vcpu->arch.dtl_ptr = dt;
/* order writing *dt vs. writing vpa->dtl_idx */
smp_wmb();
vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
vcpu->arch.dtl.dirty = true;
}
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
struct kvmppc_vcore *vc)
{
unsigned long stolen;
unsigned long core_stolen;
u64 now;
unsigned long flags;
now = mftb();
core_stolen = vcore_stolen_time(vc, now);
stolen = core_stolen - vcpu->arch.stolen_logged;
vcpu->arch.stolen_logged = core_stolen;
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
stolen += vcpu->arch.busy_stolen;
vcpu->arch.busy_stolen = 0;
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
__kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen);
}
/* See if there is a doorbell interrupt pending for a vcpu */
static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{
int thr;
struct kvmppc_vcore *vc;
if (vcpu->arch.doorbell_request)
return true;
if (cpu_has_feature(CPU_FTR_ARCH_300))
return false;
/*
* Ensure that the read of vcore->dpdes comes after the read
* of vcpu->doorbell_request. This barrier matches the
* smp_wmb() in kvmppc_guest_entry_inject().
*/
smp_rmb();
vc = vcpu->arch.vcore;
thr = vcpu->vcpu_id - vc->first_vcpuid;
return !!(vc->dpdes & (1 << thr));
}
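/* True if the vcore is in POWER8 (ISA v2.07) or later compatibility mode. */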
static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
return true;
if ((!vcpu->arch.vcore->arch_compat) &&
cpu_has_feature(CPU_FTR_ARCH_207S))
return true;
return false;
}
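/*
 * Handle the H_SET_MODE hcall for the resources we emulate: CIABR,
 * DAWR0/DAWR1 and the address translation (AIL) mode.
 */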
static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
unsigned long resource, unsigned long value1,
unsigned long value2)
{
switch (resource) {
case H_SET_MODE_RESOURCE_SET_CIABR:
if (!kvmppc_power8_compatible(vcpu))
return H_P2;
if (value2)
return H_P4;
if (mflags)
return H_UNSUPPORTED_FLAG_START;
/* Guests can't breakpoint the hypervisor */
if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
return H_P3;
vcpu->arch.ciabr = value1;
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR0:
if (!kvmppc_power8_compatible(vcpu))
return H_P2;
if (!ppc_breakpoint_available())
return H_P2;
if (mflags)
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
vcpu->arch.dawr0 = value1;
vcpu->arch.dawrx0 = value2;
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR1:
if (!kvmppc_power8_compatible(vcpu))
return H_P2;
if (!ppc_breakpoint_available())
return H_P2;
if (!cpu_has_feature(CPU_FTR_DAWR1))
return H_P2;
if (!vcpu->kvm->arch.dawr1_enabled)
return H_FUNCTION;
if (mflags)
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
vcpu->arch.dawr1 = value1;
vcpu->arch.dawrx1 = value2;
return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
/*
* KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
* Keep this in synch with kvmppc_filter_guest_lpcr_hv.
*/
if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
return H_UNSUPPORTED_FLAG_START;
return H_TOO_HARD;
default:
return H_TOO_HARD;
}
}
/* Copy guest memory in place - must reside within a single memslot */
static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
unsigned long len)
{
struct kvm_memory_slot *to_memslot = NULL;
struct kvm_memory_slot *from_memslot = NULL;
unsigned long to_addr, from_addr;
int r;
/* Get HPA for from address */
from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
if (!from_memslot)
return -EFAULT;
if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
<< PAGE_SHIFT))
return -EINVAL;
from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
if (kvm_is_error_hva(from_addr))
return -EFAULT;
from_addr |= (from & (PAGE_SIZE - 1));
/* Get HPA for to address */
to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
if (!to_memslot)
return -EFAULT;
if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
<< PAGE_SHIFT))
return -EINVAL;
to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
if (kvm_is_error_hva(to_addr))
return -EFAULT;
to_addr |= (to & (PAGE_SIZE - 1));
/* Perform copy */
r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
len);
if (r)
return -EFAULT;
mark_page_dirty(kvm, to >> PAGE_SHIFT);
return 0;
}
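/* Handle H_PAGE_INIT: zero and/or copy a 4K guest page as requested by flags. */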
static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long dest, unsigned long src)
{
u64 pg_sz = SZ_4K; /* 4K page size */
u64 pg_mask = SZ_4K - 1;
int ret;
/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
return H_PARAMETER;
/* dest (and src if copy_page flag set) must be page aligned */
if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
return H_PARAMETER;
/* zero and/or copy the page as determined by the flags */
if (flags & H_COPY_PAGE) {
ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
if (ret < 0)
return H_PARAMETER;
} else if (flags & H_ZERO_PAGE) {
ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
if (ret < 0)
return H_PARAMETER;
}
/* We can ignore the remaining flags */
return H_SUCCESS;
}
static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
struct kvmppc_vcore *vcore = target->arch.vcore;
/*
* We expect to have been called by the real mode handler
* (kvmppc_rm_h_confer()) which would have directly returned
* H_SUCCESS if the source vcore wasn't idle (e.g. if it may
* have useful work to do and should not confer) so we don't
* recheck that here.
*
* In the case of the P9 single vcpu per vcore case, the real
* mode handler is not called but no other threads are in the
* source vcore.
*/
if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
spin_lock(&vcore->lock);
if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
vcore->vcore_state != VCORE_INACTIVE &&
vcore->runner)
target = vcore->runner;
spin_unlock(&vcore->lock);
}
return kvm_vcpu_yield_to(target);
}
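/* Read the yield count from the vcpu's pinned VPA, or 0 if none is registered. */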
static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
int yield_count = 0;
struct lppaca *lppaca;
spin_lock(&vcpu->arch.vpa_update_lock);
lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
if (lppaca)
yield_count = be32_to_cpu(lppaca->yield_count);
spin_unlock(&vcpu->arch.vpa_update_lock);
return yield_count;
}
/*
* H_RPT_INVALIDATE hcall handler for nested guests.
*
* Handles only nested process-scoped invalidation requests in L0.
*/
static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
{
unsigned long type = kvmppc_get_gpr(vcpu, 6);
unsigned long pid, pg_sizes, start, end;
/*
* The partition-scoped invalidations aren't handled here in L0.
*/
if (type & H_RPTI_TYPE_NESTED)
return RESUME_HOST;
pid = kvmppc_get_gpr(vcpu, 4);
pg_sizes = kvmppc_get_gpr(vcpu, 7);
start = kvmppc_get_gpr(vcpu, 8);
end = kvmppc_get_gpr(vcpu, 9);
do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
type, pg_sizes, start, end);
kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
return RESUME_GUEST;
}
static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
unsigned long id, unsigned long target,
unsigned long type, unsigned long pg_sizes,
unsigned long start, unsigned long end)
{
if (!kvm_is_radix(vcpu->kvm))
return H_UNSUPPORTED;
if (end < start)
return H_P5;
/*
* Partition-scoped invalidation for nested guests.
*/
if (type & H_RPTI_TYPE_NESTED) {
if (!nesting_enabled(vcpu->kvm))
return H_FUNCTION;
/* Support only cores as target */
if (target != H_RPTI_TARGET_CMMU)
return H_P2;
return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
start, end);
}
/*
* Process-scoped invalidation for L1 guests.
*/
do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
type, pg_sizes, start, end);
return H_SUCCESS;
}
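/*
 * Handle an hcall that was not (or could not be) handled in real mode.
 * Returns RESUME_GUEST, RESUME_HOST to punt to userspace, or an error
 * code to pass back through KVM_RUN.
 */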
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
int yield_count;
struct kvm_vcpu *tvcpu;
int idx, rc;
if (req <= MAX_HCALL_OPCODE &&
!test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
return RESUME_HOST;
switch (req) {
case H_REMOVE:
ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_ENTER:
ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_READ:
ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_CLEAR_MOD:
ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_CLEAR_REF:
ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_PROTECT:
ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_BULK_REMOVE:
ret = kvmppc_h_bulk_remove(vcpu);
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_CEDE:
break;
case H_PROD:
target = kvmppc_get_gpr(vcpu, 4);
tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
}
tvcpu->arch.prodded = 1;
smp_mb(); /* This orders prodded store vs ceded load */
if (tvcpu->arch.ceded)
kvmppc_fast_vcpu_kick_hv(tvcpu);
break;
case H_CONFER:
target = kvmppc_get_gpr(vcpu, 4);
if (target == -1)
break;
tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
}
yield_count = kvmppc_get_gpr(vcpu, 5);
if (kvmppc_get_yield_count(tvcpu) != yield_count)
break;
kvm_arch_vcpu_yield_to(tvcpu);
break;
case H_REGISTER_VPA:
ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
case H_RTAS:
if (list_empty(&kvm->arch.rtas_tokens))
return RESUME_HOST;
idx = srcu_read_lock(&kvm->srcu);
rc = kvmppc_rtas_hcall(vcpu);
srcu_read_unlock(&kvm->srcu, idx);
if (rc == -ENOENT)
return RESUME_HOST;
else if (rc == 0)
break;
/* Send the error out to userspace via KVM_RUN */
return rc;
case H_LOGICAL_CI_LOAD:
ret = kvmppc_h_logical_ci_load(vcpu);
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_LOGICAL_CI_STORE:
ret = kvmppc_h_logical_ci_store(vcpu);
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_SET_MODE:
ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_XIRR:
case H_CPPR:
case H_EOI:
case H_IPI:
case H_IPOLL:
case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu)) {
if (xics_on_xive()) {
ret = H_NOT_AVAILABLE;
return RESUME_GUEST;
}
ret = kvmppc_xics_hcall(vcpu, req);
break;
}
return RESUME_HOST;
case H_SET_DABR:
ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
break;
case H_SET_XDABR:
ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
break;
#ifdef CONFIG_SPAPR_TCE_IOMMU
case H_GET_TCE:
ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_PUT_TCE:
ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_PUT_TCE_INDIRECT:
ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_STUFF_TCE:
ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
#endif
case H_RANDOM:
if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
ret = H_HARDWARE;
break;
case H_RPT_INVALIDATE:
ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7),
kvmppc_get_gpr(vcpu, 8),
kvmppc_get_gpr(vcpu, 9));
break;
case H_SET_PARTITION_TABLE:
ret = H_FUNCTION;
if (nesting_enabled(kvm))
ret = kvmhv_set_partition_table(vcpu);
break;
case H_ENTER_NESTED:
ret = H_FUNCTION;
if (!nesting_enabled(kvm))
break;
ret = kvmhv_enter_nested_guest(vcpu);
if (ret == H_INTERRUPT) {
kvmppc_set_gpr(vcpu, 3, 0);
vcpu->arch.hcall_needed = 0;
return -EINTR;
} else if (ret == H_TOO_HARD) {
kvmppc_set_gpr(vcpu, 3, 0);
vcpu->arch.hcall_needed = 0;
return RESUME_HOST;
}
break;
case H_TLB_INVALIDATE:
ret = H_FUNCTION;
if (nesting_enabled(kvm))
ret = kvmhv_do_nested_tlbie(vcpu);
break;
case H_COPY_TOFROM_GUEST:
ret = H_FUNCTION;
if (nesting_enabled(kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu);
break;
case H_PAGE_INIT:
ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_PAGE_IN:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_page_in(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_PAGE_OUT:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_page_out(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_INIT_START:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_init_start(kvm);
break;
case H_SVM_INIT_DONE:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
ret = kvmppc_h_svm_init_done(kvm);
break;
case H_SVM_INIT_ABORT:
/*
* Even if that call is made by the Ultravisor, the SRR1 value
* is the guest context one, with the secure bit clear as it has
* not yet been secured. So we can't check it here.
* Instead the kvm->arch.secure_guest flag is checked inside
* kvmppc_h_svm_init_abort().
*/
ret = kvmppc_h_svm_init_abort(kvm);
break;
default:
return RESUME_HOST;
}
WARN_ON_ONCE(ret == H_TOO_HARD);
kvmppc_set_gpr(vcpu, 3, ret);
vcpu->arch.hcall_needed = 0;
return RESUME_GUEST;
}
/*
* Handle H_CEDE in the P9 path where we don't call the real-mode hcall
* handlers in book3s_hv_rmhandlers.S.
*
* This has to be done early, not in kvmppc_pseries_do_hcall(), so
* that the cede logic in kvmppc_run_single_vcpu() works properly.
*/
static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.shregs.msr |= MSR_EE;
vcpu->arch.ceded = 1;
smp_mb();
if (vcpu->arch.prodded) {
vcpu->arch.prodded = 0;
smp_mb();
vcpu->arch.ceded = 0;
}
}
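/*
 * Return non-zero if the given hcall is handled in the kernel by HV KVM,
 * either in virtual mode here or via the real-mode hcall table.
 */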
static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
switch (cmd) {
case H_CEDE:
case H_PROD:
case H_CONFER:
case H_REGISTER_VPA:
case H_SET_MODE:
#ifdef CONFIG_SPAPR_TCE_IOMMU
case H_GET_TCE:
case H_PUT_TCE:
case H_PUT_TCE_INDIRECT:
case H_STUFF_TCE:
#endif
case H_LOGICAL_CI_LOAD:
case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
case H_XIRR:
case H_CPPR:
case H_EOI:
case H_IPI:
case H_IPOLL:
case H_XIRR_X:
#endif
case H_PAGE_INIT:
case H_RPT_INVALIDATE:
return 1;
}
/* See if it's in the real-mode table */
return kvmppc_hcall_impl_hv_realmode(cmd);
}
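/*
 * Handle an emulation-assist trap when guest debug is in use: exit to
 * userspace for a software breakpoint instruction, otherwise queue an
 * illegal-instruction program interrupt to the guest.
 */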
static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
{
u32 last_inst;
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
EMULATE_DONE) {
/*
* Fetch failed, so return to guest and
* try executing it again.
*/
return RESUME_GUEST;
}
if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
return RESUME_HOST;
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
return RESUME_GUEST;
}
}
static void do_nothing(void *x)
{
}
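/*
 * Gather the doorbell-pending state of all (emulated) threads in the
 * vcpu's virtual core into a DPDES-style bitmap.
 */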
static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
{
int thr, cpu, pcpu, nthreads;
struct kvm_vcpu *v;
unsigned long dpdes;
nthreads = vcpu->kvm->arch.emul_smt_mode;
dpdes = 0;
cpu = vcpu->vcpu_id & ~(nthreads - 1);
for (thr = 0; thr < nthreads; ++thr, ++cpu) {
v = kvmppc_find_vcpu(vcpu->kvm, cpu);
if (!v)
continue;
/*
* If the vcpu is currently running on a physical cpu thread,
* interrupt it in order to pull it out of the guest briefly,
* which will update its vcore->dpdes value.
*/
pcpu = READ_ONCE(v->cpu);
if (pcpu >= 0)
smp_call_function_single(pcpu, do_nothing, NULL, 1);
if (kvmppc_doorbell_pending(v))
dpdes |= 1 << thr;
}
return dpdes;
}
/*
* On POWER9, emulate doorbell-related instructions in order to
* give the guest the illusion of running on a multi-threaded core.
* The instructions emulated are msgsndp, msgclrp, mfspr TIR,
* and mfspr DPDES.
*/
static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
{
u32 inst, rb, thr;
unsigned long arg;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tvcpu;
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
return RESUME_GUEST;
if (get_op(inst) != 31)
return EMULATE_FAIL;
rb = get_rb(inst);
thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
switch (get_xop(inst)) {
case OP_31_XOP_MSGSNDP:
arg = kvmppc_get_gpr(vcpu, rb);
if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
break;
arg &= 0x7f;
if (arg >= kvm->arch.emul_smt_mode)
break;
tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
if (!tvcpu)
break;
if (!tvcpu->arch.doorbell_request) {
tvcpu->arch.doorbell_request = 1;
kvmppc_fast_vcpu_kick_hv(tvcpu);
}
break;
case OP_31_XOP_MSGCLRP:
arg = kvmppc_get_gpr(vcpu, rb);
if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
break;
vcpu->arch.vcore->dpdes = 0;
vcpu->arch.doorbell_request = 0;
break;
case OP_31_XOP_MFSPR:
switch (get_sprn(inst)) {
case SPRN_TIR:
arg = thr;
break;
case SPRN_DPDES:
arg = kvmppc_read_dpdes(vcpu);
break;
default:
return EMULATE_FAIL;
}
kvmppc_set_gpr(vcpu, get_rt(inst), arg);
break;
default:
return EMULATE_FAIL;
}
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
return RESUME_GUEST;
}
/*
* If the lppaca had pmcregs_in_use clear when we exited the guest, then
* HFSCR_PM is cleared for next entry. If the guest then tries to access
* the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM
* back in the guest HFSCR will cause the next entry to load the PMU SPRs and
* allow the guest access to continue.
*/
static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
return EMULATE_FAIL;
vcpu->arch.hfscr |= HFSCR_PM;
return RESUME_GUEST;
}
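/* As above for HFSCR_PM, but for the event-based branch facility (HFSCR_EBB). */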
static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
return EMULATE_FAIL;
vcpu->arch.hfscr |= HFSCR_EBB;
return RESUME_GUEST;
}
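/* As above, but for transactional memory (HFSCR_TM). */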
static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
return EMULATE_FAIL;
vcpu->arch.hfscr |= HFSCR_TM;
return RESUME_GUEST;
}
static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
vcpu->stat.sum_exits++;
/*
* This can happen if an interrupt occurs in the last stages
* of guest entry or the first stages of guest exit (i.e. after
* setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
* and before setting it to KVM_GUEST_MODE_HOST_HV).
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
if (vcpu->arch.shregs.msr & MSR_HV) {
printk(KERN_EMERG "KVM trap in HV mode!\n");
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu->arch.shregs.msr);
kvmppc_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
run->hw.hardware_exit_reason = vcpu->arch.trap;
return RESUME_HOST;
}
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
switch (vcpu->arch.trap) {
/* We're good on these - the host merely wanted to get our attention */
case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
WARN_ON_ONCE(1); /* Should never happen */
vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
fallthrough;
case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
case BOOK3S_INTERRUPT_H_DOORBELL:
case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
/* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */
case BOOK3S_INTERRUPT_HMI:
case BOOK3S_INTERRUPT_PERFMON:
case BOOK3S_INTERRUPT_SYSTEM_RESET:
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_MACHINE_CHECK: {
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
/*
* Print the MCE event to host console. Ratelimit so the guest
* can't flood the host log.
*/
if (__ratelimit(&rs))
machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
/*
* If the guest can do FWNMI, exit to userspace so it can
* deliver a FWNMI to the guest.
* Otherwise we synthesize a machine check for the guest
* so that it knows that the machine check occurred.
*/
if (!vcpu->kvm->arch.fwnmi_enabled) {
ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
kvmppc_core_queue_machine_check(vcpu, flags);
r = RESUME_GUEST;
break;
}
/* Exit to userspace with KVM_EXIT_NMI as exit reason */
run->exit_reason = KVM_EXIT_NMI;
run->hw.hardware_exit_reason = vcpu->arch.trap;
/* Clear out the old NMI status from run->flags */
run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
/* Now set the NMI status */
if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
else
run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
r = RESUME_HOST;
break;
}
case BOOK3S_INTERRUPT_PROGRAM:
{
ulong flags;
/*
* Normally program interrupts are delivered directly
* to the guest by the hardware, but we can get here
* as a result of a hypervisor emulation interrupt
* (e40) getting turned into a 700 by BML RTAS.
*/
flags = vcpu->arch.shregs.msr & 0x1f0000ull;
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
break;
}
case BOOK3S_INTERRUPT_SYSCALL:
{
int i;
if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
/*
* Guest userspace executed sc 1. This can only be
* reached by the P9 path because the old path
* handles this case in realmode hcall handlers.
*/
if (!kvmhv_vcpu_is_radix(vcpu)) {
/*
* A guest could be running PR KVM, so this
* may be a PR KVM hcall. It must be reflected
* to the guest kernel as a sc interrupt.
*/
kvmppc_core_queue_syscall(vcpu);
} else {
/*
* Radix guests cannot run PR KVM or nested HV
* hash guests which might run PR KVM, so this
* is always a privilege fault. Send a program
* check to guest kernel.
*/
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
}
r = RESUME_GUEST;
break;
}
/*
* hcall - gather args and set exit_reason. This will next be
* handled by kvmppc_pseries_do_hcall which may be able to deal
* with it and resume guest, or may punt to userspace.
*/
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
run->exit_reason = KVM_EXIT_PAPR_HCALL;
vcpu->arch.hcall_needed = 1;
r = RESUME_HOST;
break;
}
/*
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
* it is for an emulated I/O device or because the corresponding
* host page has been paged out.
*
* Any other HDSI/HISI interrupts have been handled already for P7/8
* guests. For POWER9 hash guests not using rmhandlers, basic hash
* fault handling is done here.
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
unsigned long vsid;
long err;
if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) {
r = RESUME_GUEST; /* Just retry if it's the canary */
break;
}
if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* Radix doesn't require anything, and pre-ISAv3.0 hash
* already attempted to handle this in rmhandlers. The
* hash fault handling below is v3 only (it uses ASDR
* via fault_gpa).
*/
r = RESUME_PAGE_FAULT;
break;
}
if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
kvmppc_core_queue_data_storage(vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
r = RESUME_GUEST;
break;
}
if (!(vcpu->arch.shregs.msr & MSR_DR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
vsid, vcpu->arch.fault_dsisr, true);
if (err == 0) {
r = RESUME_GUEST;
} else if (err == -1 || err == -2) {
r = RESUME_PAGE_FAULT;
} else {
kvmppc_core_queue_data_storage(vcpu,
vcpu->arch.fault_dar, err);
r = RESUME_GUEST;
}
break;
}
case BOOK3S_INTERRUPT_H_INST_STORAGE: {
unsigned long vsid;
long err;
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
DSISR_SRR1_MATCH_64S;
if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* Radix doesn't require anything, and pre-ISAv3.0 hash
* already attempted to handle this in rmhandlers. The
* hash fault handling below is v3 only (it uses ASDR
* via fault_gpa).
*/
if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
r = RESUME_PAGE_FAULT;
break;
}
if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
kvmppc_core_queue_inst_storage(vcpu,
vcpu->arch.fault_dsisr);
r = RESUME_GUEST;
break;
}
if (!(vcpu->arch.shregs.msr & MSR_IR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
vsid, vcpu->arch.fault_dsisr, false);
if (err == 0) {
r = RESUME_GUEST;
} else if (err == -1) {
r = RESUME_PAGE_FAULT;
} else {
kvmppc_core_queue_inst_storage(vcpu, err);
r = RESUME_GUEST;
}
break;
}
/*
* This occurs if the guest executes an illegal instruction.
* If guest debug is disabled, generate a program interrupt
* to the guest. If guest debug is enabled, we need to check
* whether the instruction is a software breakpoint instruction.
* Accordingly return to Guest or Host.
*/
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
r = kvmppc_emulate_debug_inst(vcpu);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
}
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case BOOK3S_INTERRUPT_HV_SOFTPATCH:
/*
* This occurs for various TM-related instructions that
* we need to emulate on POWER9 DD2.2. We have already
* handled the cases where the guest was in real-suspend
* mode and was transitioning to transactional state.
*/
r = kvmhv_p9_tm_emulation(vcpu);
if (r != -1)
break;
fallthrough; /* go to facility unavailable handler */
#endif
/*
* This occurs if the guest (kernel or userspace) does something that
* is prohibited by HFSCR.
* On POWER9, this could be a doorbell instruction that we need
* to emulate.
* Otherwise, we just generate a program interrupt to the guest.
*/
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
u64 cause = vcpu->arch.hfscr >> 56;
r = EMULATE_FAIL;
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (cause == FSCR_MSGP_LG)
r = kvmppc_emulate_doorbell_instr(vcpu);
if (cause == FSCR_PM_LG)
r = kvmppc_pmu_unavailable(vcpu);
if (cause == FSCR_EBB_LG)
r = kvmppc_ebb_unavailable(vcpu);
if (cause == FSCR_TM_LG)
r = kvmppc_tm_unavailable(vcpu);
}
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
}
break;
}
case BOOK3S_INTERRUPT_HV_RM_HARD:
r = RESUME_PASSTHROUGH;
break;
default:
kvmppc_dump_regs(vcpu);
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu->arch.shregs.msr);
run->hw.hardware_exit_reason = vcpu->arch.trap;
r = RESUME_HOST;
break;
}
return r;
}
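/*
 * Handle an exit taken while running a nested (L2) guest: either resume
 * the L2 directly or hand the exit back to the L1 hypervisor that issued
 * H_ENTER_NESTED.
 */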
static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
vcpu->stat.sum_exits++;
/*
* This can happen if an interrupt occurs in the last stages
* of guest entry or the first stages of guest exit (i.e. after
* setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
* and before setting it to KVM_GUEST_MODE_HOST_HV).
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
if (vcpu->arch.shregs.msr & MSR_HV) {
pr_emerg("KVM trap in HV mode while nested!\n");
pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu->arch.shregs.msr);
kvmppc_dump_regs(vcpu);
return RESUME_HOST;
}
switch (vcpu->arch.trap) {
/* We're good on these - the host merely wanted to get our attention */
case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
vcpu->stat.ext_intr_exits++;
r = RESUME_HOST;
break;
case BOOK3S_INTERRUPT_H_DOORBELL:
case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
/* These need to go to the nested HV */
case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
vcpu->stat.dec_exits++;
r = RESUME_HOST;
break;
/* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */
case BOOK3S_INTERRUPT_HMI:
case BOOK3S_INTERRUPT_PERFMON:
case BOOK3S_INTERRUPT_SYSTEM_RESET:
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_MACHINE_CHECK:
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
/* Pass the machine check to the L1 guest */
r = RESUME_HOST;
/* Print the MCE event to host console. */
if (__ratelimit(&rs))
machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
break;
}
/*
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
* it is for an emulated I/O device or because the corresponding
* host page has been paged out.
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
DSISR_SRR1_MATCH_64S;
if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case BOOK3S_INTERRUPT_HV_SOFTPATCH:
/*
* This occurs for various TM-related instructions that
* we need to emulate on POWER9 DD2.2. We have already
* handled the cases where the guest was in real-suspend
* mode and was transitioning to transactional state.
*/
r = kvmhv_p9_tm_emulation(vcpu);
if (r != -1)
break;
fallthrough; /* go to facility unavailable handler */
#endif
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
u64 cause = vcpu->arch.hfscr >> 56;
/*
* Only pass HFU interrupts to the L1 if the facility is
* permitted but disabled by the L1's HFSCR, otherwise
* the interrupt does not make sense to the L1 so turn
* it into a HEAI.
*/
if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
(vcpu->arch.nested_hfscr & (1UL << cause))) {
vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
/*
* If the fetch failed, return to guest and
* try executing it again.
*/
r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
&vcpu->arch.emul_inst);
if (r != EMULATE_DONE)
r = RESUME_GUEST;
else
r = RESUME_HOST;
} else {
r = RESUME_HOST;
}
break;
}
case BOOK3S_INTERRUPT_HV_RM_HARD:
vcpu->arch.trap = 0;
r = RESUME_GUEST;
if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0);
break;
case BOOK3S_INTERRUPT_SYSCALL:
{
unsigned long req = kvmppc_get_gpr(vcpu, 3);
/*
* The H_RPT_INVALIDATE hcalls issued by nested
* guests for process-scoped invalidations when
* GTSE=0, are handled here in L0.
*/
if (req == H_RPT_INVALIDATE) {
r = kvmppc_nested_h_rpt_invalidate(vcpu);
break;
}
r = RESUME_HOST;
break;
}
default:
r = RESUME_HOST;
break;
}
return r;
}
static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
int i;
memset(sregs, 0, sizeof(struct kvm_sregs));
sregs->pvr = vcpu->arch.pvr;
for (i = 0; i < vcpu->arch.slb_max; i++) {
sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
}
return 0;
}
static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
int i, j;
/* Only accept the same PVR as the host's, since we can't spoof it */
if (sregs->pvr != vcpu->arch.pvr)
return -EINVAL;
j = 0;
for (i = 0; i < vcpu->arch.slb_nr; i++) {
if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
++j;
}
}
vcpu->arch.slb_max = j;
return 0;
}
/*
* Enforce limits on guest LPCR values based on hardware availability,
* guest configuration, and possibly hypervisor support and security
* concerns.
*/
unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr)
{
/* LPCR_TC only applies to HPT guests */
if (kvm_is_radix(kvm))
lpcr &= ~LPCR_TC;
/* On POWER8 and above, userspace can modify AIL */
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
lpcr &= ~LPCR_AIL;
if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
/*
* On some POWER9s we force AIL off for radix guests to prevent
* executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to
* guest, which can result in Q0 translations with LPID=0 PID=PIDR to
* be cached, which the host TLB management does not expect.
*/
if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
lpcr &= ~LPCR_AIL;
/*
* On POWER9, allow userspace to enable large decrementer for the
* guest, whether or not the host has it enabled.
*/
if (!cpu_has_feature(CPU_FTR_ARCH_300))
lpcr &= ~LPCR_LD;
return lpcr;
}
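/* Warn once if an LPCR value contains bits that kvmppc_filter_lpcr_hv() would strip. */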
static void verify_lpcr(struct kvm *kvm, unsigned long lpcr)
{
if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) {
WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n",
lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr));
}
}
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
bool preserve_top32)
{
struct kvm *kvm = vcpu->kvm;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
spin_lock(&vc->lock);
/*
* Userspace can only modify
* DPFD (default prefetch depth), ILE (interrupt little-endian),
* TC (translation control), AIL (alternate interrupt location),
* LD (large decrementer).
* These are subject to restrictions from kvmppc_filter_lcpr_hv().
*/
mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD;
/* Broken 32-bit version of LPCR must not clear top bits */
if (preserve_top32)
mask &= 0xFFFFFFFF;
new_lpcr = kvmppc_filter_lpcr_hv(kvm,
(vc->lpcr & ~mask) | (new_lpcr & mask));
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
struct kvm_vcpu *vcpu;
unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.vcore != vc)
continue;
if (new_lpcr & LPCR_ILE)
vcpu->arch.intr_msr |= MSR_LE;
else
vcpu->arch.intr_msr &= ~MSR_LE;
}
}
vc->lpcr = new_lpcr;
spin_unlock(&vc->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = 0;
long int i;
switch (id) {
case KVM_REG_PPC_DEBUG_INST:
*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
break;
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, 0);
break;
case KVM_REG_PPC_DABR:
*val = get_reg_val(id, vcpu->arch.dabr);
break;
case KVM_REG_PPC_DABRX:
*val = get_reg_val(id, vcpu->arch.dabrx);
break;
case KVM_REG_PPC_DSCR:
*val = get_reg_val(id, vcpu->arch.dscr);
break;
case KVM_REG_PPC_PURR:
*val = get_reg_val(id, vcpu->arch.purr);
break;
case KVM_REG_PPC_SPURR:
*val = get_reg_val(id, vcpu->arch.spurr);
break;
case KVM_REG_PPC_AMR:
*val = get_reg_val(id, vcpu->arch.amr);
break;
case KVM_REG_PPC_UAMOR:
*val = get_reg_val(id, vcpu->arch.uamor);
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
*val = get_reg_val(id, vcpu->arch.mmcr[i]);
break;
case KVM_REG_PPC_MMCR2:
*val = get_reg_val(id, vcpu->arch.mmcr[2]);
break;
case KVM_REG_PPC_MMCRA:
*val = get_reg_val(id, vcpu->arch.mmcra);
break;
case KVM_REG_PPC_MMCRS:
*val = get_reg_val(id, vcpu->arch.mmcrs);
break;
case KVM_REG_PPC_MMCR3:
*val = get_reg_val(id, vcpu->arch.mmcr[3]);
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
*val = get_reg_val(id, vcpu->arch.pmc[i]);
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
*val = get_reg_val(id, vcpu->arch.spmc[i]);
break;
case KVM_REG_PPC_SIAR:
*val = get_reg_val(id, vcpu->arch.siar);
break;
case KVM_REG_PPC_SDAR:
*val = get_reg_val(id, vcpu->arch.sdar);
break;
case KVM_REG_PPC_SIER:
*val = get_reg_val(id, vcpu->arch.sier[0]);
break;
case KVM_REG_PPC_SIER2:
*val = get_reg_val(id, vcpu->arch.sier[1]);
break;
case KVM_REG_PPC_SIER3:
*val = get_reg_val(id, vcpu->arch.sier[2]);
break;
case KVM_REG_PPC_IAMR:
*val = get_reg_val(id, vcpu->arch.iamr);
break;
case KVM_REG_PPC_PSPB:
*val = get_reg_val(id, vcpu->arch.pspb);
break;
case KVM_REG_PPC_DPDES:
/*
* On POWER9, where we are emulating msgsndp etc.,
* we return 1 bit for each vcpu, which can come from
* either vcore->dpdes or doorbell_request.
* On POWER8, doorbell_request is 0.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
*val = get_reg_val(id, vcpu->arch.doorbell_request);
else
*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
case KVM_REG_PPC_VTB:
*val = get_reg_val(id, vcpu->arch.vcore->vtb);
break;
case KVM_REG_PPC_DAWR:
*val = get_reg_val(id, vcpu->arch.dawr0);
break;
case KVM_REG_PPC_DAWRX:
*val = get_reg_val(id, vcpu->arch.dawrx0);
break;
case KVM_REG_PPC_DAWR1:
*val = get_reg_val(id, vcpu->arch.dawr1);
break;
case KVM_REG_PPC_DAWRX1:
*val = get_reg_val(id, vcpu->arch.dawrx1);
break;
case KVM_REG_PPC_CIABR:
*val = get_reg_val(id, vcpu->arch.ciabr);
break;
case KVM_REG_PPC_CSIGR:
*val = get_reg_val(id, vcpu->arch.csigr);
break;
case KVM_REG_PPC_TACR:
*val = get_reg_val(id, vcpu->arch.tacr);
break;
case KVM_REG_PPC_TCSCR:
*val = get_reg_val(id, vcpu->arch.tcscr);
break;
case KVM_REG_PPC_PID:
*val = get_reg_val(id, vcpu->arch.pid);
break;
case KVM_REG_PPC_ACOP:
*val = get_reg_val(id, vcpu->arch.acop);
break;
case KVM_REG_PPC_WORT:
*val = get_reg_val(id, vcpu->arch.wort);
break;
case KVM_REG_PPC_TIDR:
*val = get_reg_val(id, vcpu->arch.tid);
break;
case KVM_REG_PPC_PSSCR:
*val = get_reg_val(id, vcpu->arch.psscr);
break;
case KVM_REG_PPC_VPA_ADDR:
spin_lock(&vcpu->arch.vpa_update_lock);
*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
case KVM_REG_PPC_VPA_SLB:
spin_lock(&vcpu->arch.vpa_update_lock);
val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
val->vpaval.length = vcpu->arch.slb_shadow.len;
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
case KVM_REG_PPC_VPA_DTL:
spin_lock(&vcpu->arch.vpa_update_lock);
val->vpaval.addr = vcpu->arch.dtl.next_gpa;
val->vpaval.length = vcpu->arch.dtl.len;
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
case KVM_REG_PPC_TB_OFFSET:
*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
break;
case KVM_REG_PPC_LPCR:
case KVM_REG_PPC_LPCR_64:
*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
break;
case KVM_REG_PPC_PPR:
*val = get_reg_val(id, vcpu->arch.ppr);
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
*val = get_reg_val(id, vcpu->arch.tfhar);
break;
case KVM_REG_PPC_TFIAR:
*val = get_reg_val(id, vcpu->arch.tfiar);
break;
case KVM_REG_PPC_TEXASR:
*val = get_reg_val(id, vcpu->arch.texasr);
break;
case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
i = id - KVM_REG_PPC_TM_GPR0;
*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
break;
case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
{
int j;
i = id - KVM_REG_PPC_TM_VSR0;
if (i < 32)
for (j = 0; j < TS_FPRWIDTH; j++)
val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
else {
if (cpu_has_feature(CPU_FTR_ALTIVEC))
val->vval = vcpu->arch.vr_tm.vr[i-32];
else
r = -ENXIO;
}
break;
}
case KVM_REG_PPC_TM_CR:
*val = get_reg_val(id, vcpu->arch.cr_tm);
break;
case KVM_REG_PPC_TM_XER:
*val = get_reg_val(id, vcpu->arch.xer_tm);
break;
case KVM_REG_PPC_TM_LR:
*val = get_reg_val(id, vcpu->arch.lr_tm);
break;
case KVM_REG_PPC_TM_CTR:
*val = get_reg_val(id, vcpu->arch.ctr_tm);
break;
case KVM_REG_PPC_TM_FPSCR:
*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
break;
case KVM_REG_PPC_TM_AMR:
*val = get_reg_val(id, vcpu->arch.amr_tm);
break;
case KVM_REG_PPC_TM_PPR:
*val = get_reg_val(id, vcpu->arch.ppr_tm);
break;
case KVM_REG_PPC_TM_VRSAVE:
*val = get_reg_val(id, vcpu->arch.vrsave_tm);
break;
case KVM_REG_PPC_TM_VSCR:
if (cpu_has_feature(CPU_FTR_ALTIVEC))
*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
else
r = -ENXIO;
break;
case KVM_REG_PPC_TM_DSCR:
*val = get_reg_val(id, vcpu->arch.dscr_tm);
break;
case KVM_REG_PPC_TM_TAR:
*val = get_reg_val(id, vcpu->arch.tar_tm);
break;
#endif
case KVM_REG_PPC_ARCH_COMPAT:
*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
break;
case KVM_REG_PPC_DEC_EXPIRY:
*val = get_reg_val(id, vcpu->arch.dec_expires);
break;
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
break;
case KVM_REG_PPC_PTCR:
*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
break;
default:
r = -EINVAL;
break;
}
return r;
}
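/*
* Illustrative sketch (assumption, not kernel code): reading one of the
* registers handled above from userspace. The caller fills kvm_one_reg
* with the register ID and the address of a buffer of the right size;
* "vcpu_fd" is assumed to be an open vcpu file descriptor.
*
*	__u64 dscr;
*	struct kvm_one_reg reg = {
*		.id = KVM_REG_PPC_DSCR,
*		.addr = (__u64)&dscr,
*	};
*	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
*		perror("KVM_GET_ONE_REG");
*/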
static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = 0;
long int i;
unsigned long addr, len;
switch (id) {
case KVM_REG_PPC_HIOR:
/* Only allow this to be set to zero */
if (set_reg_val(id, *val))
r = -EINVAL;
break;
case KVM_REG_PPC_DABR:
vcpu->arch.dabr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DABRX:
vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
break;
case KVM_REG_PPC_DSCR:
vcpu->arch.dscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PURR:
vcpu->arch.purr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SPURR:
vcpu->arch.spurr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_AMR:
vcpu->arch.amr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_UAMOR:
vcpu->arch.uamor = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
vcpu->arch.mmcr[i] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCR2:
vcpu->arch.mmcr[2] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCRA:
vcpu->arch.mmcra = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCRS:
vcpu->arch.mmcrs = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCR3:
vcpu->arch.mmcr[3] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
vcpu->arch.pmc[i] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
vcpu->arch.spmc[i] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIAR:
vcpu->arch.siar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SDAR:
vcpu->arch.sdar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIER:
vcpu->arch.sier[0] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIER2:
vcpu->arch.sier[1] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIER3:
vcpu->arch.sier[2] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IAMR:
vcpu->arch.iamr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PSPB:
vcpu->arch.pspb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DPDES:
if (cpu_has_feature(CPU_FTR_ARCH_300))
vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1;
else
vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
vcpu->arch.vcore->vtb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWR:
vcpu->arch.dawr0 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWRX:
vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
break;
case KVM_REG_PPC_DAWR1:
vcpu->arch.dawr1 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWRX1:
vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
break;
case KVM_REG_PPC_CIABR:
vcpu->arch.ciabr = set_reg_val(id, *val);
/* Don't allow setting breakpoints in hypervisor code */
if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
break;
case KVM_REG_PPC_CSIGR:
vcpu->arch.csigr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TACR:
vcpu->arch.tacr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TCSCR:
vcpu->arch.tcscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PID:
vcpu->arch.pid = set_reg_val(id, *val);
break;
case KVM_REG_PPC_ACOP:
vcpu->arch.acop = set_reg_val(id, *val);
break;
case KVM_REG_PPC_WORT:
vcpu->arch.wort = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TIDR:
vcpu->arch.tid = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PSSCR:
vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
break;
case KVM_REG_PPC_VPA_ADDR:
addr = set_reg_val(id, *val);
r = -EINVAL;
if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
vcpu->arch.dtl.next_gpa))
break;
r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
break;
case KVM_REG_PPC_VPA_SLB:
addr = val->vpaval.addr;
len = val->vpaval.length;
r = -EINVAL;
if (addr && !vcpu->arch.vpa.next_gpa)
break;
r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
break;
case KVM_REG_PPC_VPA_DTL:
addr = val->vpaval.addr;
len = val->vpaval.length;
r = -EINVAL;
if (addr && (len < sizeof(struct dtl_entry) ||
!vcpu->arch.vpa.next_gpa))
break;
len -= len % sizeof(struct dtl_entry);
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
/* round up to multiple of 2^24 */
vcpu->arch.vcore->tb_offset =
ALIGN(set_reg_val(id, *val), 1UL << 24);
break;
case KVM_REG_PPC_LPCR:
kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
break;
case KVM_REG_PPC_LPCR_64:
kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
break;
case KVM_REG_PPC_PPR:
vcpu->arch.ppr = set_reg_val(id, *val);
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
vcpu->arch.tfhar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TFIAR:
vcpu->arch.tfiar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TEXASR:
vcpu->arch.texasr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
i = id - KVM_REG_PPC_TM_GPR0;
vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
{
int j;
i = id - KVM_REG_PPC_TM_VSR0;
if (i < 32)
for (j = 0; j < TS_FPRWIDTH; j++)
vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
else
if (cpu_has_feature(CPU_FTR_ALTIVEC))
vcpu->arch.vr_tm.vr[i-32] = val->vval;
else
r = -ENXIO;
break;
}
case KVM_REG_PPC_TM_CR:
vcpu->arch.cr_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_XER:
vcpu->arch.xer_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_LR:
vcpu->arch.lr_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_CTR:
vcpu->arch.ctr_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_FPSCR:
vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_AMR:
vcpu->arch.amr_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_PPR:
vcpu->arch.ppr_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_VRSAVE:
vcpu->arch.vrsave_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_VSCR:
if (cpu_has_feature(CPU_FTR_ALTIVEC))
vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
else
r = -ENXIO;
break;
case KVM_REG_PPC_TM_DSCR:
vcpu->arch.dscr_tm = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TM_TAR:
vcpu->arch.tar_tm = set_reg_val(id, *val);
break;
#endif
case KVM_REG_PPC_ARCH_COMPAT:
r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DEC_EXPIRY:
vcpu->arch.dec_expires = set_reg_val(id, *val);
break;
case KVM_REG_PPC_ONLINE:
i = set_reg_val(id, *val);
if (i && !vcpu->arch.online)
atomic_inc(&vcpu->arch.vcore->online_count);
else if (!i && vcpu->arch.online)
atomic_dec(&vcpu->arch.vcore->online_count);
vcpu->arch.online = i;
break;
case KVM_REG_PPC_PTCR:
vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
break;
default:
r = -EINVAL;
break;
}
return r;
}
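/*
* Illustrative note on the VPA cases above (an ordering sketch, not
* extra kernel code): KVM_REG_PPC_VPA_ADDR must be registered before
* KVM_REG_PPC_VPA_SLB or KVM_REG_PPC_VPA_DTL can be set, and it cannot
* be cleared while either of those is still registered. A hypothetical
* userspace sequence, with vpa_reg/slb_reg/dtl_reg built as in the
* earlier sketches (vpaval carrying address and length for the latter
* two):
*
*	ioctl(vcpu_fd, KVM_SET_ONE_REG, &vpa_reg);	-- VPA first
*	ioctl(vcpu_fd, KVM_SET_ONE_REG, &slb_reg);	-- then the SLB shadow
*	ioctl(vcpu_fd, KVM_SET_ONE_REG, &dtl_reg);	-- then the dispatch trace log
*/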
/*
* On POWER9, threads are independent and can be in different partitions.
* Therefore we consider each thread to be a subcore.
* There is a restriction that all threads have to be in the same
* MMU mode (radix or HPT), unfortunately, but since we only support
* HPT guests on an HPT host so far, that isn't an impediment yet.
*/
static int threads_per_vcore(struct kvm *kvm)
{
if (cpu_has_feature(CPU_FTR_ARCH_300))
return 1;
return threads_per_subcore;
}
static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
{
struct kvmppc_vcore *vcore;
vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
if (vcore == NULL)
return NULL;
spin_lock_init(&vcore->lock);
spin_lock_init(&vcore->stoltb_lock);
rcuwait_init(&vcore->wait);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = id;
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
return vcore;
}
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
static struct debugfs_timings_element {
const char *name;
size_t offset;
} timings[] = {
#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
{"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)},
{"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
{"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)},
{"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)},
{"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)},
{"hypercall", offsetof(struct kvm_vcpu, arch.hcall)},
{"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)},
#else
{"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
{"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
{"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
{"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
{"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
#endif
};
#define N_TIMINGS (ARRAY_SIZE(timings))
struct debugfs_timings_state {
struct kvm_vcpu *vcpu;
unsigned int buflen;
char buf[N_TIMINGS * 100];
};
static int debugfs_timings_open(struct inode *inode, struct file *file)
{
struct kvm_vcpu *vcpu = inode->i_private;
struct debugfs_timings_state *p;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
kvm_get_kvm(vcpu->kvm);
p->vcpu = vcpu;
file->private_data = p;
return nonseekable_open(inode, file);
}
static int debugfs_timings_release(struct inode *inode, struct file *file)
{
struct debugfs_timings_state *p = file->private_data;
kvm_put_kvm(p->vcpu->kvm);
kfree(p);
return 0;
}
static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
struct debugfs_timings_state *p = file->private_data;
struct kvm_vcpu *vcpu = p->vcpu;
char *s, *buf_end;
struct kvmhv_tb_accumulator tb;
u64 count;
loff_t pos;
ssize_t n;
int i, loops;
bool ok;
if (!p->buflen) {
s = p->buf;
buf_end = s + sizeof(p->buf);
for (i = 0; i < N_TIMINGS; ++i) {
struct kvmhv_tb_accumulator *acc;
acc = (struct kvmhv_tb_accumulator *)
((unsigned long)vcpu + timings[i].offset);
ok = false;
for (loops = 0; loops < 1000; ++loops) {
count = acc->seqcount;
if (!(count & 1)) {
smp_rmb();
tb = *acc;
smp_rmb();
if (count == acc->seqcount) {
ok = true;
break;
}
}
udelay(1);
}
if (!ok)
snprintf(s, buf_end - s, "%s: stuck\n",
timings[i].name);
else
snprintf(s, buf_end - s,
"%s: %llu %llu %llu %llu\n",
timings[i].name, count / 2,
tb_to_ns(tb.tb_total),
tb_to_ns(tb.tb_min),
tb_to_ns(tb.tb_max));
s += strlen(s);
}
p->buflen = s - p->buf;
}
pos = *ppos;
if (pos >= p->buflen)
return 0;
if (len > p->buflen - pos)
len = p->buflen - pos;
n = copy_to_user(buf, p->buf + pos, len);
if (n) {
if (n == len)
return -EFAULT;
len -= n;
}
*ppos = pos + len;
return len;
}
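/*
* The retry loop above relies on the usual seqcount convention on the
* writer side: the updater makes seqcount odd while it modifies the
* accumulator and even again when it is finished, which is also why the
* read path prints count / 2 as the number of samples. A minimal sketch
* of that convention (the helper name and the min-initialisation check
* are illustrative assumptions, not the actual update code):
*
*	static void tb_acc_update(struct kvmhv_tb_accumulator *acc, u64 delta)
*	{
*		acc->seqcount++;		// odd: update in progress
*		smp_wmb();
*		acc->tb_total += delta;
*		if (!acc->tb_min || delta < acc->tb_min)
*			acc->tb_min = delta;
*		if (delta > acc->tb_max)
*			acc->tb_max = delta;
*		smp_wmb();
*		acc->seqcount++;		// even: consistent again
*	}
*/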
static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
return -EACCES;
}
static const struct file_operations debugfs_timings_ops = {
.owner = THIS_MODULE,
.open = debugfs_timings_open,
.release = debugfs_timings_release,
.read = debugfs_timings_read,
.write = debugfs_timings_write,
.llseek = generic_file_llseek,
};
/* Create a debugfs directory for the vcpu */
static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
if (cpu_has_feature(CPU_FTR_ARCH_300) == IS_ENABLED(CONFIG_KVM_BOOK3S_HV_P9_TIMING))
debugfs_create_file("timings", 0444, debugfs_dentry, vcpu,
&debugfs_timings_ops);
return 0;
}
#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
return 0;
}
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
{
int err;
int core;
struct kvmppc_vcore *vcore;
struct kvm *kvm;
unsigned int id;
kvm = vcpu->kvm;
id = vcpu->vcpu_id;
vcpu->arch.shared = &vcpu->arch.shregs;
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
/*
* The shared struct is never shared on HV,
* so we can always use host endianness
*/
#ifdef __BIG_ENDIAN__
vcpu->arch.shared_big_endian = true;
#else
vcpu->arch.shared_big_endian = false;
#endif
#endif
vcpu->arch.mmcr[0] = MMCR0_FC;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
}
vcpu->arch.ctrl = CTRL_RUNLATCH;
/* default to host PVR, since we can't spoof it */
kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
vcpu->arch.shregs.msr = MSR_ME;
vcpu->arch.intr_msr = MSR_SF | MSR_ME;
/*
* Set the default HFSCR for the guest from the host value.
* This value is only used on POWER9, where we want to virtualize the
* doorbell facility: we don't set the HFSCR_MSGP bit, so the msgsndp
* etc. instructions trap and we emulate them.
*/
vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
/*
* PM, EBB, TM are demand-faulted, so start with them clear.
*/
vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
kvmppc_mmu_book3s_hv_init(vcpu);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
init_waitqueue_head(&vcpu->arch.cpu_run);
mutex_lock(&kvm->lock);
vcore = NULL;
err = -EINVAL;
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
pr_devel("KVM: VCPU ID too high\n");
core = KVM_MAX_VCORES;
} else {
BUG_ON(kvm->arch.smt_mode != 1);
core = kvmppc_pack_vcpu_id(kvm, id);
}
} else {
core = id / kvm->arch.smt_mode;
}
if (core < KVM_MAX_VCORES) {
vcore = kvm->arch.vcores[core];
if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
pr_devel("KVM: collision on id %u", id);
vcore = NULL;
} else if (!vcore) {
/*
* Take mmu_setup_lock for mutual exclusion
* with kvmppc_update_lpcr().
*/
err = -ENOMEM;
vcore = kvmppc_vcore_create(kvm,
id & ~(kvm->arch.smt_mode - 1));
mutex_lock(&kvm->arch.mmu_setup_lock);
kvm->arch.vcores[core] = vcore;
kvm->arch.online_vcores++;
mutex_unlock(&kvm->arch.mmu_setup_lock);
}
}
mutex_unlock(&kvm->lock);
if (!vcore)
return err;
spin_lock(&vcore->lock);
++vcore->num_threads;
spin_unlock(&vcore->lock);
vcpu->arch.vcore = vcore;
vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
vcpu->arch.thread_cpu = -1;
vcpu->arch.prev_cpu = -1;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
return 0;
}
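/*
* Worked example of the vcore indexing above (illustrative values): with
* kvm->arch.smt_mode == 4 on POWER8, vcpu IDs 0-3 share vcore 0 and
* IDs 4-7 share vcore 1, each vcpu taking a thread slot within it. On
* POWER9 smt_mode is 1, so each vcpu gets its own vcore and the index
* comes from kvmppc_pack_vcpu_id() instead of a simple division.
*
*	core = id / kvm->arch.smt_mode;			// POWER8: id 5, smt_mode 4 -> core 1
*	vcpu->arch.ptid = id - vcore->first_vcpuid;	// 5 - 4 -> thread 1 of that core
*/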
static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
unsigned long flags)
{
int err;
int esmt = 0;
if (flags)
return -EINVAL;
if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
return -EINVAL;
if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* On POWER8 (or POWER7), the threading mode is "strict",
* so we pack smt_mode vcpus per vcore.
*/
if (smt_mode > threads_per_subcore)
return -EINVAL;
} else {
/*
* On POWER9, the threading mode is "loose",
* so each vcpu gets its own vcore.
*/
esmt = smt_mode;
smt_mode = 1;
}
mutex_lock(&kvm->lock);
err = -EBUSY;
if (!kvm->arch.online_vcores) {
kvm->arch.smt_mode = smt_mode;
kvm->arch.emul_smt_mode = esmt;
err = 0;
}
mutex_unlock(&kvm->lock);
return err;
}
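/*
* Illustrative sketch (assumption, not kernel code): userspace reaches
* kvmhv_set_smt_mode() via KVM_ENABLE_CAP on the VM fd with
* KVM_CAP_PPC_SMT, passing the requested threads per core in args[0]
* and flags in args[1]. This has to happen before any vcore exists
* (i.e. before the first vcpu is created), as enforced by the
* online_vcores check above. "vm_fd" is an assumed open VM file
* descriptor.
*
*	struct kvm_enable_cap cap = {
*		.cap = KVM_CAP_PPC_SMT,
*		.args = { 4, 0 },	// smt_mode = 4, flags = 0
*	};
*	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
*		perror("KVM_ENABLE_CAP");
*/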
static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
if (vpa->pinned_addr)
kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
vpa->dirty);
}
static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
spin_lock(&vcpu->arch.vpa_update_lock);
unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
}
static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
{
/* Indicate we want to get back into the guest */
return 1;
}
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
unsigned long dec_nsec, now;
now = get_tb();
if (now > kvmppc_dec_expires_host_tb(vcpu)) {
/* decrementer has already gone negative */
kvmppc_core_queue_dec(vcpu);
kvmppc_core_prepare_to_enter(vcpu);
return;