// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};
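
/*
 * Illustrative sanity check, not in the original source: the table above
 * stores u32-word indices into struct kvm_regs, while vcpu_reg() below
 * indexes an array of unsigned long. The two views only alias correctly
 * because unsigned long is 32 bits wide on 32-bit ARM; an assertion along
 * these lines would document that assumption (static_assert as provided
 * by <linux/build_bug.h>, pulled in via the kvm headers, is assumed here).
 */
static_assert(sizeof(unsigned long) == sizeof(u32),
	      "vcpu_reg_offsets indices assume a 32-bit unsigned long");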

/*
 * Return a pointer to the vcpu's copy of register @reg_num, taking the
 * register banking of the current mode of the virtual CPU into account.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE ... SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
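
/*
 * Illustrative usage, not part of the original file: a load-emulation path
 * would write the value read from an emulated device into the register
 * named by the decoded instruction, and vcpu_reg() transparently resolves
 * that register number to the copy banked for the current mode. The helper
 * name and its parameters below are hypothetical.
 */
static void __maybe_unused demo_complete_mmio_read(struct kvm_vcpu *vcpu,
						   u8 rt, u32 data)
{
	*vcpu_reg(vcpu, rt) = data;
}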

/*
 * Return a pointer to the SPSR banked for the current mode of the
 * virtual CPU.
 */
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}
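
/*
 * Illustrative sketch, not part of the original file: when emulating an
 * exception entry in software, the pre-exception CPSR is saved into the
 * SPSR banked for the mode being entered, which is exactly the slot
 * __vcpu_spsr() returns once the CPSR has been switched to the new mode.
 * The helper below is hypothetical; @new_mode must be one of the modes
 * __vcpu_spsr() handles (SVC/ABT/UND/IRQ/FIQ).
 */
static void __maybe_unused demo_enter_exception_mode(struct kvm_vcpu *vcpu,
						     unsigned long new_mode)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);

	/* Switch mode first so __vcpu_spsr() picks the new mode's bank */
	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | new_mode;
	*__vcpu_spsr(vcpu) = cpsr;
}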

/******************************************************************************
 * Inject exceptions into the guest
 */

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	*vcpu_hcr(vcpu) |= HCR_VA;
}
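
/*
 * Illustrative usage, not in the original file: setting HCR.VA makes the
 * guest take a virtual abort the next time it runs with aborts unmasked,
 * so a hypothetical device-error path running on the VCPU thread can
 * simply queue the abort and return to the run loop.
 */
static void __maybe_unused demo_report_async_error(struct kvm_vcpu *vcpu)
{
	kvm_inject_vabt(vcpu);	/* delivered on the next guest entry */
}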