/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"

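/*
 * Emit an SVM instruction via asm goto, with an exception table entry that
 * reroutes a fault on the instruction (e.g. #UD or #GP if SVM has been
 * disabled out from under KVM) to the "fault" label, where the unexpected
 * fault is reported via kvm_spurious_fault().  The svm_asm1()/svm_asm2()
 * variants take one/two input operands, referenced as %0/%1 in the asm
 * template.
 */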
#define svm_asm(insn, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) "\n\t"			\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 ::: clobber : fault);				\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) " %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 :: op1 : clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)			\
do {								\
	asm goto("1: " __stringify(insn) " %1, %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 :: op1, op2 : clobber : fault);		\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

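/* Clear GIF: interrupts, NMIs, SMIs, etc. are held pending while GIF=0. */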
static inline void clgi(void)
{
	svm_asm(clgi);
}

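/* Set GIF: take any interrupts and events that were held while GIF=0. */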
static inline void stgi(void)
{
	svm_asm(stgi);
}

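/*
 * Invalidate the TLB mapping for a single page, tagged with the given ASID.
 * INVLPGA implicitly takes the virtual address in rAX and the ASID in ECX;
 * the "a" and "c" constraints below pin the operands accordingly.
 */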
static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}

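/*
 * VMSAVE stores the guest state that VMRUN doesn't itself swap (FS, GS, TR
 * and LDTR including their hidden state, KernelGsBase, and the STAR and
 * SYSENTER MSR families) to the VMCB at the physical address in rAX.
 */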
/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}

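/*
 * Typical usage (a simplified sketch; svm.c's vCPU run path is the real
 * caller): GIF is cleared across the world switch so that the host can't
 * take interrupts while guest state is loaded:
 *
 *	clgi();
 *	... load guest state, VMRUN, save guest state ...
 *	stgi();
 *
 * If one of the instructions faults, e.g. because EFER.SVME was cleared
 * out from under KVM, the helper lands in kvm_spurious_fault() instead of
 * faulting in a random context.
 */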
#endif /* __KVM_X86_SVM_OPS_H */