SEV-ES: vmm(4): vmentry/vmexit for SEV-ES guests
On Tue, May 20, 2025 at 11:28:16AM +0200, Hans-Jörg Höxer wrote:
> Hi,
>
> this diff prepares the entry/exit path for SEV-ES enabled guests:
>
> With SEV-ES the full vCPU state is automatically loaded from or
> saved to the encrypted VMSA. However, host state is not fully saved
> and restored. Therefore, we need a separate entry/exit path for
> SEV-ES enabled guests.
>
> svm_seves_enter_guest() accomplishes this. Maybe we can streamline
> svm_enter_guest() and svm_seves_enter_guest() to share code.
>
> Take care,
> HJ.
Was this the same diff I ok'ed yesterday? It looks fine regardless.
ok mlarkin
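
For anyone reading along, the interface difference this introduces boils
down to the two prototypes below (a rough sketch; the parameter names are
mine, the signatures come from the vmmvar.h hunk):

	/*
	 * Plain SVM: the stub itself copies guest GPRs in and out of
	 * vc_gueststate around vmrun.
	 */
	int svm_enter_guest(uint64_t vmcb_pa, struct vcpu_gueststate *gs,
	    struct region_descriptor *gdt);

	/*
	 * SEV-ES: guest GPRs are loaded/saved by hardware from the
	 * encrypted VMSA, so no gueststate pointer is passed.  Instead
	 * the stub gets the host-virtual address of the host save area
	 * (HSA page + SVM_HSA_OFFSET), where it must stash the host's
	 * callee-saved registers, which vmrun does not save for us.
	 */
	int svm_seves_enter_guest(uint64_t vmcb_pa, vaddr_t hsa_va,
	    struct region_descriptor *gdt);
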
> ---------------------------------------------------------------------------
> commit 1d876e9aa856bac3d580470133c6936c094164d6
> Author: Hans-Joerg Hoexer <hshoexer@genua.de>
> Date: Mon Jul 29 15:25:31 2024 +0200
>
> vmm(4): vmentry/vmexit for SEV-ES guests
>
> With SEV-ES the full vCPU state is automatically loaded from or
> saved to the encrypted VMSA. However, host state is not fully saved
> and restored. Therefore, we need a separate entry/exit path for
> SEV-ES enabled guests.
>
> svm_seves_enter_guest() accomplishes this. Maybe we can streamline
> svm_enter_guest() and svm_seves_enter_guest() to share code.
>
> diff --git a/sys/arch/amd64/amd64/vmm_machdep.c b/sys/arch/amd64/amd64/vmm_machdep.c
> index 7351221b3a5..9470222c351 100644
> --- a/sys/arch/amd64/amd64/vmm_machdep.c
> +++ b/sys/arch/amd64/amd64/vmm_machdep.c
> @@ -6399,8 +6399,13 @@ vcpu_run_svm(struct vcpu *vcpu, struct vm_run_params *vrp)
> KASSERT(vmcb->v_intercept1 & SVM_INTERCEPT_INTR);
> wrmsr(MSR_AMD_VM_HSAVE_PA, vcpu->vc_svm_hsa_pa);
>
> - ret = svm_enter_guest(vcpu->vc_control_pa,
> - &vcpu->vc_gueststate, &gdt);
> + if (vcpu->vc_seves) {
> + ret = svm_seves_enter_guest(vcpu->vc_control_pa,
> + vcpu->vc_svm_hsa_va + SVM_HSA_OFFSET, &gdt);
> + } else {
> + ret = svm_enter_guest(vcpu->vc_control_pa,
> + &vcpu->vc_gueststate, &gdt);
> + }
>
> /* Restore host PKRU state. */
> if (vmm_softc->sc_md.pkru_enabled) {
> diff --git a/sys/arch/amd64/amd64/vmm_support.S b/sys/arch/amd64/amd64/vmm_support.S
> index 30c1b75834f..f9f663cff62 100644
> --- a/sys/arch/amd64/amd64/vmm_support.S
> +++ b/sys/arch/amd64/amd64/vmm_support.S
> @@ -42,6 +42,7 @@
> .global vmx_enter_guest
> .global vmm_dispatch_intr
> .global svm_enter_guest
> + .global svm_seves_enter_guest
>
> .text
> .code64
> @@ -662,3 +663,163 @@ restore_host_svm:
> ret
> lfence
> END(svm_enter_guest)
> +
> +/*
> + * When using SEV-ES we have to save some of the host registers to
> + * the host state save area (HSA). According to the AMD Programmer's
> + * Manual Volume 2 Appendix B the HSA has the same layout as the guest
> + * save area (VMSA) except that it starts at offset 0x400 in the HSA
> + * page.
> + */
> +ENTRY(svm_seves_enter_guest)
> + RETGUARD_SETUP(svm_seves_enter_guest, r11)
> + clgi
> + movq %rdi, %r8
> + pushfq
> +
> + pushq %rdx /* gdt pointer */
> +
> + /*
> + * Save (possibly) lazy-switched selectors
> + */
> + strw %ax /* ax = TR */
> + pushw %ax
> + movw %es, %ax
> + pushw %ax
> + movw %ds, %ax
> + pushw %ax
> + movw %ss, %ax
> + pushw %ax
> +
> + movq $MSR_FSBASE, %rcx
> + rdmsr
> + pushq %rax
> + pushq %rdx
> + pushw %fs
> + movq $MSR_GSBASE, %rcx
> + rdmsr
> + pushq %rax
> + pushq %rdx
> + pushw %gs
> + movq $MSR_KERNELGSBASE, %rcx
> + rdmsr
> + pushq %rax
> + pushq %rdx
> +
> + /*
> + * Save various MSRs
> + */
> + movq $MSR_STAR, %rcx
> + rdmsr
> + pushq %rax
> + pushq %rdx
> +
> + movq $MSR_LSTAR, %rcx
> + rdmsr
> + pushq %rax
> + pushq %rdx
> +
> + movq $MSR_SFMASK, %rcx
> + rdmsr
> + pushq %rax
> + pushq %rdx
> +
> + RETGUARD_PUSH(r11)
> +
> + /*
> + * Preserve callee-preserved registers as per AMD64 ABI in
> + * HSA. Although all registers will be restored from HSA
> + * on vmexit, these will not be saved on vmrun.
> + */
> + movq %r15, 0x378(%rsi)
> + movq %r14, 0x370(%rsi)
> + movq %r13, 0x368(%rsi)
> + movq %r12, 0x360(%rsi)
> + movq %rbp, 0x328(%rsi)
> + movq %rbx, 0x318(%rsi)
> +
> + movq %r8, %rax /* rax = vmcb pa */
> +
> + vmrun %rax
> +
> + /* %rdi = 0 means we took an exit */
> + xorq %rdi, %rdi
> +
> + RETGUARD_POP(r11)
> +
> + /*
> + * Restore saved MSRs
> + */
> + popq %rdx
> + popq %rax
> + movq $MSR_SFMASK, %rcx
> + wrmsr
> +
> + /* make sure guest doesn't bleed into host */
> + xorl %edx, %edx
> + xorl %eax, %eax
> + movq $MSR_CSTAR, %rcx
> + wrmsr
> +
> + popq %rdx
> + popq %rax
> + movq $MSR_LSTAR, %rcx
> + wrmsr
> +
> + popq %rdx
> + popq %rax
> + movq $MSR_STAR, %rcx
> + wrmsr
> +
> + /*
> + * popw %gs will reset gsbase to 0, so preserve it
> + * first. This is to accommodate possibly lazy-switched
> + * selectors from above
> + */
> + cli
> + popq %rdx
> + popq %rax
> + movq $MSR_KERNELGSBASE, %rcx
> + wrmsr
> +
> + popw %gs
> + popq %rdx
> + popq %rax
> + movq $MSR_GSBASE, %rcx
> + wrmsr
> +
> + popw %fs
> + popq %rdx
> + popq %rax
> + movq $MSR_FSBASE, %rcx
> + wrmsr
> +
> + popw %ax
> + movw %ax, %ss
> + popw %ax
> + movw %ax, %ds
> + popw %ax
> + movw %ax, %es
> +
> + xorq %rax, %rax
> + lldtw %ax /* Host LDT is always 0 */
> +
> + popw %ax /* ax = saved TR */
> +
> + popq %rdx
> + addq $0x2, %rdx
> + movq (%rdx), %rdx
> +
> + /* rdx = GDTR base addr; clear busy bit in saved TSS descriptor before ltrw */
> + andb $0xF9, 5(%rdx, %rax)
> +
> + ltrw %ax
> +
> + popfq
> +
> + movq %rdi, %rax
> +
> + RETGUARD_CHECK(svm_seves_enter_guest, r11)
> + ret
> + lfence
> +END(svm_seves_enter_guest)
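
The hardcoded offsets used above when stashing the callee-saved registers
(0x318, 0x328, 0x360-0x378 relative to the HSA pointer in %rsi) are the GPR
slots of the VMSA layout from APM Vol 2 Appendix B, i.e. the same layout as
struct vmsa in vmmvar.h.  A few hypothetical compile-time checks would pin
that down (field names assumed to follow the file's v_ prefix convention):

	/* Sketch only: assumed field names, offsets per APM Vol 2 App. B. */
	CTASSERT(offsetof(struct vmsa, v_rbx) == 0x318);
	CTASSERT(offsetof(struct vmsa, v_rbp) == 0x328);
	CTASSERT(offsetof(struct vmsa, v_r12) == 0x360);
	CTASSERT(offsetof(struct vmsa, v_r13) == 0x368);
	CTASSERT(offsetof(struct vmsa, v_r14) == 0x370);
	CTASSERT(offsetof(struct vmsa, v_r15) == 0x378);
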
> diff --git a/sys/arch/amd64/include/vmmvar.h b/sys/arch/amd64/include/vmmvar.h
> index 8714fd1aace..03d86639476 100644
> --- a/sys/arch/amd64/include/vmmvar.h
> +++ b/sys/arch/amd64/include/vmmvar.h
> @@ -852,6 +852,13 @@ struct vmsa {
> uint64_t v_ic_ibs_xtd_ct;/* 7C0h */
> };
>
> +/*
> + * With SEV-ES the host save area (HSA) has the same layout as the
> + * VMSA. However, it starts at offset 0x400 in the HSA page.
> + * See AMD APM Vol 2, Appendix B.
> + */
> +#define SVM_HSA_OFFSET 0x400
> +
> struct vmcs {
> uint32_t vmcs_revision;
> };
> @@ -1028,6 +1035,7 @@ int invept(uint64_t, struct vmx_invept_descriptor *);
> int vmx_enter_guest(paddr_t *, struct vcpu_gueststate *, int, uint8_t);
> int svm_enter_guest(uint64_t, struct vcpu_gueststate *,
> struct region_descriptor *);
> +int svm_seves_enter_guest(uint64_t, vaddr_t, struct region_descriptor *);
> void start_vmm_on_cpu(struct cpu_info *);
> void stop_vmm_on_cpu(struct cpu_info *);
> void vmclear_on_cpu(struct cpu_info *);
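
For completeness, the new define is consumed back in vcpu_run_svm(): the page
programmed into MSR_AMD_VM_HSAVE_PA is the HSA page, and the host state area
proper starts 0x400 bytes into it, so the call site looks roughly like this
(taken from the vmm_machdep.c hunk, assuming vc_svm_hsa_va is the kernel
mapping of that same page):

	wrmsr(MSR_AMD_VM_HSAVE_PA, vcpu->vc_svm_hsa_pa);
	...
	if (vcpu->vc_seves) {
		ret = svm_seves_enter_guest(vcpu->vc_control_pa,
		    vcpu->vc_svm_hsa_va + SVM_HSA_OFFSET, &gdt);
	}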