// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2026, Red Hat, Inc.
 *
 * Test that vmx_leave_smm() validates vmcs12 controls before re-entering
 * nested guest mode on RSM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "smm.h"
#include "hyperv.h"
#include "vmx.h"
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe
#define SYNC_PORT 0xe
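
/* Two-level expansion so XSTR() stringifies the macro's value, not its name. */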
#define STR(x) #x
#define XSTR(s) STR(s)

/*
 * SMI handler: runs in real-address mode.
 * Reports SMRAM_STAGE via port IO, then does RSM.
 */
static uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,	/* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,	/* in $SYNC_PORT, %al */
	0x0f, 0xaa,		/* rsm */
};
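
/*
 * The guest and the SMI handler both report progress with an "in" from
 * SYNC_PORT: the I/O access exits to userspace with the stage number in AL.
 */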

static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n"
		     : "+a" (phase));
}

static void l2_guest_code(void)
{
	sync_with_host(1);

	/* After SMI+RSM with invalid controls, we should not reach here. */
	vmcall();
}

static void guest_code(struct vmx_pages *vmx_pages,
		       struct hyperv_test_pages *hv_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	/* Set up Hyper-V enlightenments and eVMCS. */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
	evmcs_enable();

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_evmcs(hv_pages));
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(!vmlaunch());

	/* L2 exits back to L1 via vmcall only if the test fails. */
	sync_with_host(2);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
	struct hyperv_test_pages *hv;
	struct hv_enlightened_vmcs *evmcs;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_regs regs;
	int stage_reported;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
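
	/*
	 * Create the VM, place the SMI handler in SMRAM, and give the vCPU
	 * Hyper-V CPUID leaves plus eVMCS support before handing it the VMX
	 * and Hyper-V test pages.
	 */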
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler));

	vcpu_set_hv_cpuid(vcpu);
	vcpu_enable_evmcs(vcpu);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	hv = vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
	vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva);

	vcpu_run(vcpu);

	/* L2 is running and syncs with host. */
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	vcpu_regs_get(vcpu, &regs);
	stage_reported = regs.rax & 0xff;
	TEST_ASSERT(stage_reported == 1,
		    "Expected stage 1, got %d", stage_reported);

	/* Inject SMI while L2 is running. */
	inject_smi(vcpu);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	vcpu_regs_get(vcpu, &regs);
	stage_reported = regs.rax & 0xff;
	TEST_ASSERT(stage_reported == SMRAM_STAGE,
		    "Expected SMM handler stage %#x, got %#x",
		    SMRAM_STAGE, stage_reported);

	/*
	 * The guest is now paused in the SMI handler, about to execute RSM.
	 * Hack the eVMCS page to set up an invalid pin-based execution
	 * control (PIN_BASED_VIRTUAL_NMIS without PIN_BASED_NMI_EXITING).
	 */
	evmcs = hv->enlightened_vmcs_hva;
	evmcs->pin_based_vm_exec_control |= PIN_BASED_VIRTUAL_NMIS;
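	/* Mark every eVMCS field dirty so KVM won't skip the doctored control. */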
	evmcs->hv_clean_fields = 0;

	/*
	 * Trigger copy_enlightened_to_vmcs12() via KVM_GET_NESTED_STATE,
	 * copying the invalid pin_based_vm_exec_control into cached_vmcs12.
	 */
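	/* The buffer must also hold the vmcs12 blob returned after the header. */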
	union {
		struct kvm_nested_state state;
		char state_[16384];
	} nested_state_buf;

	memset(&nested_state_buf, 0, sizeof(nested_state_buf));
	nested_state_buf.state.size = sizeof(nested_state_buf);
	vcpu_nested_state_get(vcpu, &nested_state_buf.state);

	/*
	 * Resume the guest. The SMI handler executes RSM, which calls
	 * vmx_leave_smm(). nested_vmx_check_controls() should detect
	 * VIRTUAL_NMIS without NMI_EXITING and cause a triple fault.
	 */
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);

	kvm_vm_free(vm);
	return 0;
}