From: Jan Beulich <jbeulich@suse.com>
Subject: x86: force EFLAGS.IF on when exiting to PV guests

Guest kernels modifying instructions in the process of being emulated
for another of their vCPUs may cause EFLAGS.IF to be cleared upon the
next exit to guest context, by converting the instruction being
emulated into CLI (at the right point in time). Prevent any such bad
effects by always forcing EFLAGS.IF on. And to cover hypothetical other
similar issues, also force EFLAGS.{IOPL,NT,VM} to zero.
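
For illustration only (not part of the patch): a rough C equivalent of
what the hunks below do to the saved guest RFLAGS before returning via
SYSRET or IRET. The helper name is made up; the constants are the
architectural EFLAGS bit values, which match Xen's own X86_EFLAGS_*
definitions.

    #define X86_EFLAGS_IF   0x00000200UL
    #define X86_EFLAGS_IOPL 0x00003000UL
    #define X86_EFLAGS_NT   0x00004000UL
    #define X86_EFLAGS_VM   0x00020000UL

    /* Sanitize the guest's saved RFLAGS: force IOPL/NT/VM to zero and
     * IF to one, preserving all other bits. */
    static unsigned long sanitize_guest_eflags(unsigned long eflags)
    {
        eflags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_VM);
        eflags |= X86_EFLAGS_IF;
        return eflags;
    }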

This is XSA-202.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -109,6 +109,8 @@ compat_process_trap:
 /* %rbx: struct vcpu, interrupts disabled */
 ENTRY(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
+        mov   $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11d
+        and   UREGS_eflags(%rsp),%r11d
 .Lcr4_orig:
         .skip .Lcr4_alt_end - .Lcr4_alt, 0x90
 .Lcr4_orig_end:
@@ -144,6 +146,8 @@ ENTRY(compat_restore_all_guest)
                              (.Lcr4_orig_end - .Lcr4_orig), \
                              (.Lcr4_alt_end - .Lcr4_alt)
         .popsection
+        or    $X86_EFLAGS_IF,%r11
+        mov   %r11d,UREGS_eflags(%rsp)
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -40,28 +40,29 @@ restore_all_guest:
         testw $TRAP_syscall,4(%rsp)
         jz    iret_exit_to_guest
 
+        movq  24(%rsp),%r11           # RFLAGS
+        andq  $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11
+        orq   $X86_EFLAGS_IF,%r11
+
         /* Don't use SYSRET path if the return address is not canonical. */
         movq  8(%rsp),%rcx
         sarq  $47,%rcx
         incl  %ecx
         cmpl  $1,%ecx
-        ja    .Lforce_iret
+        movq  8(%rsp),%rcx            # RIP
+        ja    iret_exit_to_guest
 
         cmpw  $FLAT_USER_CS32,16(%rsp)# CS
-        movq  8(%rsp),%rcx            # RIP
-        movq  24(%rsp),%r11           # RFLAGS
         movq  32(%rsp),%rsp           # RSP
         je    1f
         sysretq
 1:      sysretl
 
-.Lforce_iret:
-        /* Mimic SYSRET behavior. */
-        movq  8(%rsp),%rcx            # RIP
-        movq  24(%rsp),%r11           # RFLAGS
         ALIGN
 /* No special register assumptions. */
 iret_exit_to_guest:
+        andl  $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),24(%rsp)
+        orl   $X86_EFLAGS_IF,24(%rsp)
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)