diff --git a/.gitignore b/.gitignore
index 139f059..a3ba858 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,4 +6,4 @@ lwip-1.3.0.tar.gz
 pciutils-2.2.9.tar.bz2
 zlib-1.2.3.tar.gz
 polarssl-1.1.4-gpl.tgz
-/xen-4.17.0.tar.gz
+/xen-4.17.1.tar.gz
diff --git a/sources b/sources
index 435fdc0..c81c5e2 100644
--- a/sources
+++ b/sources
@@ -4,4 +4,4 @@ SHA512 (newlib-1.16.0.tar.gz) = 40eb96bbc6736a16b6399e0cdb73e853d0d90b685c967e77
 SHA512 (zlib-1.2.3.tar.gz) = 021b958fcd0d346c4ba761bcf0cc40f3522de6186cf5a0a6ea34a70504ce9622b1c2626fce40675bc8282cf5f5ade18473656abc38050f72f5d6480507a2106e
 SHA512 (polarssl-1.1.4-gpl.tgz) = 88da614e4d3f4409c4fd3bb3e44c7587ba051e3fed4e33d526069a67e8180212e1ea22da984656f50e290049f60ddca65383e5983c0f8884f648d71f698303ad
 SHA512 (pciutils-2.2.9.tar.bz2) = 2b3d98d027e46d8c08037366dde6f0781ca03c610ef2b380984639e4ef39899ed8d8b8e4cd9c9dc54df101279b95879bd66bfd4d04ad07fef41e847ea7ae32b5
-SHA512 (xen-4.17.0.tar.gz) = 8aa7c3025c81127a4f653411bc1cbe8fc27b767a2f96a2a454329a6773cb74d714ecc1ff011a8e7169c6b83b2cc5cb39c39798bd6b1178df75ce9ff00f5a1011
+SHA512 (xen-4.17.1.tar.gz) = bd98c5a2d75a0368ef312274b8e47f27db7f6b79302fd0c8b4975185e63640f98d9302e2834344acc1e97133e19f3993359dfab85aea5928008debada78c5f7a
diff --git a/xen.spec b/xen.spec
index 9467ab7..0c8c5de 100644
--- a/xen.spec
+++ b/xen.spec
@@ -54,8 +54,8 @@ Summary: Xen is a virtual machine monitor
 
 Name: xen
-Version: 4.17.0
-Release: 9%{?dist}
+Version: 4.17.1
+Release: 1%{?dist}
 License: GPLv2+ and LGPLv2+ and BSD
 URL: http://xen.org/
 Source0: https://downloads.xenproject.org/release/xen/%{version}/xen-%{version}.tar.gz
@@ -110,13 +110,6 @@ Patch43: xen.gcc11.fixes.patch
 Patch45: xen.gcc12.fixes.patch
 Patch46: xen.efi.build.patch
 Patch47: xen.gcc13.fixes.patch
-Patch48: xsa425.patch
-Patch49: xsa426.patch
-Patch50: xsa427.patch
-Patch51: xsa428-4.17-1.patch
-Patch52: xsa428-4.17-2.patch
-Patch53: xsa429.patch
-Patch54: xsa430.patch
 
 %if %build_qemutrad
 
@@ -298,56 +291,49 @@ manage Xen virtual machines.
 
 %prep
 %setup -q
-%patch4 -p1
-%patch5 -p1
-%patch6 -p1
-%patch7 -p1
-%patch8 -p1
-%patch9 -p1
-%patch10 -p1
-%patch11 -p1
-%patch12 -p1
-%patch13 -p1
-%patch14 -p1
-%patch15 -p1
-%patch16 -p1
-%patch17 -p1
-%patch18 -p1
-%patch19 -p1
-%patch20 -p1
-%patch21 -p1
-%patch22 -p1
-%patch23 -p1
-%patch24 -p1
-%patch25 -p1
-%patch33 -p1
-%patch34 -p1
-%patch37 -p1
-%patch40 -p1
-%patch41 -p1
-%patch43 -p1
-%patch45 -p1
-%patch46 -p1
-%patch47 -p1
-%patch48 -p1
-%patch49 -p1
-%patch50 -p1
-%patch51 -p1
-%patch52 -p1
-%patch53 -p1
-%patch54 -p1
+%patch 4 -p1
+%patch 5 -p1
+%patch 6 -p1
+%patch 7 -p1
+%patch 8 -p1
+%patch 9 -p1
+%patch 10 -p1
+%patch 11 -p1
+%patch 12 -p1
+%patch 13 -p1
+%patch 14 -p1
+%patch 15 -p1
+%patch 16 -p1
+%patch 17 -p1
+%patch 18 -p1
+%patch 19 -p1
+%patch 20 -p1
+%patch 21 -p1
+%patch 22 -p1
+%patch 23 -p1
+%patch 24 -p1
+%patch 25 -p1
+%patch 33 -p1
+%patch 34 -p1
+%patch 37 -p1
+%patch 40 -p1
+%patch 41 -p1
+%patch 43 -p1
+%patch 45 -p1
+%patch 46 -p1
+%patch 47 -p1
 
 # qemu-xen-traditional patches
 pushd tools/qemu-xen-traditional
-%patch27 -p1
-%patch29 -p1
-%patch30 -p1
-%patch31 -p1
-%patch32 -p1
-%patch35 -p1
-%patch36 -p1
-%patch38 -p1
-%patch39 -p1
+%patch 27 -p1
+%patch 29 -p1
+%patch 30 -p1
+%patch 31 -p1
+%patch 32 -p1
+%patch 35 -p1
+%patch 36 -p1
+%patch 38 -p1
+%patch 39 -p1
 popd
 
 # qemu-xen patches
@@ -943,6 +929,11 @@ fi
 %endif
 
 %changelog
+* Tue May 02 2023 Michael Young - 4.17.1-1
+- update to xen-4.17.1
+  remove patches now included upstream
+  switch from patchN to patch N format for applying patches
+
 * Tue Apr 25 2023 Michael Young - 4.17.0-9
 - x86 shadow paging arbitrary pointer dereference [XSA-430, CVE-2022-42335]
diff --git a/xsa425.patch b/xsa425.patch
deleted file mode 100644
index b367320..0000000
--- a/xsa425.patch
+++ /dev/null
@@ -1,132 +0,0 @@
-From: Jason Andryuk
-Subject: Revert "tools/xenstore: simplify loop handling connection I/O"
-
-I'm observing guest kexec trigger xenstored to abort on a double free.
-
-gdb output:
-Program received signal SIGABRT, Aborted.
-__pthread_kill_implementation (no_tid=0, signo=6, threadid=140645614258112) at ./nptl/pthread_kill.c:44
-44 ./nptl/pthread_kill.c: No such file or directory.
-(gdb) bt
- at ./nptl/pthread_kill.c:44
- at ./nptl/pthread_kill.c:78
- at ./nptl/pthread_kill.c:89
- at ../sysdeps/posix/raise.c:26
- at talloc.c:119
- ptr=ptr@entry=0x559fae724290) at talloc.c:232
- at xenstored_core.c:2945
-(gdb) frame 5
- at talloc.c:119
-119 TALLOC_ABORT("Bad talloc magic value - double free");
-(gdb) frame 7
- at xenstored_core.c:2945
-2945 talloc_increase_ref_count(conn);
-(gdb) p conn
-$1 = (struct connection *) 0x559fae724290
-
-Looking at a xenstore trace, we have:
-IN 0x559fae71f250 20230120 17:40:53 READ (/local/domain/3/image/device-model-dom
-id )
-wrl: dom 0 1 msec 10000 credit 1000000 reserve 100 disc
-ard
-wrl: dom 3 1 msec 10000 credit 1000000 reserve 100 disc
-ard
-wrl: dom 0 0 msec 10000 credit 1000000 reserve 0 disc
-ard
-wrl: dom 3 0 msec 10000 credit 1000000 reserve 0 disc
-ard
-OUT 0x559fae71f250 20230120 17:40:53 ERROR (ENOENT )
-wrl: dom 0 1 msec 10000 credit 1000000 reserve 100 disc
-ard
-wrl: dom 3 1 msec 10000 credit 1000000 reserve 100 disc
-ard
-IN 0x559fae71f250 20230120 17:40:53 RELEASE (3 )
-DESTROY watch 0x559fae73f630
-DESTROY watch 0x559fae75ddf0
-DESTROY watch 0x559fae75ec30
-DESTROY watch 0x559fae75ea60
-DESTROY watch 0x559fae732c00
-DESTROY watch 0x559fae72cea0
-DESTROY watch 0x559fae728fc0
-DESTROY watch 0x559fae729570
-DESTROY connection 0x559fae724290
-orphaned node /local/domain/3/device/suspend/event-channel deleted
-orphaned node /local/domain/3/device/vbd/51712 deleted
-orphaned node /local/domain/3/device/vkbd/0 deleted
-orphaned node /local/domain/3/device/vif/0 deleted
-orphaned node /local/domain/3/control/shutdown deleted
-orphaned node /local/domain/3/control/feature-poweroff deleted
-orphaned node /local/domain/3/control/feature-reboot deleted
-orphaned node /local/domain/3/control/feature-suspend deleted
-orphaned node /local/domain/3/control/feature-s3 deleted
-orphaned node /local/domain/3/control/feature-s4 deleted
-orphaned node /local/domain/3/control/sysrq deleted
-orphaned node /local/domain/3/data deleted
-orphaned node /local/domain/3/drivers deleted
-orphaned node /local/domain/3/feature deleted
-orphaned node /local/domain/3/attr deleted
-orphaned node /local/domain/3/error deleted
-orphaned node /local/domain/3/console/backend-id deleted
-
-and no further output.
-
-The trace shows that DESTROY was called for connection 0x559fae724290,
-but that is the same pointer (conn) main() was looping through from
-connections. So it wasn't actually removed from the connections list?
-
-Reverting commit e8e6e42279a5 "tools/xenstore: simplify loop handling
-connection I/O" fixes the abort/double free. I think the use of
-list_for_each_entry_safe is incorrect. list_for_each_entry_safe makes
-traversal safe for deleting the current iterator, but RELEASE/do_release
-will delete some other entry in the connections list. I think the
-observed abort is because list_for_each_entry has next pointing to the
-deleted connection, and it is used in the subsequent iteration.
-
-Add a comment explaining the unsuitability of list_for_each_entry_safe.
-Also notice that the old code takes a reference on next which would
-prevents a use-after-free.
-
-This reverts commit e8e6e42279a5723239c5c40ba4c7f579a979465d.
-
-This is XSA-425/CVE-2022-42330.
-
-Fixes: e8e6e42279a5 ("tools/xenstore: simplify loop handling connection I/O")
-Signed-off-by: Jason Andryuk
-Reviewed-by: Juergen Gross
-Reviewed-by: Julien Grall
----
- tools/xenstore/xenstored_core.c | 19 +++++++++++++++++--
- 1 file changed, 17 insertions(+), 2 deletions(-)
-
-diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
-index 78a3edaa4e..029e3852fc 100644
---- a/tools/xenstore/xenstored_core.c
-+++ b/tools/xenstore/xenstored_core.c
-@@ -2941,8 +2941,23 @@ int main(int argc, char *argv[])
- }
- }
-
-- list_for_each_entry_safe(conn, next, &connections, list) {
-- talloc_increase_ref_count(conn);
-+ /*
-+ * list_for_each_entry_safe is not suitable here because
-+ * handle_input may delete entries besides the current one, but
-+ * those may be in the temporary next which would trigger a
-+ * use-after-free. list_for_each_entry_safe is only safe for
-+ * deleting the current entry.
-+ */
-+ next = list_entry(connections.next, typeof(*conn), list);
-+ if (&next->list != &connections)
-+ talloc_increase_ref_count(next);
-+ while (&next->list != &connections) {
-+ conn = next;
-+
-+ next = list_entry(conn->list.next,
-+ typeof(*conn), list);
-+ if (&next->list != &connections)
-+ talloc_increase_ref_count(next);
-
- if (conn_can_read(conn))
- handle_input(conn);
---
-2.34.1
diff --git a/xsa426.patch b/xsa426.patch
deleted file mode 100644
index 3f5f952..0000000
--- a/xsa426.patch
+++ /dev/null
@@ -1,107 +0,0 @@
-From: Andrew Cooper
-Subject: x86/spec-ctrl: Mitigate Cross-Thread Return Address Predictions
-
-This is XSA-426 / CVE-2022-27672
-
-Signed-off-by: Andrew Cooper
-Reviewed-by: Jan Beulich
-
-diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
-index 923910f553c5..a2ff38cdebf2 100644
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -2355,7 +2355,7 @@ guests to use.
- on entry and exit. These blocks are necessary to virtualise support for
- guests and if disabled, guests will be unable to use IBRS/STIBP/SSBD/etc.
- * `rsb=` offers control over whether to overwrite the Return Stack Buffer /
-- Return Address Stack on entry to Xen.
-+ Return Address Stack on entry to Xen and on idle.
- * `md-clear=` offers control over whether to use VERW to flush
- microarchitectural buffers on idle and exit from Xen. *Note: For
- compatibility with development versions of this fix, `mds=` is also accepted
-diff --git a/xen/arch/x86/include/asm/cpufeatures.h b/xen/arch/x86/include/asm/cpufeatures.h
-index 865f1109866d..da0593de8542 100644
---- a/xen/arch/x86/include/asm/cpufeatures.h
-+++ b/xen/arch/x86/include/asm/cpufeatures.h
-@@ -35,7 +35,8 @@ XEN_CPUFEATURE(SC_RSB_HVM, X86_SYNTH(19)) /* RSB overwrite needed for HVM
- XEN_CPUFEATURE(XEN_SELFSNOOP, X86_SYNTH(20)) /* SELFSNOOP gets used by Xen itself */
- XEN_CPUFEATURE(SC_MSR_IDLE, X86_SYNTH(21)) /* Clear MSR_SPEC_CTRL on idle */
- XEN_CPUFEATURE(XEN_LBR, X86_SYNTH(22)) /* Xen uses MSR_DEBUGCTL.LBR */
--/* Bits 23,24 unused. */
-+/* Bits 23 unused. */
-+XEN_CPUFEATURE(SC_RSB_IDLE, X86_SYNTH(24)) /* RSB overwrite needed for idle. */
- XEN_CPUFEATURE(SC_VERW_IDLE, X86_SYNTH(25)) /* VERW used by Xen for idle */
- XEN_CPUFEATURE(XEN_SHSTK, X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
- XEN_CPUFEATURE(XEN_IBT, X86_SYNTH(27)) /* Xen uses CET Indirect Branch Tracking */
-diff --git a/xen/arch/x86/include/asm/spec_ctrl.h b/xen/arch/x86/include/asm/spec_ctrl.h
-index 6a77c3937844..391973ef6a28 100644
---- a/xen/arch/x86/include/asm/spec_ctrl.h
-+++ b/xen/arch/x86/include/asm/spec_ctrl.h
-@@ -159,6 +159,21 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
- */
- alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,
- [sel] "m" (info->verw_sel));
-+
-+ /*
-+ * Cross-Thread Return Address Predictions:
-+ *
-+ * On vulnerable systems, the return predictions (RSB/RAS) are statically
-+ * partitioned between active threads. When entering idle, our entries
-+ * are re-partitioned to allow the other threads to use them.
-+ *
-+ * In some cases, we might still have guest entries in the RAS, so flush
-+ * them before injecting them sideways to our sibling thread.
-+ *
-+ * (ab)use alternative_input() to specify clobbers.
-+ */
-+ alternative_input("", "DO_OVERWRITE_RSB", X86_FEATURE_SC_RSB_IDLE,
-+ : "rax", "rcx");
- }
-
- /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
-diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
-index a320b81947c8..e80e2a5ed1a9 100644
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -1327,13 +1327,38 @@ void __init init_speculation_mitigations(void)
- * 3) Some CPUs have RSBs which are not full width, which allow the
- * attacker's entries to alias Xen addresses.
- *
-+ * 4) Some CPUs have RSBs which are re-partitioned based on thread
-+ * idleness, which allows an attacker to inject entries into the other
-+ * thread. We still active the optimisation in this case, and mitigate
-+ * in the idle path which has lower overhead.
-+ *
- * It is safe to turn off RSB stuffing when Xen is using SMEP itself, and
- * 32bit PV guests are disabled, and when the RSB is full width.
- */
- BUILD_BUG_ON(RO_MPT_VIRT_START != PML4_ADDR(256));
-- if ( opt_rsb_pv == -1 && boot_cpu_has(X86_FEATURE_XEN_SMEP) &&
-- !opt_pv32 && rsb_is_full_width() )
-- opt_rsb_pv = 0;
-+ if ( opt_rsb_pv == -1 )
-+ {
-+ opt_rsb_pv = (opt_pv32 || !boot_cpu_has(X86_FEATURE_XEN_SMEP) ||
-+ !rsb_is_full_width());
-+
-+ /*
-+ * Cross-Thread Return Address Predictions.
-+ *
-+ * Vulnerable systems are Zen1/Zen2 uarch, which is AMD Fam17 / Hygon
-+ * Fam18, when SMT is active.
-+ *
-+ * To mitigate, we must flush the RSB/RAS/RAP once between entering
-+ * Xen and going idle.
-+ *
-+ * Most cases flush on entry to Xen anyway. The one case where we
-+ * don't is when using the SMEP optimisation for PV guests. Flushing
-+ * before going idle is less overhead than flushing on PV entry.
-+ */
-+ if ( !opt_rsb_pv && hw_smt_enabled &&
-+ (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD|X86_VENDOR_HYGON)) &&
-+ (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) )
-+ setup_force_cpu_cap(X86_FEATURE_SC_RSB_IDLE);
-+ }
-
- if ( opt_rsb_pv )
- {
diff --git a/xsa427.patch b/xsa427.patch
deleted file mode 100644
index 861f81d..0000000
--- a/xsa427.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From: Jan Beulich
-Subject: x86/shadow: account for log-dirty mode when pre-allocating
-
-Pre-allocation is intended to ensure that in the course of constructing
-or updating shadows there won't be any risk of just made shadows or
-shadows being acted upon can disappear under our feet. The amount of
-pages pre-allocated then, however, needs to account for all possible
-subsequent allocations. While the use in sh_page_fault() accounts for
-all shadows which may need making, so far it didn't account for
-allocations coming from log-dirty tracking (which piggybacks onto the
-P2M allocation functions).
-
-Since shadow_prealloc() takes a count of shadows (or other data
-structures) rather than a count of pages, putting the adjustment at the
-call site of this function won't work very well: We simply can't express
-the correct count that way in all cases. Instead take care of this in
-the function itself, by "snooping" for L1 type requests. (While not
-applicable right now, future new request sites of L1 tables would then
-also be covered right away.)
-
-It is relevant to note here that pre-allocations like the one done from
-shadow_alloc_p2m_page() are benign when they fall in the "scope" of an
-earlier pre-alloc which already included that count: The inner call will
-simply find enough pages available then; it'll bail right away.
-
-This is CVE-2022-42332 / XSA-427.
-
-Signed-off-by: Jan Beulich
-Reviewed-by: Tim Deegan
----
-v2: Entirely different approach.
-
---- a/xen/arch/x86/include/asm/paging.h
-+++ b/xen/arch/x86/include/asm/paging.h
-@@ -189,6 +189,10 @@ bool paging_mfn_is_dirty(const struct do
- #define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
- (LOGDIRTY_NODE_ENTRIES-1))
-
-+#define paging_logdirty_levels() \
-+ (DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3), \
-+ PAGE_SHIFT - ilog2(sizeof(mfn_t))) + 1)
-+
- #ifdef CONFIG_HVM
- /* VRAM dirty tracking support */
- struct sh_dirty_vram {
---- a/xen/arch/x86/mm/paging.c
-+++ b/xen/arch/x86/mm/paging.c
-@@ -282,6 +282,7 @@ void paging_mark_pfn_dirty(struct domain
- if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
- return;
-
-+ BUILD_BUG_ON(paging_logdirty_levels() != 4);
- i1 = L1_LOGDIRTY_IDX(pfn);
- i2 = L2_LOGDIRTY_IDX(pfn);
- i3 = L3_LOGDIRTY_IDX(pfn);
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -1011,7 +1011,17 @@ bool shadow_prealloc(struct domain *d, u
- if ( unlikely(d->is_dying) )
- return false;
-
-- ret = _shadow_prealloc(d, shadow_size(type) * count);
-+ count *= shadow_size(type);
-+ /*
-+ * Log-dirty handling may result in allocations when populating its
-+ * tracking structures. Tie this to the caller requesting space for L1
-+ * shadows.
-+ */
-+ if ( paging_mode_log_dirty(d) &&
-+ ((SHF_L1_ANY | SHF_FL1_ANY) & (1u << type)) )
-+ count += paging_logdirty_levels();
-+
-+ ret = _shadow_prealloc(d, count);
- if ( !ret && (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
- /*
- * Failing to allocate memory required for shadow usage can only result in
diff --git a/xsa428-4.17-1.patch b/xsa428-4.17-1.patch
deleted file mode 100644
index 3852f08..0000000
--- a/xsa428-4.17-1.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Jan Beulich
-Subject: x86/HVM: bound number of pinned cache attribute regions
-
-This is exposed via DMOP, i.e. to potentially not fully privileged
-device models. With that we may not permit registration of an (almost)
-unbounded amount of such regions.
-
-This is CVE-2022-42333 / part of XSA-428.
-
-Reported-by: Andrew Cooper
-Fixes: 642123c5123f ("x86/hvm: provide XEN_DMOP_pin_memory_cacheattr")
-Signed-off-by: Jan Beulich
-Reviewed-by: Andrew Cooper
-
---- a/xen/arch/x86/hvm/mtrr.c
-+++ b/xen/arch/x86/hvm/mtrr.c
-@@ -595,6 +595,7 @@ int hvm_set_mem_pinned_cacheattr(struct
- uint64_t gfn_end, uint32_t type)
- {
- struct hvm_mem_pinned_cacheattr_range *range;
-+ unsigned int nr = 0;
- int rc = 1;
-
- if ( !is_hvm_domain(d) )
-@@ -666,11 +667,15 @@ int hvm_set_mem_pinned_cacheattr(struct
- rc = -EBUSY;
- break;
- }
-+ ++nr;
- }
- rcu_read_unlock(&pinned_cacheattr_rcu_lock);
- if ( rc <= 0 )
- return rc;
-
-+ if ( nr >= 64 /* The limit is arbitrary. */ )
-+ return -ENOSPC;
-+
- range = xzalloc(struct hvm_mem_pinned_cacheattr_range);
- if ( range == NULL )
- return -ENOMEM;
diff --git a/xsa428-4.17-2.patch b/xsa428-4.17-2.patch
deleted file mode 100644
index 087230a..0000000
--- a/xsa428-4.17-2.patch
+++ /dev/null
@@ -1,114 +0,0 @@
-From: Jan Beulich
-Subject: x86/HVM: serialize pinned cache attribute list manipulation
-
-While the RCU variants of list insertion and removal allow lockless list
-traversal (with RCU just read-locked), insertions and removals still
-need serializing amongst themselves. To keep things simple, use the
-domain lock for this purpose.
-
-This is CVE-2022-42334 / part of XSA-428.
-
-Fixes: 642123c5123f ("x86/hvm: provide XEN_DMOP_pin_memory_cacheattr")
-Signed-off-by: Jan Beulich
-Reviewed-by: Julien Grall
-
---- a/xen/arch/x86/hvm/mtrr.c
-+++ b/xen/arch/x86/hvm/mtrr.c
-@@ -594,7 +594,7 @@ static void cf_check free_pinned_cacheat
- int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
- uint64_t gfn_end, uint32_t type)
- {
-- struct hvm_mem_pinned_cacheattr_range *range;
-+ struct hvm_mem_pinned_cacheattr_range *range, *newr;
- unsigned int nr = 0;
- int rc = 1;
-
-@@ -608,14 +608,15 @@ int hvm_set_mem_pinned_cacheattr(struct
- {
- case XEN_DOMCTL_DELETE_MEM_CACHEATTR:
- /* Remove the requested range. */
-- rcu_read_lock(&pinned_cacheattr_rcu_lock);
-- list_for_each_entry_rcu ( range,
-- &d->arch.hvm.pinned_cacheattr_ranges,
-- list )
-+ domain_lock(d);
-+ list_for_each_entry ( range,
-+ &d->arch.hvm.pinned_cacheattr_ranges,
-+ list )
- if ( range->start == gfn_start && range->end == gfn_end )
- {
-- rcu_read_unlock(&pinned_cacheattr_rcu_lock);
- list_del_rcu(&range->list);
-+ domain_unlock(d);
-+
- type = range->type;
- call_rcu(&range->rcu, free_pinned_cacheattr_entry);
- p2m_memory_type_changed(d);
-@@ -636,7 +637,7 @@ int hvm_set_mem_pinned_cacheattr(struct
- }
- return 0;
- }
-- rcu_read_unlock(&pinned_cacheattr_rcu_lock);
-+ domain_unlock(d);
- return -ENOENT;
-
- case PAT_TYPE_UC_MINUS:
-@@ -651,7 +652,10 @@ int hvm_set_mem_pinned_cacheattr(struct
- return -EINVAL;
- }
-
-- rcu_read_lock(&pinned_cacheattr_rcu_lock);
-+ newr = xzalloc(struct hvm_mem_pinned_cacheattr_range);
-+
-+ domain_lock(d);
-+
- list_for_each_entry_rcu ( range,
- &d->arch.hvm.pinned_cacheattr_ranges,
- list )
-@@ -669,27 +673,34 @@ int hvm_set_mem_pinned_cacheattr(struct
- }
- ++nr;
- }
-- rcu_read_unlock(&pinned_cacheattr_rcu_lock);
-+
- if ( rc <= 0 )
-- return rc;
-+ /* nothing */;
-+ else if ( nr >= 64 /* The limit is arbitrary. */ )
-+ rc = -ENOSPC;
-+ else if ( !newr )
-+ rc = -ENOMEM;
-+ else
-+ {
-+ newr->start = gfn_start;
-+ newr->end = gfn_end;
-+ newr->type = type;
-
-- if ( nr >= 64 /* The limit is arbitrary. */ )
-- return -ENOSPC;
-+ list_add_rcu(&newr->list, &d->arch.hvm.pinned_cacheattr_ranges);
-+
-+ newr = NULL;
-+ rc = 0;
-+ }
-
-- range = xzalloc(struct hvm_mem_pinned_cacheattr_range);
-- if ( range == NULL )
-- return -ENOMEM;
-+ domain_unlock(d);
-
-- range->start = gfn_start;
-- range->end = gfn_end;
-- range->type = type;
-+ xfree(newr);
-
-- list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
- p2m_memory_type_changed(d);
- if ( type != PAT_TYPE_WRBACK )
- flush_all(FLUSH_CACHE);
-
-- return 0;
-+ return rc;
- }
-
- static int cf_check hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
diff --git a/xsa429.patch b/xsa429.patch
deleted file mode 100644
index 443869f..0000000
--- a/xsa429.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From: Andrew Cooper
-Subject: x86/spec-ctrl: Defer CR4_PV32_RESTORE on the cstar_enter path
-
-As stated (correctly) by the comment next to SPEC_CTRL_ENTRY_FROM_PV, between
-the two hunks visible in the patch, RET's are not safe prior to this point.
-
-CR4_PV32_RESTORE hides a CALL/RET pair in certain configurations (PV32
-compiled in, SMEP or SMAP active), and the RET can be attacked with one of
-several known speculative issues.
-
-Furthermore, CR4_PV32_RESTORE also hides a reference to the cr4_pv32_mask
-global variable, which is not safe when XPTI is active before restoring Xen's
-full pagetables.
-
-This crash has gone unnoticed because it is only AMD CPUs which permit the
-SYSCALL instruction in compatibility mode, and these are not vulnerable to
-Meltdown so don't activate XPTI by default.
-
-This is XSA-429 / CVE-2022-42331
-
-Fixes: 5e7962901131 ("x86/entry: Organise the use of MSR_SPEC_CTRL at each entry/exit point")
-Fixes: 5784de3e2067 ("x86: Meltdown band-aid against malicious 64-bit PV guests")
-Signed-off-by: Andrew Cooper
-Reviewed-by: Jan Beulich
-
-diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
-index ae012851819a..7675a59ff057 100644
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -288,7 +288,6 @@ ENTRY(cstar_enter)
- ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
- #endif
- push %rax /* Guest %rsp */
-- CR4_PV32_RESTORE
- movq 8(%rsp), %rax /* Restore guest %rax. */
- movq $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain. Compat handled lower. */
- pushq %r11
-@@ -312,6 +311,8 @@ ENTRY(cstar_enter)
- .Lcstar_cr3_okay:
- sti
-
-+ CR4_PV32_RESTORE
-+
- movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
-
- #ifdef CONFIG_PV32
diff --git a/xsa430.patch b/xsa430.patch
deleted file mode 100644
index 4472e41..0000000
--- a/xsa430.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From 57b3a2ace5c4a78118b372c95f69af4f0585b48d Mon Sep 17 00:00:00 2001
-From: Roger Pau Monne
-Date: Mon, 20 Mar 2023 12:08:52 +0100
-Subject: [PATCH] x86/shadow: restore dropped check in
- sh_unshadow_for_p2m_change()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-As a result of 241702e064604dbb3e0d9b731aa8f45be448243b the
-mfn_valid() check in sh_unshadow_for_p2m_change() was lost. That
-allows sh_remove_shadows() to be called with gfns that have no backing
-page, causing an ASSERT to trigger in debug builds or dereferencing an
-arbitrary pointer partially under guest control in non-debug builds:
-
-RIP: e008:[] sh_remove_shadows+0x19f/0x722
-RFLAGS: 0000000000010246 CONTEXT: hypervisor (d0v2)
-[...]
-Xen call trace:
- [] R sh_remove_shadows+0x19f/0x722
- [] F arch/x86/mm/shadow/hvm.c#sh_unshadow_for_p2m_change+0xab/0x2b7
- [] F arch/x86/mm/p2m-pt.c#write_p2m_entry+0x19b/0x4d3
- [] F arch/x86/mm/p2m-pt.c#p2m_pt_set_entry+0x67b/0xa8e
- [] F p2m_set_entry+0xcc/0x149
- [] F unmap_mmio_regions+0x17b/0x2c9
- [] F do_domctl+0x11f3/0x195e
- [] F hvm_hypercall+0x5b1/0xa2d
- [] F vmx_vmexit_handler+0x130f/0x1cd5
- [] F vmx_asm_vmexit_handler+0xf2/0x210
-
-****************************************
-Panic on CPU 1:
-Assertion 'mfn_valid(gmfn)' failed at arch/x86/mm/shadow/common.c:2203
-****************************************
-
-Fix this by restoring the mfn_valid() check in
-sh_unshadow_for_p2m_change(), unifying it with the rest of the checks
-that are done at the start of the function.
-
-This is XSA-430 / CVE-2022-42335
-
-Fixes: 241702e064 ('x86/shadow: slightly consolidate sh_unshadow_for_p2m_change() (part II)')
-Signed-off-by: Roger Pau Monné
-Reviewed-by: Jan Beulich
----
- xen/arch/x86/mm/shadow/hvm.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
-index 88c3c16322..6de479c008 100644
---- a/xen/arch/x86/mm/shadow/hvm.c
-+++ b/xen/arch/x86/mm/shadow/hvm.c
-@@ -814,7 +814,8 @@ static void cf_check sh_unshadow_for_p2m_change(
-
- /* Only previously present / valid entries need processing. */
- if ( !(oflags & _PAGE_PRESENT) ||
-- (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) )
-+ (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
-+ !mfn_valid(omfn) )
- return;
-
- switch ( level )
---
-2.40.0