author    Daniel Sabogal <dsabogalcc@gmail.com>  2017-11-29 01:26:42 -0500
committer Natanael Copa <ncopa@alpinelinux.org>  2017-11-29 16:54:55 +0000
commit    c2b95515ab6338ba95776b13b3729d06f534370f
tree      6e11d868c4019ac2b93b738d4da9e40b751e1804
parent    2ff685890bc01e209f6c5bdfcfa8fcf135381c05
main/xen: security fixes for XSA-246 and XSA-247
 main/xen/APKBUILD           |  12 +-
 main/xen/xsa246-4.9.patch   |  74 ++++
 main/xen/xsa247-4.9-1.patch | 176 ++++++++
 main/xen/xsa247-4.9-2.patch | 109 ++++++
 4 files changed, 370 insertions(+), 1 deletion(-)
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index e128a57be4..bb02b2bee9 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.9.1
-pkgrel=0
+pkgrel=1
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86_64 armhf aarch64"
@@ -98,6 +98,9 @@ options="!strip"
 # - CVE-2017-15594 XSA-244
 # 4.9.0-r7:
 # - CVE-2017-15597 XSA-236
+# 4.9.1-r1:
+# - XSA-246
+# - XSA-247

 case "$CARCH" in
 x86*)
@@ -145,6 +148,10 @@ source="https://downloads.xenproject.org/release/$pkgname/$pkgver/$pkgname-$pkgv
 	http://xenbits.xen.org/xen-extfiles/zlib-$_ZLIB_VERSION.tar.gz
 	http://xenbits.xen.org/xen-extfiles/ipxe-git-$_IPXE_GIT_TAG.tar.gz

+	xsa246-4.9.patch
+	xsa247-4.9-1.patch
+	xsa247-4.9-2.patch
+
 	qemu-coroutine-gthread.patch
 	qemu-xen_paths.patch

@@ -403,6 +410,9 @@ c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a36
 4928b5b82f57645be9408362706ff2c4d9baa635b21b0d41b1c82930e8c60a759b1ea4fa74d7e6c7cae1b7692d006aa5cb72df0c3b88bf049779aa2b566f9d35  tpm_emulator-0.7.4.tar.gz
 021b958fcd0d346c4ba761bcf0cc40f3522de6186cf5a0a6ea34a70504ce9622b1c2626fce40675bc8282cf5f5ade18473656abc38050f72f5d6480507a2106e  zlib-1.2.3.tar.gz
 82ba65e1c676d32b29c71e6395c9506cab952c8f8b03f692e2b50133be8f0c0146d0f22c223262d81a4df579986fde5abc6507869f4965be4846297ef7b4b890  ipxe-git-827dd1bfee67daa683935ce65316f7e0f057fe1c.tar.gz
+b00f42d2069f273e204698177d2c36950cee759a92dfe7833c812ddff4dedde2c4a842980927ec4fc46d1f54b49879bf3a3681c6faf30b72fb3ad6a7eba060b2  xsa246-4.9.patch
+c5e064543048751fda86ce64587493518da87d219ff077abb83ac13d8381ceb29f1b6479fc0b761b8f7a04c8c70203791ac4a8cc79bbc6f4dcfa6661c4790c5e  xsa247-4.9-1.patch
+71aefbe27cbd1d1d363b7d5826c69a238e4aad2958a1c6da330ae5daee791f54ce1d01fb79db84ed4248ab8b1593c9c28c3de5108f4d0953b04f7819af23a1d1  xsa247-4.9-2.patch
 c3c46f232f0bd9f767b232af7e8ce910a6166b126bd5427bb8dc325aeb2c634b956de3fc225cab5af72649070c8205cc8e1cab7689fc266c204f525086f1a562  qemu-coroutine-gthread.patch
 1936ab39a1867957fa640eb81c4070214ca4856a2743ba7e49c0cd017917071a9680d015f002c57fa7b9600dbadd29dcea5887f50e6c133305df2669a7a933f3  qemu-xen_paths.patch
 f095ea373f36381491ad36f0662fb4f53665031973721256b23166e596318581da7cbb0146d0beb2446729adfdb321e01468e377793f6563a67d68b8b0f7ffe3  hotplug-vif-vtrill.patch
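Note on the APKBUILD side of this change: each patch added to source= needs a matching entry in sha512sums=, and those checksum lines are normally regenerated by abuild rather than written by hand. A sketch of the usual workflow, assuming a standard aports checkout (the paths here are illustrative):

# Sketch of the typical workflow for adding patches to an APKBUILD,
# assuming a standard aports checkout; paths are illustrative.
cd aports/main/xen
cp /path/to/xsa246-4.9.patch .     # drop the patch files next to the APKBUILD
# edit APKBUILD: add the patch files to source= and bump pkgrel
abuild checksum                    # regenerates the sha512sums= block
abuild -r                          # verify the patches apply and the package builds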
diff --git a/main/xen/xsa246-4.9.patch b/main/xen/xsa246-4.9.patch
new file mode 100644
index 0000000000..6370a10625
--- /dev/null
+++ b/main/xen/xsa246-4.9.patch
@@ -0,0 +1,74 @@
From: Julien Grall <julien.grall@linaro.org>
Subject: x86/pod: prevent infinite loop when shattering large pages

When populating pages, the PoD may need to split large ones using
p2m_set_entry and request the caller to retry (see ept_get_entry for
instance).

p2m_set_entry may fail to shatter if it is not possible to allocate
memory for the new page table. However, the error is not propagated,
so the callers retry the PoD infinitely.

Prevent the infinite loop by returning false when it is not possible to
shatter the large mapping.

This is XSA-246.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>

--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-                      p2m_populate_on_demand, p2m->default_access);
-        return 0;
+        return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+                             p2m_populate_on_demand, p2m->default_access);
     }

     /* Only reclaim if we're in actual need of more cache. */
@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai

     gfn_aligned = (gfn >> order) << order;

-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-                  p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+                       p2m->default_access) )
+    {
+        p2m_pod_cache_add(p2m, p, order);
+        goto out_fail;
+    }

     for( i = 0; i < (1UL << order); i++ )
     {
@@ -1150,13 +1153,18 @@ remap_and_retry:
     BUG_ON(order != PAGE_ORDER_2M);
     pod_unlock(p2m);

-    /* Remap this 2-meg region in singleton chunks */
-    /* NOTE: In a p2m fine-grained lock scenario this might
-     * need promoting the gfn lock from gfn->2M superpage */
+    /*
+     * Remap this 2-meg region in singleton chunks.  See the comment on the
+     * 1G page splitting path above for why a single call suffices.
+     *
+     * NOTE: In a p2m fine-grained lock scenario this might
+     * need promoting the gfn lock from gfn->2M superpage.
+     */
     gfn_aligned = (gfn>>order)<<order;
-    for(i=0; i<(1<<order); i++)
-        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
+                       p2m_populate_on_demand, p2m->default_access) )
+        return -1;
+
     if ( tb_init_done )
     {
         struct {
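
To make the XSA-246 failure mode concrete, here is a minimal standalone sketch (not Xen code) of the control flow the patch above fixes: a demand-populate path that drops the error from a failed page split leaves its caller retrying forever, while propagating the error lets the retry loop terminate. split_large_page() and the demand_populate_*() names are hypothetical stand-ins for p2m_set_entry() and p2m_pod_demand_populate().

#include <stdbool.h>
#include <stdio.h>

static bool allocation_ok;  /* models whether the p2m memory pool is exhausted */

/* Stand-in for p2m_set_entry() shattering a superpage; fails without memory. */
static int split_large_page(void)
{
    return allocation_ok ? 0 : -1;
}

/* Pre-XSA-246 shape: the split result is dropped, so "retry" is returned forever. */
static int demand_populate_broken(void)
{
    split_large_page();          /* error ignored */
    return 0;                    /* caller interprets 0 as "retry" */
}

/* Post-XSA-246 shape: the failure is propagated, ending the retry loop. */
static int demand_populate_fixed(void)
{
    return split_large_page();
}

int main(void)
{
    allocation_ok = false;

    /* Broken shape: bounded here for demonstration only; in the hypervisor
     * the equivalent loop had no bound, which is the denial of service. */
    int attempts = 0;
    while ( demand_populate_broken() == 0 && ++attempts < 5 )
        ;
    printf("broken: still retrying after %d attempts\n", attempts);

    /* Fixed shape: the first failure stops the loop. */
    if ( demand_populate_fixed() < 0 )
        printf("fixed: populate failed, caller stops retrying\n");
    return 0;
}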
diff --git a/main/xen/xsa247-4.9-1.patch b/main/xen/xsa247-4.9-1.patch
new file mode 100644
index 0000000000..e86d5616c4
--- /dev/null
+++ b/main/xen/xsa247-4.9-1.patch
@@ -0,0 +1,176 @@
From ad208b8b7e45fb2b7c572b86c61c26412609e82d Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:54 +0000
Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
 worked

The PoD zero-check functions speculatively remove memory from the p2m,
then check to see if it's completely zeroed, before putting it in the
cache.

Unfortunately, the p2m_set_entry() calls may fail if the underlying
pagetable structure needs to change and the domain has exhausted its
p2m memory pool: for instance, if we're removing a 2MiB region out of
a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
case); and the return value is not checked.

The underlying mfn will then be added into the PoD cache, and at some
point mapped into another location in the p2m.  If the guest
afterwards balloons out this memory, it will be freed to the hypervisor
and potentially reused by another domain, in spite of the fact that
the original domain still has writable mappings to it.

There are several places where p2m_set_entry() shouldn't be able to
fail, as it is guaranteed to write an entry of the same order that
succeeded before.  Add a backstop of crashing the domain just in case,
and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
builds.

While we're here, use PAGE_ORDER_2M rather than a magic constant.

This is part of XSA-247.

Reported-by: XXX PERSON <XXX EMAIL>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v4:
- Removed some trailing whitespace
v3:
- Reformat reset clause to be more compact
- Make sure to set map[i] = NULL when unmapping in case we need to bail
v2:
- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
---
 xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 61 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 730a48f928..f2ed751892 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -752,8 +752,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     }

     /* Try to remove the page, restoring old mapping if it fails. */
-    p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
-                  p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
+                       p2m_populate_on_demand, p2m->default_access) )
+        goto out;
+
     p2m_tlb_flush_sync(p2m);

     /* Make none of the MFNs are used elsewhere... for example, mapped
@@ -810,9 +812,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     ret = SUPERPAGE_PAGES;

 out_reset:
-    if ( reset )
-        p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
-
+    /*
+     * This p2m_set_entry() call shouldn't be able to fail, since the same order
+     * on the same gfn succeeded above.  If that turns out to be false, crashing
+     * the domain should be the safest way of making sure we don't leak memory.
+     */
+    if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
+                                type0, p2m->default_access) )
+    {
+        ASSERT_UNREACHABLE();
+        domain_crash(d);
+    }
+
 out:
     gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
     return ret;
@@ -869,19 +880,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
         }

         /* Try to remove the page, restoring old mapping if it fails. */
-        p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+        if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
+                           p2m_populate_on_demand, p2m->default_access) )
+            goto skip;

         /* See if the page was successfully unmapped.  (Allow one refcount
          * for being allocated to a domain.) */
         if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
         {
+            /*
+             * If the previous p2m_set_entry call succeeded, this one shouldn't
+             * be able to fail.  If it does, crashing the domain should be safe.
+             */
+            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                               types[i], p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unmap;
+            }
+
+        skip:
             unmap_domain_page(map[i]);
             map[i] = NULL;

-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-                          types[i], p2m->default_access);
-
             continue;
         }
     }
@@ -900,12 +922,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)

         unmap_domain_page(map[i]);

-        /* See comment in p2m_pod_zero_check_superpage() re gnttab
-         * check timing. */
-        if ( j < PAGE_SIZE/sizeof(*map[i]) )
+        map[i] = NULL;
+
+        /*
+         * See comment in p2m_pod_zero_check_superpage() re gnttab
+         * check timing.
+         */
+        if ( j < (PAGE_SIZE / sizeof(*map[i])) )
         {
-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-                          types[i], p2m->default_access);
+            /*
+             * If the previous p2m_set_entry call succeeded, this one shouldn't
+             * be able to fail.  If it does, crashing the domain should be safe.
+             */
+            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                               types[i], p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unmap;
+            }
         }
         else
         {
@@ -929,7 +964,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
             p2m->pod.entry_count++;
         }
     }
-
+
+    return;
+
+out_unmap:
+    /*
+     * Something went wrong, probably crashing the domain.  Unmap
+     * everything and return.
+     */
+    for ( i = 0; i < count; i++ )
+        if ( map[i] )
+            unmap_domain_page(map[i]);
 }

 #define POD_SWEEP_LIMIT 1024
--
2.15.0

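The defensive pattern this patch relies on — still checking an operation that "cannot fail", asserting on debug builds, and failing closed on release builds — is worth seeing in isolation. Below is a minimal standalone sketch, not Xen code: assert_unreachable() and crash_domain() are hypothetical stand-ins for Xen's ASSERT_UNREACHABLE() and domain_crash().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Debug builds panic at the impossible branch; release builds fall through
 * to the fail-safe path, mirroring ASSERT_UNREACHABLE() semantics. */
#ifdef NDEBUG
#define assert_unreachable() ((void)0)
#else
#define assert_unreachable() assert(!"unreachable")
#endif

/* Stand-in for domain_crash(): contain the damage rather than leak memory. */
static void crash_domain(const char *why)
{
    fprintf(stderr, "crashing domain: %s\n", why);
    exit(EXIT_FAILURE);
}

/* Stand-in for re-inserting a p2m entry at an order that succeeded before. */
static int restore_entry(int simulate_failure)
{
    return simulate_failure ? -1 : 0;
}

int main(void)
{
    /* The "impossible" failure: flag it loudly, then fail closed. */
    if ( restore_entry(/* simulate_failure = */ 1) )
    {
        assert_unreachable();
        crash_domain("p2m restore failed after an identical call succeeded");
    }
    return 0;
}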
diff --git a/main/xen/xsa247-4.9-2.patch b/main/xen/xsa247-4.9-2.patch
new file mode 100644
index 0000000000..13737a9bf2
--- /dev/null
+++ b/main/xen/xsa247-4.9-2.patch
@@ -0,0 +1,109 @@
From d4bc7833707351a5341a6bdf04c752a028d9560d Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:55 +0000
Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
 decreasing reservation

If the entire range specified to p2m_pod_decrease_reservation() is marked
populate-on-demand, then it will make a single p2m_set_entry() call,
reducing its PoD entry count.

Unfortunately, in the right circumstances, this p2m_set_entry() call
may fail.  In that case, repeated calls to decrease_reservation() may
cause p2m->pod.entry_count to fall below zero, potentially tripping
over BUG_ON()s to the contrary.

Instead, check to see if the entry succeeded, and return false if not.
The caller will then call guest_remove_page() on the gfns, which will
return -EINVAL upon finding no valid memory there to return.

Unfortunately if the order > 0, the entry may have partially changed.
A domain_crash() is probably the safest thing in that case.

Other p2m_set_entry() calls in the same function should be fine,
because they are writing the entry at its current order.  Nonetheless,
check the return value and crash if our assumption turns out to be
wrong.

This is part of XSA-247.

Reported-by: XXX PERSON <XXX EMAIL>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v2: Crash the domain if we're not sure it's safe (or if we think it
can't happen)
---
 xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index f2ed751892..473d6a6dbf 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -555,11 +555,23 @@ p2m_pod_decrease_reservation(struct domain *d,

     if ( !nonpod )
     {
-        /* All PoD: Mark the whole region invalid and tell caller
-         * we're done. */
-        p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
-                      p2m->default_access);
-        p2m->pod.entry_count-=(1<<order);
+        /*
+         * All PoD: Mark the whole region invalid and tell caller
+         * we're done.
+         */
+        if ( p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
+                           p2m->default_access) )
+        {
+            /*
+             * If this fails, we can't tell how much of the range was changed.
+             * Best to crash the domain unless we're sure a partial change is
+             * impossible.
+             */
+            if ( order != 0 )
+                domain_crash(d);
+            goto out_unlock;
+        }
+        p2m->pod.entry_count -= 1UL << order;
         BUG_ON(p2m->pod.entry_count < 0);
         ret = 1;
         goto out_entry_check;
@@ -600,8 +612,14 @@ p2m_pod_decrease_reservation(struct domain *d,
         n = 1UL << cur_order;
         if ( t == p2m_populate_on_demand )
         {
-            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-                          p2m_invalid, p2m->default_access);
+            /* This shouldn't be able to fail */
+            if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+                               p2m_invalid, p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unlock;
+            }
             p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
             pod -= n;
@@ -622,8 +640,14 @@ p2m_pod_decrease_reservation(struct domain *d,

         page = mfn_to_page(mfn);

-        p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-                      p2m_invalid, p2m->default_access);
+        /* This shouldn't be able to fail */
+        if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+                           p2m_invalid, p2m->default_access) )
+        {
+            ASSERT_UNREACHABLE();
+            domain_crash(d);
+            goto out_unlock;
+        }
         p2m_tlb_flush_sync(p2m);
         for ( j = 0; j < n; ++j )
             set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
--
2.15.0

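As a closing illustration of the accounting bug this second patch addresses, here is a small standalone sketch, not Xen code: if the entry update fails but the PoD counter is decremented anyway, repeated calls drive the count negative (which would trip the BUG_ON in the real code). pod_entry_count, set_entry_invalid(), and the decrease_reservation_*() names are hypothetical stand-ins.

#include <stdio.h>

static long pod_entry_count = 512;   /* entries currently marked populate-on-demand */

/* Stand-in for p2m_set_entry(); fails when the p2m memory pool is exhausted. */
static int set_entry_invalid(int simulate_failure)
{
    return simulate_failure ? -1 : 0;
}

/* Pre-patch shape: the counter is adjusted whether or not the update stuck. */
static void decrease_reservation_broken(unsigned int order)
{
    set_entry_invalid(1);                 /* failure ignored */
    pod_entry_count -= 1L << order;       /* underflows on repeated failures */
}

/* Post-patch shape: bail out first, so a failed update never touches the count. */
static int decrease_reservation_fixed(unsigned int order)
{
    if ( set_entry_invalid(1) )
        return 0;    /* caller falls back to removing pages individually */
    pod_entry_count -= 1L << order;
    return 1;
}

int main(void)
{
    for ( int i = 0; i < 4; i++ )
        decrease_reservation_broken(9);   /* four 512-entry decrements of a 512 count */
    printf("broken: pod_entry_count = %ld (negative)\n", pod_entry_count);

    pod_entry_count = 512;
    for ( int i = 0; i < 4; i++ )
        decrease_reservation_fixed(9);
    printf("fixed:  pod_entry_count = %ld\n", pod_entry_count);
    return 0;
}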