author    Leo <thinkabit.ukim@gmail.com>  2020-04-19 10:21:18 -0300
committer Leo <thinkabit.ukim@gmail.com>  2020-04-19 11:23:26 -0300
commit    7c4c7fb75cb36f33395f8a86ae820e4fb9f8d59e (patch)
tree      b572a58df9e91432696ecf916b2a5a53adedfeca
parent    2b1bc27a6961f4e856a9cf6d1306301378898126 (diff)
main/xen: fix various security issues
For #11400

Fixed CVEs:
- CVE-2020-11739
- CVE-2020-11740
- CVE-2020-11741
- CVE-2020-11742
- CVE-2020-11743
 main/xen/APKBUILD          |  18
 main/xen/xsa313-1.patch    |  26
 main/xen/xsa313-2.patch    | 132
 main/xen/xsa314-4.13.patch | 121
 main/xen/xsa316-xen.patch  |  30
 main/xen/xsa318.patch      |  39
 6 files changed, 364 insertions(+), 2 deletions(-)
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 304a8c0428..37f62af559 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.11.3
-pkgrel=1
+pkgrel=2
 pkgdesc="Xen hypervisor"
 url="https://www.xenproject.org/"
 arch="x86_64 armhf aarch64" # enable armv7 when builds with gcc8
@@ -173,6 +173,11 @@ options="!strip"
 # - CVE-2019-19578 XSA-309
 # - CVE-2019-19580 XSA-310
 # - CVE-2019-19577 XSA-311
+# 4.11.3-r2:
+# - CVE-2020-11740 CVE-2020-11741 XSA-313
+# - CVE-2020-11739 XSA-314
+# - CVE-2020-11743 XSA-316
+# - CVE-2020-11742 XSA-318
 
 case "$CARCH" in
 x86*)
@@ -247,6 +252,11 @@ source="https://downloads.xenproject.org/release/$pkgname/$pkgver/$pkgname-$pkgv
 	xsa310-0002-x86-mm-alloc-free_lN_table-Retain-partial_flags-on-E.patch
 	xsa310-0003-x86-mm-relinquish_memory-Grab-an-extra-type-ref-when.patch
 	xsa311-4.11.patch
+	xsa313-1.patch
+	xsa313-2.patch
+	xsa314-4.13.patch
+	xsa316-xen.patch
+	xsa318.patch
 
 	xenstored.initd
 	xenstored.confd
@@ -480,7 +490,6 @@ EOF
 EOF
 
 }
-
 sha512sums="2204e490e9fc357a05983a9bf4e7345e1d364fe00400ce473988dcb9ca7d4e2b921fe10f095cbbc64248130a92d22c6f0d154dcae250a57a7f915df32e3dc436  xen-4.11.3.tar.gz
 2e0b0fd23e6f10742a5517981e5171c6e88b0a93c83da701b296f5c0861d72c19782daab589a7eac3f9032152a0fc7eff7f5362db8fccc4859564a9aa82329cf  gmp-4.3.2.tar.bz2
 c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a3628bd00ba4d14a54742bc04848110eb3ae8ca25dbfbaabadb  grub-0.97.tar.gz
@@ -511,6 +520,11 @@ ad6468c55c13a259b8baa15f251a77ae5ff0524434201caeb1780ca58e637a9e4be398f264c01091
 6e713158f693c1d38f1044e1e9adea3d9338c47e9c2fec10b95a04a36cbc7c8e2841d593cb6e39b44976b6c29b7eec9919dec738e5fddaedddaaeade220185d8  xsa310-0002-x86-mm-alloc-free_lN_table-Retain-partial_flags-on-E.patch
 bef47261b61f2f9f10d649c8de1ad076517ac5ecea5f26a3a61ded91ced3f274ddeb8a41592edfe7dfd5439b010b647f6c15afeb7cd2b8c6065cd2281413b614  xsa310-0003-x86-mm-relinquish_memory-Grab-an-extra-type-ref-when.patch
 6e786287e21cd8f7371b75b05067428656cc5985ef98902fab577b9dff3a187d130675063db127a9c2210c935b2eb1f6288d784d595c9bdee30f0c904a81afb4  xsa311-4.11.patch
+a5443da59c75a786ecd0c5ad5df4c84de8b0f7ac92bc11d840d1fb4c2c33653f7e883640c2081ba594fb1ca92a61f5c970b821a5f2d37c6e666bc2e7da6c8e8f  xsa313-1.patch
+afc34c39e14b3b3d7bcd5b9bb7d2e6eaeb52fdc8733845cafd0b200c764ebd5a79f540cd818143f99bf084d1a33e50ad1614e5e98af6582412975bd73a5c48dd  xsa313-2.patch
+6e319c3856ed4a4d96705a258c2654c89a7d645d8b16c03dd257c57d320ee220ffa675eeef615c5bbcf4d5d25b66ceb8b77f57df59da757a3a554a316db074b6  xsa314-4.13.patch
+cd6ac97375742bacd55f51062849ba5dcef6026f673d3fb6ab73723befbf52570ea08765af44d636df65b7c16a9dce2fe6c9b6c47b671872ffb83c8121a181df  xsa316-xen.patch
+66e178a859844a3839333b19934ede5db1d83d8b84bfcce70c51a46077287811a92a8ad2ad60663a88162112d65a867815605202a2c9ca44ba32251b42f0ca23  xsa318.patch
 52c43beb2596d645934d0f909f2d21f7587b6898ed5e5e7046799a8ed6d58f7a09c5809e1634fa26152f3fd4f3e7cfa07da7076f01b4a20cc8f5df8b9cb77e50  xenstored.initd
 093f7fbd43faf0a16a226486a0776bade5dc1681d281c5946a3191c32d74f9699c6bf5d0ab8de9d1195a2461165d1660788e92a3156c9b3c7054d7b2d52d7ff0  xenstored.confd
 3c86ed48fbee0af4051c65c4a3893f131fa66e47bf083caf20c9b6aa4b63fdead8832f84a58d0e27964bc49ec8397251b34e5be5c212c139f556916dc8da9523  xenconsoled.initd
diff --git a/main/xen/xsa313-1.patch b/main/xen/xsa313-1.patch
new file mode 100644
index 0000000000..95fde7ead4
--- /dev/null
+++ b/main/xen/xsa313-1.patch
@@ -0,0 +1,26 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: xenoprof: clear buffer intended to be shared with guests
+
+alloc_xenheap_pages() making use of MEMF_no_scrub is fine for Xen
+internally used allocations, but buffers allocated to be shared with
+(unprivileged) guests need to be zapped of their prior content.
+
+This is part of XSA-313.
+
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Wei Liu <wl@xen.org>
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -253,6 +253,9 @@ static int alloc_xenoprof_struct(
+         return -ENOMEM;
+     }
+ 
++    for ( i = 0; i < npages; ++i )
++        clear_page(d->xenoprof->rawbuf + i * PAGE_SIZE);
++
+     d->xenoprof->npages = npages;
+     d->xenoprof->nbuf = nvcpu;
+     d->xenoprof->bufsize = bufsize;
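
Why the clear_page() loop matters: allocations made with MEMF_no_scrub skip
the scrubber, so the returned pages can still carry another domain's stale
data, and anything handed to an unprivileged guest must be zeroed first. A
minimal sketch of the pattern in plain C, where alloc_pages_noscrub() and
share_with_guest() are hypothetical stand-ins for the Xen internals, not
real API:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Hypothetical stand-ins for alloc_xenheap_pages(..., MEMF_no_scrub)
     * and for the code that exposes the buffer to a guest. */
    extern void *alloc_pages_noscrub(unsigned int npages);
    extern void share_with_guest(void *buf, unsigned int npages);

    void *alloc_shared_buffer(unsigned int npages)
    {
        char *buf = alloc_pages_noscrub(npages);
        unsigned int i;

        if ( buf == NULL )
            return NULL;

        /* The allocator skipped scrubbing; zero every page before the
         * guest can map the buffer, exactly as the patch does with
         * clear_page(). */
        for ( i = 0; i < npages; i++ )
            memset(buf + i * PAGE_SIZE, 0, PAGE_SIZE);

        share_with_guest(buf, npages);
        return buf;
    }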
diff --git a/main/xen/xsa313-2.patch b/main/xen/xsa313-2.patch
new file mode 100644
index 0000000000..d81b8232d2
--- /dev/null
+++ b/main/xen/xsa313-2.patch
@@ -0,0 +1,132 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: xenoprof: limit consumption of shared buffer data
+
+Since a shared buffer can be written to by the guest, we may only read
+the head and tail pointers from there (all other fields should only ever
+be written to). Furthermore, for any particular operation the two values
+must be read exactly once, with both checks and consumption happening
+with the thus read values. (The backtrace related xenoprof_buf_space()
+use in xenoprof_log_event() is an exception: The values used there get
+re-checked by every subsequent xenoprof_add_sample().)
+
+Since that code needed touching, also fix the double increment of the
+lost samples count in case the backtrace related xenoprof_add_sample()
+invocation in xenoprof_log_event() fails.
+
+Where code is being touched anyway, add const as appropriate, but take
+the opportunity to entirely drop the now unused domain parameter of
+xenoprof_buf_space().
+
+This is part of XSA-313.
+
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Wei Liu <wl@xen.org>
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -479,25 +479,22 @@ static int add_passive_list(XEN_GUEST_HA
+ 
+ 
+ /* Get space in the buffer */
+-static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
++static int xenoprof_buf_space(int head, int tail, int size)
+ {
+-    int head, tail;
+-
+-    head = xenoprof_buf(d, buf, event_head);
+-    tail = xenoprof_buf(d, buf, event_tail);
+-
+     return ((tail > head) ? 0 : size) + tail - head - 1;
+ }
+ 
+ /* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
+-static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
++static int xenoprof_add_sample(const struct domain *d,
++                               const struct xenoprof_vcpu *v,
+                                uint64_t eip, int mode, int event)
+ {
++    xenoprof_buf_t *buf = v->buffer;
+     int head, tail, size;
+ 
+     head = xenoprof_buf(d, buf, event_head);
+     tail = xenoprof_buf(d, buf, event_tail);
+-    size = xenoprof_buf(d, buf, event_size);
++    size = v->event_size;
+ 
+     /* make sure indexes in shared buffer are sane */
+     if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
+@@ -506,7 +503,7 @@ static int xenoprof_add_sample(struct do
+         return 0;
+     }
+ 
+-    if ( xenoprof_buf_space(d, buf, size) > 0 )
++    if ( xenoprof_buf_space(head, tail, size) > 0 )
+     {
+         xenoprof_buf(d, buf, event_log[head].eip) = eip;
+         xenoprof_buf(d, buf, event_log[head].mode) = mode;
+@@ -530,7 +527,6 @@ static int xenoprof_add_sample(struct do
+ int xenoprof_add_trace(struct vcpu *vcpu, uint64_t pc, int mode)
+ {
+     struct domain *d = vcpu->domain;
+-    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;
+ 
+     /* Do not accidentally write an escape code due to a broken frame. */
+     if ( pc == XENOPROF_ESCAPE_CODE )
+@@ -539,7 +535,8 @@ int xenoprof_add_trace(struct vcpu *vcpu
+         return 0;
+     }
+ 
+-    return xenoprof_add_sample(d, buf, pc, mode, 0);
++    return xenoprof_add_sample(d, &d->xenoprof->vcpu[vcpu->vcpu_id],
++                               pc, mode, 0);
+ }
+ 
+ void xenoprof_log_event(struct vcpu *vcpu, const struct cpu_user_regs *regs,
+@@ -570,17 +567,22 @@ void xenoprof_log_event(struct vcpu *vcp
+     /* Provide backtrace if requested. */
+     if ( backtrace_depth > 0 )
+     {
+-        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
+-             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
+-                                  XENOPROF_TRACE_BEGIN) )
++        if ( xenoprof_buf_space(xenoprof_buf(d, buf, event_head),
++                                xenoprof_buf(d, buf, event_tail),
++                                v->event_size) < 2 )
+         {
+             xenoprof_buf(d, buf, lost_samples)++;
+             lost_samples++;
+             return;
+         }
++
++        /* xenoprof_add_sample() will increment lost_samples on failure */
++        if ( !xenoprof_add_sample(d, v, XENOPROF_ESCAPE_CODE, mode,
++                                  XENOPROF_TRACE_BEGIN) )
++            return;
+     }
+ 
+-    if ( xenoprof_add_sample(d, buf, pc, mode, event) )
++    if ( xenoprof_add_sample(d, v, pc, mode, event) )
+     {
+         if ( is_active(vcpu->domain) )
+             active_samples++;
+--- a/xen/include/xen/xenoprof.h
++++ b/xen/include/xen/xenoprof.h
+@@ -61,12 +61,12 @@ struct xenoprof {
+ 
+ #ifndef CONFIG_COMPAT
+ #define XENOPROF_COMPAT(x) 0
+-#define xenoprof_buf(d, b, field) ((b)->field)
++#define xenoprof_buf(d, b, field) ACCESS_ONCE((b)->field)
+ #else
+ #define XENOPROF_COMPAT(x) ((x)->is_compat)
+-#define xenoprof_buf(d, b, field) (*(!(d)->xenoprof->is_compat ? \
+-                                     &(b)->native.field : \
+-                                     &(b)->compat.field))
++#define xenoprof_buf(d, b, field) ACCESS_ONCE(*(!(d)->xenoprof->is_compat \
++                                              ? &(b)->native.field \
++                                              : &(b)->compat.field))
+ #endif
+ 
+ struct domain;
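
The heart of this second XSA-313 patch is a time-of-check/time-of-use rule
for shared memory: head and tail live in a buffer the guest can write, so
they must be snapshotted exactly once (the ACCESS_ONCE() above), validated,
and then only the snapshots used. A self-contained sketch of the same
pattern, using C11 atomics in place of Xen's ACCESS_ONCE(); the struct and
names are illustrative, not the Xen layout:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Ring shared with an untrusted writer (the guest). */
    struct shared_ring {
        _Atomic int head;    /* guest-writable */
        _Atomic int tail;    /* guest-writable */
        uint64_t slot[];
    };

    /* Free space computed from snapshots only. */
    static int ring_space(int head, int tail, int size)
    {
        return ((tail > head) ? 0 : size) + tail - head - 1;
    }

    /* size is the host's trusted copy. Returns 1 on success, 0 otherwise. */
    static int ring_add(struct shared_ring *r, int size, uint64_t sample)
    {
        /* Read the guest-controlled indices exactly once... */
        int head = atomic_load_explicit(&r->head, memory_order_relaxed);
        int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

        /* ...validate the snapshots... */
        if ( head < 0 || head >= size || tail < 0 || tail >= size )
            return 0;

        /* ...and never re-read the shared fields after the check. */
        if ( ring_space(head, tail, size) <= 0 )
            return 0;

        r->slot[head] = sample;
        atomic_store_explicit(&r->head, (head + 1) % size,
                              memory_order_relaxed);
        return 1;
    }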
diff --git a/main/xen/xsa314-4.13.patch b/main/xen/xsa314-4.13.patch
new file mode 100644
index 0000000000..67e006681e
--- /dev/null
+++ b/main/xen/xsa314-4.13.patch
@@ -0,0 +1,121 @@
+From ab49f005f7d01d4004d76f2e295d31aca7d4f93a Mon Sep 17 00:00:00 2001
+From: Julien Grall <jgrall@amazon.com>
+Date: Thu, 20 Feb 2020 20:54:40 +0000
+Subject: [PATCH] xen/rwlock: Add missing memory barrier in the unlock path of
+ rwlock
+
+The rwlock unlock paths are using atomic_sub() to release the lock.
+However the implementation of atomic_sub() rightfully doesn't contain a
+memory barrier. On Arm, this means a processor is allowed to re-order
+the memory access with the preceding access.
+
+In other words, the unlock may be seen by another processor before all
+the memory accesses within the "critical" section.
+
+The rwlock paths already contain barriers indirectly, but they are not
+very useful without the counterpart in the unlock paths.
+
+The memory barriers are not necessary on x86 because loads/stores are
+not re-ordered with lock instructions.
+
+So add arch_lock_release_barrier() in the unlock paths that will only
+add a memory barrier on Arm.
+
+Take the opportunity to document each lock path, explaining why a
+barrier is not necessary.
+
+This is XSA-314.
+
+Signed-off-by: Julien Grall <jgrall@amazon.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+
+---
+ xen/include/xen/rwlock.h | 29 ++++++++++++++++++++++++++++-
+ 1 file changed, 28 insertions(+), 1 deletion(-)
+
+diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
+index 3dfea1ac2a..516486306f 100644
+--- a/xen/include/xen/rwlock.h
++++ b/xen/include/xen/rwlock.h
+@@ -48,6 +48,10 @@ static inline int _read_trylock(rwlock_t *lock)
+     if ( likely(!(cnts & _QW_WMASK)) )
+     {
+         cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
++        /*
++         * atomic_add_return() is a full barrier so no need for an
++         * arch_lock_acquire_barrier().
++         */
+         if ( likely(!(cnts & _QW_WMASK)) )
+             return 1;
+         atomic_sub(_QR_BIAS, &lock->cnts);
+@@ -64,11 +68,19 @@ static inline void _read_lock(rwlock_t *lock)
+     u32 cnts;
+ 
+     cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
++    /*
++     * atomic_add_return() is a full barrier so no need for an
++     * arch_lock_acquire_barrier().
++     */
+     if ( likely(!(cnts & _QW_WMASK)) )
+         return;
+ 
+     /* The slowpath will decrement the reader count, if necessary. */
+     queue_read_lock_slowpath(lock);
++    /*
++     * queue_read_lock_slowpath() is using spinlock and therefore is a
++     * full barrier. So no need for an arch_lock_acquire_barrier().
++     */
+ }
+ 
+ static inline void _read_lock_irq(rwlock_t *lock)
+@@ -92,6 +104,7 @@ static inline unsigned long _read_lock_irqsave(rwlock_t *lock)
+  */
+ static inline void _read_unlock(rwlock_t *lock)
+ {
++    arch_lock_release_barrier();
+     /*
+      * Atomically decrement the reader count
+      */
+@@ -121,11 +134,20 @@ static inline int _rw_is_locked(rwlock_t *lock)
+  */
+ static inline void _write_lock(rwlock_t *lock)
+ {
+-    /* Optimize for the unfair lock case where the fair flag is 0. */
++    /*
++     * Optimize for the unfair lock case where the fair flag is 0.
++     *
++     * atomic_cmpxchg() is a full barrier so no need for an
++     * arch_lock_acquire_barrier().
++     */
+     if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
+         return;
+ 
+     queue_write_lock_slowpath(lock);
++    /*
++     * queue_write_lock_slowpath() is using spinlock and therefore is a
++     * full barrier. So no need for an arch_lock_acquire_barrier().
++     */
+ }
+ 
+ static inline void _write_lock_irq(rwlock_t *lock)
+@@ -157,11 +179,16 @@ static inline int _write_trylock(rwlock_t *lock)
+     if ( unlikely(cnts) )
+         return 0;
+ 
++    /*
++     * atomic_cmpxchg() is a full barrier so no need for an
++     * arch_lock_acquire_barrier().
++     */
+     return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
+ }
+ 
+ static inline void _write_unlock(rwlock_t *lock)
+ {
++    arch_lock_release_barrier();
+     /*
+      * If the writer field is atomic, it can be cleared directly.
+      * Otherwise, an atomic subtraction will be used to clear it.
+-- 
+2.17.1
+
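
The fix is easiest to see with C11 atomics: a fetch-sub with relaxed
ordering has no barrier semantics, so on a weakly ordered CPU such as Arm
the critical-section stores may drift past the unlock; release ordering
pins them before it, which is the job arch_lock_release_barrier() does in
the hunks above. A toy sketch (toy_rwlock_t and QR_BIAS are illustrative,
not the Xen definitions):

    #include <stdatomic.h>

    #define QR_BIAS 0x100            /* per-reader bias, illustrative */

    typedef struct { atomic_int cnts; } toy_rwlock_t;

    int shared_data;                 /* protected by the lock */

    /* Buggy: a relaxed RMW lets a store to shared_data become
     * visible after the lock already looks free. */
    static void read_unlock_buggy(toy_rwlock_t *l)
    {
        atomic_fetch_sub_explicit(&l->cnts, QR_BIAS, memory_order_relaxed);
    }

    /* Fixed: release ordering makes all prior accesses visible before
     * the reader count drops. */
    static void read_unlock_fixed(toy_rwlock_t *l)
    {
        atomic_fetch_sub_explicit(&l->cnts, QR_BIAS, memory_order_release);
    }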
diff --git a/main/xen/xsa316-xen.patch b/main/xen/xsa316-xen.patch
new file mode 100644
index 0000000000..4962b4e716
--- /dev/null
+++ b/main/xen/xsa316-xen.patch
@@ -0,0 +1,30 @@
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Subject: xen/gnttab: Fix error path in map_grant_ref()
+
+Part of XSA-295 (c/s 863e74eb2cffb) inadvertently re-positioned the brackets,
+changing the logic. If the _set_status() call fails, the grant_map hypercall
+would fail with a status of 1 (rc != GNTST_okay) instead of the expected
+negative GNTST_* error.
+
+This error path can be taken due to bad guest state, and causes net/blk-back
+in Linux to crash.
+
+This is XSA-316.
+
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Julien Grall <jgrall@amazon.com>
+
+diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
+index 9fd6e60416..4b5344dc21 100644
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -1031,7 +1031,7 @@ map_grant_ref(
+     {
+         if ( (rc = _set_status(shah, status, rd, rgt->gt_version, act,
+                                op->flags & GNTMAP_readonly, 1,
+-                               ld->domain_id) != GNTST_okay) )
++                               ld->domain_id)) != GNTST_okay )
+             goto act_release_out;
+ 
+         if ( !act->pin )
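
The one-character regression fixed here is a C precedence trap: != binds
tighter than =, so "rc = _set_status(...) != GNTST_okay" stores the result
of the comparison (0 or 1) in rc, not the GNTST_* value. A standalone
demonstration (set_status() is a made-up stand-in):

    #include <stdio.h>

    #define GNTST_okay 0

    static int set_status(void) { return -1; /* some GNTST_* error */ }

    int main(void)
    {
        int rc;

        /* Buggy: parsed as rc = (set_status() != GNTST_okay), rc becomes 1. */
        if ( (rc = set_status() != GNTST_okay) )
            printf("buggy path: rc = %d\n", rc);    /* prints 1 */

        /* Fixed: assign first, then compare; rc keeps the real error. */
        if ( (rc = set_status()) != GNTST_okay )
            printf("fixed path: rc = %d\n", rc);    /* prints -1 */

        return 0;
    }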
diff --git a/main/xen/xsa318.patch b/main/xen/xsa318.patch
new file mode 100644
index 0000000000..f4becdf81e
--- /dev/null
+++ b/main/xen/xsa318.patch
@@ -0,0 +1,39 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: gnttab: fix GNTTABOP_copy continuation handling
+
+The XSA-226 fix was flawed - the backwards transformation on rc was done
+too early, causing a continuation to not get invoked when the need for
+preemption was determined at the very first iteration of the request.
+This in particular means that all of the status fields of the individual
+operations would be left untouched, i.e. set to whatever the caller may
+or may not have initialized them to.
+
+This is part of XSA-318.
+
+Reported-by: Pawel Wieczorkiewicz <wipawel@amazon.de>
+Tested-by: Pawel Wieczorkiewicz <wipawel@amazon.de>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -3576,8 +3576,7 @@ do_grant_table_op(
+         rc = gnttab_copy(copy, count);
+         if ( rc > 0 )
+         {
+-            rc = count - rc;
+-            guest_handle_add_offset(copy, rc);
++            guest_handle_add_offset(copy, count - rc);
+             uop = guest_handle_cast(copy, void);
+         }
+         break;
+@@ -3644,6 +3643,9 @@ do_grant_table_op(
+  out:
+     if ( rc > 0 || opaque_out != 0 )
+     {
++        /* Adjust rc, see gnttab_copy() for why this is needed. */
++        if ( cmd == GNTTABOP_copy )
++            rc = count - rc;
+         ASSERT(rc < count);
+         ASSERT((opaque_out & GNTTABOP_CMD_MASK) == 0);
+         rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op, "ihi",
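
The continuation bug is about when rc changes meaning: gnttab_copy() returns
the number of operations still pending when it wants to be preempted, and
the old code converted that to "operations done" (count - rc) before
deciding whether to continue, so preemption on the very first operation
(rc == count, hence count - rc == 0) was indistinguishable from completion.
A simplified sketch of the corrected flow, with process_ops() and
queue_continuation() as hypothetical stand-ins for the hypervisor machinery:

    /* Returns 0 when the batch is finished, a negative error code, or
     * the number of operations still pending when preempted. */
    extern int process_ops(unsigned int count);
    extern long queue_continuation(unsigned int remaining);

    long do_batch(unsigned int count)
    {
        int rc = process_ops(count);

        if ( rc > 0 )
        {
            /* rc ops remain: advance the input cursor by the ops that
             * actually completed (count - rc) without overwriting rc... */
            unsigned int done = count - rc;
            (void)done;              /* a real handler would seek here */

            /* ...so rc == count (nothing done yet) still schedules a
             * continuation instead of looking like success. */
            return queue_continuation(rc);
        }

        return rc;    /* 0 on success, negative error otherwise */
    }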