author     Carlo Landmeter <clandmeter@gmail.com>   2017-05-22 13:26:55 +0200
committer  Carlo Landmeter <clandmeter@gmail.com>   2017-05-22 13:26:55 +0200
commit     e6421602f600831f362bf96ddc36ecefbefec3d1 (patch)
tree       be100122030e7e57baff5f39ac33908241f41aa2
parent     02f653dc5c3514c817450fa2f88a49c1bda04244 (diff)
main/xen: add missing patches
-rw-r--r--  main/xen/xsa213-4.8.patch  177
-rw-r--r--  main/xen/xsa214.patch       41
2 files changed, 218 insertions(+), 0 deletions(-)
diff --git a/main/xen/xsa213-4.8.patch b/main/xen/xsa213-4.8.patch
new file mode 100644
index 0000000000..2f9fa6ab11
--- /dev/null
+++ b/main/xen/xsa213-4.8.patch
@@ -0,0 +1,177 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: multicall: deal with early exit conditions

In particular changes to guest privilege level require the multicall
sequence to be aborted, as hypercalls are permitted from kernel mode
only. While likely not very useful in a multicall, also properly handle
the return value in the HYPERVISOR_iret case (which should be the guest
specified value).

This is XSA-213.

Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Julien Grall <julien.grall@arm.com>

--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1550,7 +1550,7 @@ static bool_t check_multicall_32bit_clea
     return true;
 }
 
-void arch_do_multicall_call(struct mc_state *state)
+enum mc_disposition arch_do_multicall_call(struct mc_state *state)
 {
     struct multicall_entry *multi = &state->call;
     arm_hypercall_fn_t call = NULL;
@@ -1558,23 +1558,26 @@ void arch_do_multicall_call(struct mc_st
     if ( multi->op >= ARRAY_SIZE(arm_hypercall_table) )
     {
         multi->result = -ENOSYS;
-        return;
+        return mc_continue;
     }
 
     call = arm_hypercall_table[multi->op].fn;
     if ( call == NULL )
     {
         multi->result = -ENOSYS;
-        return;
+        return mc_continue;
     }
 
     if ( is_32bit_domain(current->domain) &&
          !check_multicall_32bit_clean(multi) )
-        return;
+        return mc_continue;
 
     multi->result = call(multi->args[0], multi->args[1],
                          multi->args[2], multi->args[3],
                          multi->args[4]);
+
+    return likely(!psr_mode_is_user(guest_cpu_user_regs()))
+           ? mc_continue : mc_preempt;
 }
 
 /*
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -255,15 +255,19 @@ void pv_hypercall(struct cpu_user_regs *
     perfc_incr(hypercalls);
 }
 
-void arch_do_multicall_call(struct mc_state *state)
+enum mc_disposition arch_do_multicall_call(struct mc_state *state)
 {
-    if ( !is_pv_32bit_vcpu(current) )
+    struct vcpu *curr = current;
+    unsigned long op;
+
+    if ( !is_pv_32bit_vcpu(curr) )
     {
         struct multicall_entry *call = &state->call;
 
-        if ( (call->op < ARRAY_SIZE(pv_hypercall_table)) &&
-             pv_hypercall_table[call->op].native )
-            call->result = pv_hypercall_table[call->op].native(
+        op = call->op;
+        if ( (op < ARRAY_SIZE(pv_hypercall_table)) &&
+             pv_hypercall_table[op].native )
+            call->result = pv_hypercall_table[op].native(
                 call->args[0], call->args[1], call->args[2],
                 call->args[3], call->args[4], call->args[5]);
         else
@@ -274,15 +278,21 @@ void arch_do_multicall_call(struct mc_st
     {
         struct compat_multicall_entry *call = &state->compat_call;
 
-        if ( (call->op < ARRAY_SIZE(pv_hypercall_table)) &&
-             pv_hypercall_table[call->op].compat )
-            call->result = pv_hypercall_table[call->op].compat(
+        op = call->op;
+        if ( (op < ARRAY_SIZE(pv_hypercall_table)) &&
+             pv_hypercall_table[op].compat )
+            call->result = pv_hypercall_table[op].compat(
                 call->args[0], call->args[1], call->args[2],
                 call->args[3], call->args[4], call->args[5]);
         else
             call->result = -ENOSYS;
     }
 #endif
+
+    return unlikely(op == __HYPERVISOR_iret)
+           ? mc_exit
+           : likely(guest_kernel_mode(curr, guest_cpu_user_regs()))
+             ? mc_continue : mc_preempt;
 }
 
 /*
--- a/xen/common/multicall.c
+++ b/xen/common/multicall.c
@@ -40,6 +40,7 @@ do_multicall(
     struct mc_state *mcs = &current->mc_state;
     uint32_t i;
     int rc = 0;
+    enum mc_disposition disp = mc_continue;
 
     if ( unlikely(__test_and_set_bit(_MCSF_in_multicall, &mcs->flags)) )
     {
@@ -50,7 +51,7 @@ do_multicall(
     if ( unlikely(!guest_handle_okay(call_list, nr_calls)) )
         rc = -EFAULT;
 
-    for ( i = 0; !rc && i < nr_calls; i++ )
+    for ( i = 0; !rc && disp == mc_continue && i < nr_calls; i++ )
     {
         if ( i && hypercall_preempt_check() )
             goto preempted;
@@ -63,7 +64,7 @@ do_multicall(
 
         trace_multicall_call(&mcs->call);
 
-        arch_do_multicall_call(mcs);
+        disp = arch_do_multicall_call(mcs);
 
 #ifndef NDEBUG
         {
@@ -77,7 +78,14 @@ do_multicall(
         }
 #endif
 
-        if ( unlikely(__copy_field_to_guest(call_list, &mcs->call, result)) )
+        if ( unlikely(disp == mc_exit) )
+        {
+            if ( __copy_field_to_guest(call_list, &mcs->call, result) )
+                /* nothing, best effort only */;
+            rc = mcs->call.result;
+        }
+        else if ( unlikely(__copy_field_to_guest(call_list, &mcs->call,
+                                                 result)) )
             rc = -EFAULT;
         else if ( mcs->flags & MCSF_call_preempted )
         {
@@ -93,6 +101,9 @@ do_multicall(
         guest_handle_add_offset(call_list, 1);
     }
 
+    if ( unlikely(disp == mc_preempt) && i < nr_calls )
+        goto preempted;
+
     perfc_incr(calls_to_multicall);
     perfc_add(calls_from_multicall, i);
     mcs->flags = 0;
--- a/xen/include/xen/multicall.h
+++ b/xen/include/xen/multicall.h
@@ -24,6 +24,10 @@ struct mc_state {
     };
 };
 
-void arch_do_multicall_call(struct mc_state *mc);
+enum mc_disposition {
+    mc_continue,
+    mc_exit,
+    mc_preempt,
+} arch_do_multicall_call(struct mc_state *mc);
 
 #endif /* __XEN_MULTICALL_H__ */
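
The crux of XSA-213 is that do_multicall() used to run every queued entry even after one of them changed the guest's privilege level or issued HYPERVISOR_iret. The standalone C sketch below models just that loop change; it is illustrative only, and the names disposition, do_one_call and run_batch are stand-ins rather than Xen's actual mc_disposition/arch_do_multicall_call interfaces.

#include <stddef.h>
#include <stdio.h>

enum disposition { DISP_CONTINUE, DISP_EXIT, DISP_PREEMPT };

struct call { int op; long result; };

/* Illustrative stand-in for a per-call dispatcher: op 0 succeeds,
 * op 1 models an iret-style exit, op 2 models a drop to user mode. */
static enum disposition do_one_call(struct call *c)
{
    c->result = 0;
    if (c->op == 1)
        return DISP_EXIT;     /* the guest-specified value becomes rc */
    if (c->op == 2)
        return DISP_PREEMPT;  /* no longer in kernel mode: stop batching */
    return DISP_CONTINUE;
}

static long run_batch(struct call *batch, size_t n)
{
    enum disposition disp = DISP_CONTINUE;
    long rc = 0;
    size_t i;

    /* The essence of the fix: the loop condition consults the
     * disposition, so entries queued after a privilege change or an
     * exit are never executed. */
    for (i = 0; disp == DISP_CONTINUE && i < n; i++)
        disp = do_one_call(&batch[i]);

    if (disp == DISP_EXIT)
        rc = batch[i - 1].result;  /* propagate the exiting call's value */
    return rc;
}

int main(void)
{
    /* The second entry models a privilege drop, so under the fixed
     * loop the third entry is never run. */
    struct call batch[] = { {0, 0}, {2, 0}, {0, 0} };
    printf("rc=%ld\n", run_batch(batch, 3));
    return 0;
}

In the real patch, the mc_preempt case additionally re-enters the remaining entries as a hypercall continuation (the goto preempted path) rather than simply stopping, as the diff above shows.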
diff --git a/main/xen/xsa214.patch b/main/xen/xsa214.patch
new file mode 100644
index 0000000000..46a3d3a4c6
--- /dev/null
+++ b/main/xen/xsa214.patch
@@ -0,0 +1,41 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86: discard type information when stealing pages

While a page having just a single general reference left necessarily
has a zero type reference count too, its type may still be valid (and
in validated state; at present this is only possible and relevant for
PGT_seg_desc_page, as page tables have their type forcibly zapped when
their type reference count drops to zero, and
PGT_{writable,shared}_page pages don't require any validation). In
such a case when the page is being re-used with the same type again,
validation is being skipped. As validation criteria differ between
32- and 64-bit guests, pages to be transferred between guests need to
have their validation indicator zapped (and with it we zap all other
type information at once).

This is XSA-214.

Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4466,6 +4466,17 @@ int steal_page(
         y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
     } while ( y != x );
 
+    /*
+     * With the sole reference dropped temporarily, no-one can update type
+     * information. Type count also needs to be zero in this case, but e.g.
+     * PGT_seg_desc_page may still have PGT_validated set, which we need to
+     * clear before transferring ownership (as validation criteria vary
+     * depending on domain type).
+     */
+    BUG_ON(page->u.inuse.type_info & (PGT_count_mask | PGT_locked |
+                                      PGT_pinned));
+    page->u.inuse.type_info = 0;
+
     /* Swizzle the owner then reinstate the PGC_allocated reference. */
     page_set_owner(page, NULL);
     y = page->count_info;
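
The window XSA-214 closes is easiest to see in miniature: a type word that keeps its "validated" flag across an ownership change lets the next owner skip content validation that its own rules would have required. The C sketch below is a simplified stand-in, not Xen's code; TYPE_MASK, FLAG_VALIDATED, get_type and steal only loosely mirror Xen's type_info word, get_page_type() and steal_page().

#include <stdio.h>

/* Illustrative flag layout; the real Xen bit positions and names differ. */
#define TYPE_MASK      0x7u   /* which type the page last held */
#define FLAG_VALIDATED 0x8u   /* contents were checked for that type */

struct page { unsigned int type_info; int owner; };

/* Re-validation is skipped when the requested type matches and the page
 * is still marked validated -- the fast path XSA-214 could abuse across
 * guests with different validation rules. Returns 1 if validation ran. */
static int get_type(struct page *pg, unsigned int type)
{
    if ((pg->type_info & (TYPE_MASK | FLAG_VALIDATED)) ==
        (type | FLAG_VALIDATED))
        return 0;                         /* fast path: no re-check */
    /* ... validate page contents against `type` here, then record it: */
    pg->type_info = type | FLAG_VALIDATED;
    return 1;
}

/* The fix, in miniature: discard all type information when the page
 * changes owner, so the new owner's validation rules must be applied. */
static void steal(struct page *pg, int new_owner)
{
    pg->type_info = 0;
    pg->owner = new_owner;
}

int main(void)
{
    struct page pg = { .type_info = 0, .owner = 1 };
    printf("%d\n", get_type(&pg, 2));  /* 1: validated for owner 1 */
    printf("%d\n", get_type(&pg, 2));  /* 0: fast path, check skipped */
    steal(&pg, 2);                     /* ownership transfer zaps the flag */
    printf("%d\n", get_type(&pg, 2));  /* 1: re-validated for owner 2 */
    return 0;
}

Without the zap in steal(), the final get_type() would take the fast path and owner 2 would inherit a page validated only against owner 1's criteria, which is the cross-bitness hazard the patch description spells out.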