author      William Pitcock <nenolod@dereferenced.org>   2013-06-01 16:12:19 -0500
committer   Natanael Copa <ncopa@alpinelinux.org>        2013-06-04 09:56:35 +0000
commit      d6daa3bca4926791e3488effbb1d55c58d24f035 (patch)
tree        d680c1a604f1a517363906ecc7556ae92dcef4d2
parent      82707dbe2c74c91652ec3372208e4d75daaed8ba (diff)
main/xen: remove obsolete XSA patches.
(cherry picked from commit d3978bf9dc42f00c8d05d8eac255f93ef154b503)
-rw-r--r--  main/xen/xsa33-4.2-unstable.patch     21
-rw-r--r--  main/xen/xsa34-4.2.patch              30
-rw-r--r--  main/xen/xsa35-4.2-with-xsa34.patch   24
-rw-r--r--  main/xen/xsa36-4.2.patch             323
-rw-r--r--  main/xen/xsa38.patch                  73
-rw-r--r--  main/xen/xsa44-4.2.patch              77
-rw-r--r--  main/xen/xsa46-4.2.patch             293
-rw-r--r--  main/xen/xsa47-4.2-unstable.patch     31
8 files changed, 0 insertions, 872 deletions
diff --git a/main/xen/xsa33-4.2-unstable.patch b/main/xen/xsa33-4.2-unstable.patch
deleted file mode 100644
index 369d65bba9..0000000000
--- a/main/xen/xsa33-4.2-unstable.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-VT-d: fix interrupt remapping source validation for devices behind
-legacy bridges
-
-Using SVT_VERIFY_BUS here doesn't make sense; native Linux also
-uses SVT_VERIFY_SID_SQ here instead.
-
-This is XSA-33 / CVE-2012-5634.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/drivers/passthrough/vtd/intremap.c
-+++ b/xen/drivers/passthrough/vtd/intremap.c
-@@ -466,7 +466,7 @@ static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire)
- set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
- (bus << 8) | pdev->bus);
- else if ( pdev_type(seg, bus, devfn) == DEV_TYPE_LEGACY_PCI_BRIDGE )
-- set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
-+ set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- PCI_BDF2(bus, devfn));
- }
- break;
diff --git a/main/xen/xsa34-4.2.patch b/main/xen/xsa34-4.2.patch
deleted file mode 100644
index f5328eff9f..0000000000
--- a/main/xen/xsa34-4.2.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-x86_32: don't allow use of nested HVM
-
-There are (indirect) uses of map_domain_page() in the nested HVM code
-that are unsafe when not just using the 1:1 mapping.
-
-This is XSA-34 / CVE-2013-0151.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -3926,6 +3926,10 @@ long do_hvm_op(unsigned long op, XEN_GUE
- rc = -EINVAL;
- break;
- case HVM_PARAM_NESTEDHVM:
-+#ifdef __i386__
-+ if ( a.value )
-+ rc = -EINVAL;
-+#else
- if ( a.value > 1 )
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
-@@ -3940,6 +3944,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
- for_each_vcpu(d, v)
- if ( rc == 0 )
- rc = nestedhvm_vcpu_initialise(v);
-+#endif
- break;
- case HVM_PARAM_BUFIOREQ_EVTCHN:
- rc = -EINVAL;
diff --git a/main/xen/xsa35-4.2-with-xsa34.patch b/main/xen/xsa35-4.2-with-xsa34.patch
deleted file mode 100644
index 89230e2a46..0000000000
--- a/main/xen/xsa35-4.2-with-xsa34.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-xen: Do not allow guests to enable nested HVM on themselves
-
-There is no reason for this and doing so exposes a memory leak to
-guests. Only toolstacks need write access to this HVM param.
-
-This is XSA-35 / CVE-2013-0152.
-
-Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
-Acked-by: Jan Beulich <JBeulich@suse.com>
-
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -3862,6 +3862,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
- rc = -EINVAL;
- break;
- case HVM_PARAM_NESTEDHVM:
-+ if ( !IS_PRIV(current->domain) )
-+ {
-+ rc = -EPERM;
-+ break;
-+ }
- #ifdef __i386__
- if ( a.value )
- rc = -EINVAL;
diff --git a/main/xen/xsa36-4.2.patch b/main/xen/xsa36-4.2.patch
deleted file mode 100644
index 8477701a22..0000000000
--- a/main/xen/xsa36-4.2.patch
+++ /dev/null
@@ -1,323 +0,0 @@
-ACPI: acpi_table_parse() should return handler's error code
-
-Currently, the error code returned by acpi_table_parse()'s handler
-is ignored. This patch will propagate handler's return value to
-acpi_table_parse()'s caller.
-
-AMD,IOMMU: Clean up old entries in remapping tables when creating new
-interrupt mapping.
-
-When changing the affinity of an IRQ associated with a passed
-through PCI device, clear previous mapping.
-
-In addition, because some BIOSes may incorrectly program IVRS
-entries for IOAPIC try to check for entry's consistency. Specifically,
-if conflicting entries are found disable IOMMU if per-device
-remapping table is used. If entries refer to bogus IOAPIC IDs
-disable IOMMU unconditionally
-
-AMD,IOMMU: Disable IOMMU if SATA Combined mode is on
-
-AMD's SP5100 chipset can be placed into SATA Combined mode
-that may cause prevent dom0 from booting when IOMMU is
-enabled and per-device interrupt remapping table is used.
-While SP5100 erratum 28 requires BIOSes to disable this mode,
-some may still use it.
-
-This patch checks whether this mode is on and, if per-device
-table is in use, disables IOMMU.
-
-AMD,IOMMU: Make per-device interrupt remapping table default
-
-Using global interrupt remapping table may be insecure, as
-described by XSA-36. This patch makes per-device mode default.
-
-This is XSA-36 / CVE-2013-0153.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@amd.com>
-
---- a/xen/arch/x86/irq.c
-+++ b/xen/arch/x86/irq.c
-@@ -1942,9 +1942,6 @@ int map_domain_pirq(
- spin_lock_irqsave(&desc->lock, flags);
- set_domain_irq_pirq(d, irq, info);
- spin_unlock_irqrestore(&desc->lock, flags);
--
-- if ( opt_irq_vector_map == OPT_IRQ_VECTOR_MAP_PERDEV )
-- printk(XENLOG_INFO "Per-device vector maps for GSIs not implemented yet.\n");
- }
-
- done:
---- a/xen/drivers/acpi/tables.c
-+++ b/xen/drivers/acpi/tables.c
-@@ -267,7 +267,7 @@ acpi_table_parse_madt(enum acpi_madt_typ
- * @handler: handler to run
- *
- * Scan the ACPI System Descriptor Table (STD) for a table matching @id,
-- * run @handler on it. Return 0 if table found, return on if not.
-+ * run @handler on it.
- */
- int __init acpi_table_parse(char *id, acpi_table_handler handler)
- {
-@@ -282,8 +282,7 @@ int __init acpi_table_parse(char *id, ac
- acpi_get_table(id, 0, &table);
-
- if (table) {
-- handler(table);
-- return 0;
-+ return handler(table);
- } else
- return 1;
- }
---- a/xen/drivers/passthrough/amd/iommu_acpi.c
-+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
-@@ -22,6 +22,7 @@
- #include <xen/errno.h>
- #include <xen/acpi.h>
- #include <asm/apicdef.h>
-+#include <asm/io_apic.h>
- #include <asm/amd-iommu.h>
- #include <asm/hvm/svm/amd-iommu-proto.h>
-
-@@ -635,6 +636,7 @@ static u16 __init parse_ivhd_device_spec
- u16 header_length, u16 block_length, struct amd_iommu *iommu)
- {
- u16 dev_length, bdf;
-+ int apic;
-
- dev_length = sizeof(*special);
- if ( header_length < (block_length + dev_length) )
-@@ -651,10 +653,59 @@ static u16 __init parse_ivhd_device_spec
- }
-
- add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu);
-- /* set device id of ioapic */
-- ioapic_sbdf[special->handle].bdf = bdf;
-- ioapic_sbdf[special->handle].seg = seg;
-- return dev_length;
-+
-+ if ( special->variety != ACPI_IVHD_IOAPIC )
-+ {
-+ if ( special->variety != ACPI_IVHD_HPET )
-+ printk(XENLOG_ERR "Unrecognized IVHD special variety %#x\n",
-+ special->variety);
-+ return dev_length;
-+ }
-+
-+ /*
-+ * Some BIOSes have IOAPIC broken entries so we check for IVRS
-+ * consistency here --- whether entry's IOAPIC ID is valid and
-+ * whether there are conflicting/duplicated entries.
-+ */
-+ for ( apic = 0; apic < nr_ioapics; apic++ )
-+ {
-+ if ( IO_APIC_ID(apic) != special->handle )
-+ continue;
-+
-+ if ( ioapic_sbdf[special->handle].pin_setup )
-+ {
-+ if ( ioapic_sbdf[special->handle].bdf == bdf &&
-+ ioapic_sbdf[special->handle].seg == seg )
-+ AMD_IOMMU_DEBUG("IVHD Warning: Duplicate IO-APIC %#x entries\n",
-+ special->handle);
-+ else
-+ {
-+ printk(XENLOG_ERR "IVHD Error: Conflicting IO-APIC %#x entries\n",
-+ special->handle);
-+ if ( amd_iommu_perdev_intremap )
-+ return 0;
-+ }
-+ }
-+ else
-+ {
-+ /* set device id of ioapic */
-+ ioapic_sbdf[special->handle].bdf = bdf;
-+ ioapic_sbdf[special->handle].seg = seg;
-+
-+ ioapic_sbdf[special->handle].pin_setup = xzalloc_array(
-+ unsigned long, BITS_TO_LONGS(nr_ioapic_entries[apic]));
-+ if ( nr_ioapic_entries[apic] &&
-+ !ioapic_sbdf[IO_APIC_ID(apic)].pin_setup )
-+ {
-+ printk(XENLOG_ERR "IVHD Error: Out of memory\n");
-+ return 0;
-+ }
-+ }
-+ return dev_length;
-+ }
-+
-+ printk(XENLOG_ERR "IVHD Error: Invalid IO-APIC %#x\n", special->handle);
-+ return 0;
- }
-
- static int __init parse_ivhd_block(const struct acpi_ivrs_hardware *ivhd_block)
---- a/xen/drivers/passthrough/amd/iommu_init.c
-+++ b/xen/drivers/passthrough/amd/iommu_init.c
-@@ -1126,12 +1126,45 @@ static int __init amd_iommu_setup_device
- return 0;
- }
-
-+/* Check whether SP5100 SATA Combined mode is on */
-+static bool_t __init amd_sp5100_erratum28(void)
-+{
-+ u32 bus, id;
-+ u16 vendor_id, dev_id;
-+ u8 byte;
-+
-+ for (bus = 0; bus < 256; bus++)
-+ {
-+ id = pci_conf_read32(0, bus, 0x14, 0, PCI_VENDOR_ID);
-+
-+ vendor_id = id & 0xffff;
-+ dev_id = (id >> 16) & 0xffff;
-+
-+ /* SP5100 SMBus module sets Combined mode on */
-+ if (vendor_id != 0x1002 || dev_id != 0x4385)
-+ continue;
-+
-+ byte = pci_conf_read8(0, bus, 0x14, 0, 0xad);
-+ if ( (byte >> 3) & 1 )
-+ {
-+ printk(XENLOG_WARNING "AMD-Vi: SP5100 erratum 28 detected, disabling IOMMU.\n"
-+ "If possible, disable SATA Combined mode in BIOS or contact your vendor for BIOS update.\n");
-+ return 1;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
- int __init amd_iommu_init(void)
- {
- struct amd_iommu *iommu;
-
- BUG_ON( !iommu_found() );
-
-+ if ( amd_iommu_perdev_intremap && amd_sp5100_erratum28() )
-+ goto error_out;
-+
- ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();
-
- if ( !ivrs_bdf_entries )
---- a/xen/drivers/passthrough/amd/iommu_intr.c
-+++ b/xen/drivers/passthrough/amd/iommu_intr.c
-@@ -99,12 +99,12 @@ static void update_intremap_entry(u32* e
- static void update_intremap_entry_from_ioapic(
- int bdf,
- struct amd_iommu *iommu,
-- struct IO_APIC_route_entry *ioapic_rte)
-+ const struct IO_APIC_route_entry *rte,
-+ const struct IO_APIC_route_entry *old_rte)
- {
- unsigned long flags;
- u32* entry;
- u8 delivery_mode, dest, vector, dest_mode;
-- struct IO_APIC_route_entry *rte = ioapic_rte;
- int req_id;
- spinlock_t *lock;
- int offset;
-@@ -120,6 +120,14 @@ static void update_intremap_entry_from_i
- spin_lock_irqsave(lock, flags);
-
- offset = get_intremap_offset(vector, delivery_mode);
-+ if ( old_rte )
-+ {
-+ int old_offset = get_intremap_offset(old_rte->vector,
-+ old_rte->delivery_mode);
-+
-+ if ( offset != old_offset )
-+ free_intremap_entry(iommu->seg, bdf, old_offset);
-+ }
- entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset);
- update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
-
-@@ -188,6 +196,7 @@ int __init amd_iommu_setup_ioapic_remapp
- amd_iommu_flush_intremap(iommu, req_id);
- spin_unlock_irqrestore(&iommu->lock, flags);
- }
-+ set_bit(pin, ioapic_sbdf[IO_APIC_ID(apic)].pin_setup);
- }
- }
- return 0;
-@@ -199,6 +208,7 @@ void amd_iommu_ioapic_update_ire(
- struct IO_APIC_route_entry old_rte = { 0 };
- struct IO_APIC_route_entry new_rte = { 0 };
- unsigned int rte_lo = (reg & 1) ? reg - 1 : reg;
-+ unsigned int pin = (reg - 0x10) / 2;
- int saved_mask, seg, bdf;
- struct amd_iommu *iommu;
-
-@@ -236,6 +246,14 @@ void amd_iommu_ioapic_update_ire(
- *(((u32 *)&new_rte) + 1) = value;
- }
-
-+ if ( new_rte.mask &&
-+ !test_bit(pin, ioapic_sbdf[IO_APIC_ID(apic)].pin_setup) )
-+ {
-+ ASSERT(saved_mask);
-+ __io_apic_write(apic, reg, value);
-+ return;
-+ }
-+
- /* mask the interrupt while we change the intremap table */
- if ( !saved_mask )
- {
-@@ -244,7 +262,11 @@ void amd_iommu_ioapic_update_ire(
- }
-
- /* Update interrupt remapping entry */
-- update_intremap_entry_from_ioapic(bdf, iommu, &new_rte);
-+ update_intremap_entry_from_ioapic(
-+ bdf, iommu, &new_rte,
-+ test_and_set_bit(pin,
-+ ioapic_sbdf[IO_APIC_ID(apic)].pin_setup) ? &old_rte
-+ : NULL);
-
- /* Forward write access to IO-APIC RTE */
- __io_apic_write(apic, reg, value);
-@@ -354,6 +376,12 @@ void amd_iommu_msi_msg_update_ire(
- return;
- }
-
-+ if ( msi_desc->remap_index >= 0 )
-+ update_intremap_entry_from_msi_msg(iommu, pdev, msi_desc, NULL);
-+
-+ if ( !msg )
-+ return;
-+
- update_intremap_entry_from_msi_msg(iommu, pdev, msi_desc, msg);
- }
-
---- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
-+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
-@@ -205,6 +205,8 @@ int __init amd_iov_detect(void)
- {
- printk("AMD-Vi: Not overriding irq_vector_map setting\n");
- }
-+ if ( !amd_iommu_perdev_intremap )
-+ printk(XENLOG_WARNING "AMD-Vi: Using global interrupt remap table is not recommended (see XSA-36)!\n");
- return scan_pci_devices();
- }
-
---- a/xen/drivers/passthrough/iommu.c
-+++ b/xen/drivers/passthrough/iommu.c
-@@ -52,7 +52,7 @@ bool_t __read_mostly iommu_qinval = 1;
- bool_t __read_mostly iommu_intremap = 1;
- bool_t __read_mostly iommu_hap_pt_share = 1;
- bool_t __read_mostly iommu_debug;
--bool_t __read_mostly amd_iommu_perdev_intremap;
-+bool_t __read_mostly amd_iommu_perdev_intremap = 1;
-
- DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
-
---- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
-+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
-@@ -100,6 +100,7 @@ void amd_iommu_read_msi_from_ire(
-
- extern struct ioapic_sbdf {
- u16 bdf, seg;
-+ unsigned long *pin_setup;
- } ioapic_sbdf[MAX_IO_APICS];
- extern void *shared_intremap_table;
-
diff --git a/main/xen/xsa38.patch b/main/xen/xsa38.patch
deleted file mode 100644
index f4a5dc0881..0000000000
--- a/main/xen/xsa38.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-diff --git a/tools/ocaml/libs/xb/partial.ml b/tools/ocaml/libs/xb/partial.ml
-index 3558889..d4d1c7b 100644
---- a/tools/ocaml/libs/xb/partial.ml
-+++ b/tools/ocaml/libs/xb/partial.ml
-@@ -27,8 +27,15 @@ external header_size: unit -> int = "stub_header_size"
- external header_of_string_internal: string -> int * int * int * int
- = "stub_header_of_string"
-
-+let xenstore_payload_max = 4096 (* xen/include/public/io/xs_wire.h *)
-+
- let of_string s =
- let tid, rid, opint, dlen = header_of_string_internal s in
-+ (* A packet which is bigger than xenstore_payload_max is illegal.
-+ This will leave the guest connection is a bad state and will
-+ be hard to recover from without restarting the connection
-+ (ie rebooting the guest) *)
-+ let dlen = min xenstore_payload_max dlen in
- {
- tid = tid;
- rid = rid;
-@@ -38,6 +45,7 @@ let of_string s =
- }
-
- let append pkt s sz =
-+ if pkt.len > 4096 then failwith "Buffer.add: cannot grow buffer";
- Buffer.add_string pkt.buf (String.sub s 0 sz)
-
- let to_complete pkt =
-diff --git a/tools/ocaml/libs/xb/xs_ring_stubs.c b/tools/ocaml/libs/xb/xs_ring_stubs.c
-index 00414c5..4888ac5 100644
---- a/tools/ocaml/libs/xb/xs_ring_stubs.c
-+++ b/tools/ocaml/libs/xb/xs_ring_stubs.c
-@@ -39,21 +39,23 @@ static int xs_ring_read(struct mmap_interface *interface,
- char *buffer, int len)
- {
- struct xenstore_domain_interface *intf = interface->addr;
-- XENSTORE_RING_IDX cons, prod;
-+ XENSTORE_RING_IDX cons, prod; /* offsets only */
- int to_read;
-
-- cons = intf->req_cons;
-- prod = intf->req_prod;
-+ cons = *(volatile uint32*)&intf->req_cons;
-+ prod = *(volatile uint32*)&intf->req_prod;
- xen_mb();
-+ cons = MASK_XENSTORE_IDX(cons);
-+ prod = MASK_XENSTORE_IDX(prod);
- if (prod == cons)
- return 0;
-- if (MASK_XENSTORE_IDX(prod) > MASK_XENSTORE_IDX(cons))
-+ if (prod > cons)
- to_read = prod - cons;
- else
-- to_read = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
-+ to_read = XENSTORE_RING_SIZE - cons;
- if (to_read < len)
- len = to_read;
-- memcpy(buffer, intf->req + MASK_XENSTORE_IDX(cons), len);
-+ memcpy(buffer, intf->req + cons, len);
- xen_mb();
- intf->req_cons += len;
- return len;
-@@ -66,8 +68,8 @@ static int xs_ring_write(struct mmap_interface *interface,
- XENSTORE_RING_IDX cons, prod;
- int can_write;
-
-- cons = intf->rsp_cons;
-- prod = intf->rsp_prod;
-+ cons = *(volatile uint32*)&intf->rsp_cons;
-+ prod = *(volatile uint32*)&intf->rsp_prod;
- xen_mb();
- if ( (prod - cons) >= XENSTORE_RING_SIZE )
- return 0;
diff --git a/main/xen/xsa44-4.2.patch b/main/xen/xsa44-4.2.patch
deleted file mode 100644
index 07ed9386f6..0000000000
--- a/main/xen/xsa44-4.2.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-x86: clear EFLAGS.NT in SYSENTER entry path
-
-... as it causes problems if we happen to exit back via IRET: In the
-course of trying to handle the fault, the hypervisor creates a stack
-frame by hand, and uses PUSHFQ to set the respective EFLAGS field, but
-expects to be able to IRET through that stack frame to the second
-portion of the fixup code (which causes a #GP due to the stored EFLAGS
-having NT set).
-
-And even if this worked (e.g if we cleared NT in that path), it would
-then (through the fail safe callback) cause a #GP in the guest with the
-SYSENTER handler's first instruction as the source, which in turn would
-allow guest user mode code to crash the guest kernel.
-
-Inject a #GP on the fake (NULL) address of the SYSENTER instruction
-instead, just like in the case where the guest kernel didn't register
-a corresponding entry point.
-
-On 32-bit we also need to make sure we clear SYSENTER_CS for all CPUs
-(neither #RESET nor #INIT guarantee this).
-
-This is CVE-2013-1917 / XSA-44.
-
-Reported-by: Andrew Cooper <andrew.cooper3@citirx.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/acpi/suspend.c
-+++ b/xen/arch/x86/acpi/suspend.c
-@@ -81,8 +81,12 @@ void restore_rest_processor_state(void)
- }
-
- #else /* !defined(CONFIG_X86_64) */
-- if ( supervisor_mode_kernel && cpu_has_sep )
-- wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
-+ if ( cpu_has_sep )
-+ {
-+ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
-+ if ( supervisor_mode_kernel )
-+ wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
-+ }
- #endif
-
- /* Maybe load the debug registers. */
---- a/xen/arch/x86/cpu/common.c
-+++ b/xen/arch/x86/cpu/common.c
-@@ -655,8 +655,11 @@ void __cpuinit cpu_init(void)
- #if defined(CONFIG_X86_32)
- t->ss0 = __HYPERVISOR_DS;
- t->esp0 = get_stack_bottom();
-- if ( supervisor_mode_kernel && cpu_has_sep )
-+ if ( cpu_has_sep ) {
-+ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
-+ if ( supervisor_mode_kernel )
- wrmsr(MSR_IA32_SYSENTER_ESP, &t->esp1, 0);
-+ }
- #elif defined(CONFIG_X86_64)
- /* Bottom-of-stack must be 16-byte aligned! */
- BUG_ON((get_stack_bottom() & 15) != 0);
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -284,7 +284,14 @@ sysenter_eflags_saved:
- cmpb $0,VCPU_sysenter_disables_events(%rbx)
- movq VCPU_sysenter_addr(%rbx),%rax
- setne %cl
-+ testl $X86_EFLAGS_NT,UREGS_eflags(%rsp)
- leaq VCPU_trap_bounce(%rbx),%rdx
-+UNLIKELY_START(nz, sysenter_nt_set)
-+ pushfq
-+ andl $~X86_EFLAGS_NT,(%rsp)
-+ popfq
-+ xorl %eax,%eax
-+UNLIKELY_END(sysenter_nt_set)
- testq %rax,%rax
- leal (,%rcx,TBF_INTERRUPT),%ecx
- UNLIKELY_START(z, sysenter_gpf)
diff --git a/main/xen/xsa46-4.2.patch b/main/xen/xsa46-4.2.patch
deleted file mode 100644
index 9448ea9c67..0000000000
--- a/main/xen/xsa46-4.2.patch
+++ /dev/null
@@ -1,293 +0,0 @@
-x86: fix various issues with handling guest IRQs
-
-- properly revoke IRQ access in map_domain_pirq() error path
-- don't permit replacing an in use IRQ
-- don't accept inputs in the GSI range for MAP_PIRQ_TYPE_MSI
-- track IRQ access permission in host IRQ terms, not guest IRQ ones
- (and with that, also disallow Dom0 access to IRQ0)
-
-This is CVE-2013-1919 / XSA-46.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
-
---- a/tools/libxl/libxl_create.c
-+++ b/tools/libxl/libxl_create.c
-@@ -968,14 +968,16 @@ static void domcreate_launch_dm(libxl__e
- }
-
- for (i = 0; i < d_config->b_info.num_irqs; i++) {
-- uint32_t irq = d_config->b_info.irqs[i];
-+ int irq = d_config->b_info.irqs[i];
-
-- LOG(DEBUG, "dom%d irq %"PRIx32, domid, irq);
-+ LOG(DEBUG, "dom%d irq %d", domid, irq);
-
-- ret = xc_domain_irq_permission(CTX->xch, domid, irq, 1);
-+ ret = irq >= 0 ? xc_physdev_map_pirq(CTX->xch, domid, irq, &irq)
-+ : -EOVERFLOW;
-+ if (!ret)
-+ ret = xc_domain_irq_permission(CTX->xch, domid, irq, 1);
- if ( ret<0 ){
-- LOGE(ERROR,
-- "failed give dom%d access to irq %"PRId32, domid, irq);
-+ LOGE(ERROR, "failed give dom%d access to irq %d", domid, irq);
- ret = ERROR_FAIL;
- }
- }
---- a/tools/python/xen/xend/server/irqif.py
-+++ b/tools/python/xen/xend/server/irqif.py
-@@ -73,6 +73,12 @@ class IRQController(DevController):
-
- pirq = get_param('irq')
-
-+ rc = xc.physdev_map_pirq(domid = self.getDomid(),
-+ index = pirq,
-+ pirq = pirq)
-+ if rc < 0:
-+ raise VmError('irq: Failed to map irq %x' % (pirq))
-+
- rc = xc.domain_irq_permission(domid = self.getDomid(),
- pirq = pirq,
- allow_access = True)
-@@ -81,12 +87,6 @@ class IRQController(DevController):
- #todo non-fatal
- raise VmError(
- 'irq: Failed to configure irq: %d' % (pirq))
-- rc = xc.physdev_map_pirq(domid = self.getDomid(),
-- index = pirq,
-- pirq = pirq)
-- if rc < 0:
-- raise VmError(
-- 'irq: Failed to map irq %x' % (pirq))
- back = dict([(k, config[k]) for k in self.valid_cfg if k in config])
- return (self.allocateDeviceID(), back, {})
-
---- a/xen/arch/x86/domain_build.c
-+++ b/xen/arch/x86/domain_build.c
-@@ -1219,7 +1219,7 @@ int __init construct_dom0(
- /* DOM0 is permitted full I/O capabilities. */
- rc |= ioports_permit_access(dom0, 0, 0xFFFF);
- rc |= iomem_permit_access(dom0, 0UL, ~0UL);
-- rc |= irqs_permit_access(dom0, 0, d->nr_pirqs - 1);
-+ rc |= irqs_permit_access(dom0, 1, nr_irqs_gsi - 1);
-
- /*
- * Modify I/O port access permissions.
---- a/xen/arch/x86/domctl.c
-+++ b/xen/arch/x86/domctl.c
-@@ -772,9 +772,13 @@ long arch_do_domctl(
- goto bind_out;
-
- ret = -EPERM;
-- if ( !IS_PRIV(current->domain) &&
-- !irq_access_permitted(current->domain, bind->machine_irq) )
-- goto bind_out;
-+ if ( !IS_PRIV(current->domain) )
-+ {
-+ int irq = domain_pirq_to_irq(d, bind->machine_irq);
-+
-+ if ( irq <= 0 || !irq_access_permitted(current->domain, irq) )
-+ goto bind_out;
-+ }
-
- ret = -ESRCH;
- if ( iommu_enabled )
-@@ -803,9 +807,13 @@ long arch_do_domctl(
- bind = &(domctl->u.bind_pt_irq);
-
- ret = -EPERM;
-- if ( !IS_PRIV(current->domain) &&
-- !irq_access_permitted(current->domain, bind->machine_irq) )
-- goto unbind_out;
-+ if ( !IS_PRIV(current->domain) )
-+ {
-+ int irq = domain_pirq_to_irq(d, bind->machine_irq);
-+
-+ if ( irq <= 0 || !irq_access_permitted(current->domain, irq) )
-+ goto unbind_out;
-+ }
-
- if ( iommu_enabled )
- {
---- a/xen/arch/x86/irq.c
-+++ b/xen/arch/x86/irq.c
-@@ -184,6 +184,14 @@ int create_irq(int node)
- desc->arch.used = IRQ_UNUSED;
- irq = ret;
- }
-+ else if ( dom0 )
-+ {
-+ ret = irq_permit_access(dom0, irq);
-+ if ( ret )
-+ printk(XENLOG_G_ERR
-+ "Could not grant Dom0 access to IRQ%d (error %d)\n",
-+ irq, ret);
-+ }
-
- return irq;
- }
-@@ -280,6 +288,17 @@ void clear_irq_vector(int irq)
- void destroy_irq(unsigned int irq)
- {
- BUG_ON(!MSI_IRQ(irq));
-+
-+ if ( dom0 )
-+ {
-+ int err = irq_deny_access(dom0, irq);
-+
-+ if ( err )
-+ printk(XENLOG_G_ERR
-+ "Could not revoke Dom0 access to IRQ%u (error %d)\n",
-+ irq, err);
-+ }
-+
- dynamic_irq_cleanup(irq);
- clear_irq_vector(irq);
- }
-@@ -1858,7 +1877,7 @@ int map_domain_pirq(
-
- if ( !IS_PRIV(current->domain) &&
- !(IS_PRIV_FOR(current->domain, d) &&
-- irq_access_permitted(current->domain, pirq)))
-+ irq_access_permitted(current->domain, irq)))
- return -EPERM;
-
- if ( pirq < 0 || pirq >= d->nr_pirqs || irq < 0 || irq >= nr_irqs )
-@@ -1887,17 +1906,18 @@ int map_domain_pirq(
- return ret;
- }
-
-- ret = irq_permit_access(d, pirq);
-+ ret = irq_permit_access(d, irq);
- if ( ret )
- {
-- dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
-- d->domain_id, pirq);
-+ printk(XENLOG_G_ERR
-+ "dom%d: could not permit access to IRQ%d (pirq %d)\n",
-+ d->domain_id, irq, pirq);
- return ret;
- }
-
- ret = prepare_domain_irq_pirq(d, irq, pirq, &info);
- if ( ret )
-- return ret;
-+ goto revoke;
-
- desc = irq_to_desc(irq);
-
-@@ -1921,8 +1941,14 @@ int map_domain_pirq(
- spin_lock_irqsave(&desc->lock, flags);
-
- if ( desc->handler != &no_irq_type )
-+ {
-+ spin_unlock_irqrestore(&desc->lock, flags);
- dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
- d->domain_id, irq);
-+ pci_disable_msi(msi_desc);
-+ ret = -EBUSY;
-+ goto done;
-+ }
- setup_msi_handler(desc, msi_desc);
-
- if ( opt_irq_vector_map == OPT_IRQ_VECTOR_MAP_PERDEV
-@@ -1951,7 +1977,14 @@ int map_domain_pirq(
-
- done:
- if ( ret )
-+ {
- cleanup_domain_irq_pirq(d, irq, info);
-+ revoke:
-+ if ( irq_deny_access(d, irq) )
-+ printk(XENLOG_G_ERR
-+ "dom%d: could not revoke access to IRQ%d (pirq %d)\n",
-+ d->domain_id, irq, pirq);
-+ }
- return ret;
- }
-
-@@ -2017,10 +2050,11 @@ int unmap_domain_pirq(struct domain *d,
- if ( !forced_unbind )
- cleanup_domain_irq_pirq(d, irq, info);
-
-- ret = irq_deny_access(d, pirq);
-+ ret = irq_deny_access(d, irq);
- if ( ret )
-- dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
-- d->domain_id, pirq);
-+ printk(XENLOG_G_ERR
-+ "dom%d: could not deny access to IRQ%d (pirq %d)\n",
-+ d->domain_id, irq, pirq);
-
- done:
- return ret;
---- a/xen/arch/x86/physdev.c
-+++ b/xen/arch/x86/physdev.c
-@@ -147,7 +147,7 @@ int physdev_map_pirq(domid_t domid, int
- if ( irq == -1 )
- irq = create_irq(NUMA_NO_NODE);
-
-- if ( irq < 0 || irq >= nr_irqs )
-+ if ( irq < nr_irqs_gsi || irq >= nr_irqs )
- {
- dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
- d->domain_id);
---- a/xen/common/domctl.c
-+++ b/xen/common/domctl.c
-@@ -25,6 +25,7 @@
- #include <xen/paging.h>
- #include <xen/hypercall.h>
- #include <asm/current.h>
-+#include <asm/irq.h>
- #include <asm/page.h>
- #include <public/domctl.h>
- #include <xsm/xsm.h>
-@@ -897,9 +898,9 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
- else if ( xsm_irq_permission(d, pirq, allow) )
- ret = -EPERM;
- else if ( allow )
-- ret = irq_permit_access(d, pirq);
-+ ret = pirq_permit_access(d, pirq);
- else
-- ret = irq_deny_access(d, pirq);
-+ ret = pirq_deny_access(d, pirq);
-
- rcu_unlock_domain(d);
- }
---- a/xen/common/event_channel.c
-+++ b/xen/common/event_channel.c
-@@ -369,7 +369,7 @@ static long evtchn_bind_pirq(evtchn_bind
- if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
- return -EINVAL;
-
-- if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
-+ if ( !is_hvm_domain(d) && !pirq_access_permitted(d, pirq) )
- return -EPERM;
-
- spin_lock(&d->event_lock);
---- a/xen/include/xen/iocap.h
-+++ b/xen/include/xen/iocap.h
-@@ -28,4 +28,22 @@
- #define irq_access_permitted(d, i) \
- rangeset_contains_singleton((d)->irq_caps, i)
-
-+#define pirq_permit_access(d, i) ({ \
-+ struct domain *d__ = (d); \
-+ int i__ = domain_pirq_to_irq(d__, i); \
-+ i__ > 0 ? rangeset_add_singleton(d__->irq_caps, i__)\
-+ : -EINVAL; \
-+})
-+#define pirq_deny_access(d, i) ({ \
-+ struct domain *d__ = (d); \
-+ int i__ = domain_pirq_to_irq(d__, i); \
-+ i__ > 0 ? rangeset_remove_singleton(d__->irq_caps, i__)\
-+ : -EINVAL; \
-+})
-+#define pirq_access_permitted(d, i) ({ \
-+ struct domain *d__ = (d); \
-+ rangeset_contains_singleton(d__->irq_caps, \
-+ domain_pirq_to_irq(d__, i));\
-+})
-+
- #endif /* __XEN_IOCAP_H__ */
diff --git a/main/xen/xsa47-4.2-unstable.patch b/main/xen/xsa47-4.2-unstable.patch
deleted file mode 100644
index 7ebb8c8a31..0000000000
--- a/main/xen/xsa47-4.2-unstable.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-defer event channel bucket pointer store until after XSM checks
-
-Otherwise a dangling pointer can be left, which would cause subsequent
-memory corruption as soon as the space got re-allocated for some other
-purpose.
-
-This is CVE-2013-1920 / XSA-47.
-
-Reported-by: Wei Liu <wei.liu2@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
-
---- a/xen/common/event_channel.c
-+++ b/xen/common/event_channel.c
-@@ -140,7 +140,6 @@ static int get_free_port(struct domain *
- chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
- if ( unlikely(chn == NULL) )
- return -ENOMEM;
-- bucket_from_port(d, port) = chn;
-
- for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
- {
-@@ -153,6 +152,8 @@ static int get_free_port(struct domain *
- }
- }
-
-+ bucket_from_port(d, port) = chn;
-+
- return port;
- }
31