aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNatanael Copa <ncopa@alpinelinux.org>2013-02-05 15:59:04 +0000
committerNatanael Copa <ncopa@alpinelinux.org>2013-02-07 11:35:08 +0000
commit6f310f53bcdccbc9be004e1c3194bf2a7c0fb64d (patch)
tree57f72687160215f3959598c93d9d55021a9f5b14
parentd9966d0468bd0c7a2f11ec41f44642f98da93f2d (diff)
downloadalpine_aports-6f310f53bcdccbc9be004e1c3194bf2a7c0fb64d.tar.bz2
alpine_aports-6f310f53bcdccbc9be004e1c3194bf2a7c0fb64d.tar.xz
alpine_aports-6f310f53bcdccbc9be004e1c3194bf2a7c0fb64d.zip
main/linux-grsec: various xen security fixes
(cherry picked from commit 03f76f442fe7e358e07f507978eea46777af4268) Conflicts: main/linux-grsec/APKBUILD
-rw-r--r--main/linux-grsec/APKBUILD12
-rw-r--r--main/linux-grsec/xsa39-pvops-0001-xen-netback-shutdown-the-ring-if-it-contains-garbage.patch253
-rw-r--r--main/linux-grsec/xsa39-pvops-0002-xen-netback-don-t-leak-pages-on-failure-in-xen_netbk.patch132
-rw-r--r--main/linux-grsec/xsa39-pvops-0003-xen-netback-free-already-allocated-memory-on-failure.patch47
-rw-r--r--main/linux-grsec/xsa39-pvops-0004-netback-correct-netbk_tx_err-to-handle-wrap-around.patch27
5 files changed, 470 insertions, 1 deletions
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 1cc10630e8..cb719e4f8a 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -4,7 +4,7 @@ _flavor=grsec
4pkgname=linux-${_flavor} 4pkgname=linux-${_flavor}
5pkgver=3.6.11 5pkgver=3.6.11
6_kernver=3.6 6_kernver=3.6
7pkgrel=8 7pkgrel=10
8pkgdesc="Linux kernel with grsecurity" 8pkgdesc="Linux kernel with grsecurity"
9url=http://grsecurity.net 9url=http://grsecurity.net
10depends="mkinitfs linux-firmware" 10depends="mkinitfs linux-firmware"
@@ -23,6 +23,12 @@ source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
23 0001-r8169-remove-the-obsolete-and-incorrect-AMD-workarou.patch 23 0001-r8169-remove-the-obsolete-and-incorrect-AMD-workarou.patch
24 r8169-fix-vlan-tag-reordering.patch 24 r8169-fix-vlan-tag-reordering.patch
25 25
26 xsa39-pvops-0001-xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
27 xsa39-pvops-0002-xen-netback-don-t-leak-pages-on-failure-in-xen_netbk.patch
28 xsa39-pvops-0003-xen-netback-free-already-allocated-memory-on-failure.patch
29 xsa39-pvops-0004-netback-correct-netbk_tx_err-to-handle-wrap-around.patch
30
31
26 kernelconfig.x86 32 kernelconfig.x86
27 kernelconfig.x86_64 33 kernelconfig.x86_64
28 " 34 "
@@ -152,5 +158,9 @@ daf2cbb558588c49c138fe9ca2482b64 r8169-num-rx-desc.patch
152d9b4a528e722d10ba53034ebd440c31b ipv4-remove-output-route-check-in-ipv4_mtu.patch 158d9b4a528e722d10ba53034ebd440c31b ipv4-remove-output-route-check-in-ipv4_mtu.patch
15363468b44e34fa19237e0a2a1f6737b14 0001-r8169-remove-the-obsolete-and-incorrect-AMD-workarou.patch 15963468b44e34fa19237e0a2a1f6737b14 0001-r8169-remove-the-obsolete-and-incorrect-AMD-workarou.patch
15444a37e1289e1056300574848aea8bd31 r8169-fix-vlan-tag-reordering.patch 16044a37e1289e1056300574848aea8bd31 r8169-fix-vlan-tag-reordering.patch
161706652ed6c17c5f7bb46a6c8318f9e75 xsa39-pvops-0001-xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
162286101482a2e4b7d8c0dff16af36b3e9 xsa39-pvops-0002-xen-netback-don-t-leak-pages-on-failure-in-xen_netbk.patch
16389dbb0886c9d17c3c4a5ff4f1443e936 xsa39-pvops-0003-xen-netback-free-already-allocated-memory-on-failure.patch
164bce9f08c86570a0a86ef36f1d2e7a2dd xsa39-pvops-0004-netback-correct-netbk_tx_err-to-handle-wrap-around.patch
155373db5888708938c6b1baed6da781fcb kernelconfig.x86 165373db5888708938c6b1baed6da781fcb kernelconfig.x86
156190788fb10e79abce9d570d5e87ec3b4 kernelconfig.x86_64" 166190788fb10e79abce9d570d5e87ec3b4 kernelconfig.x86_64"
diff --git a/main/linux-grsec/xsa39-pvops-0001-xen-netback-shutdown-the-ring-if-it-contains-garbage.patch b/main/linux-grsec/xsa39-pvops-0001-xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
new file mode 100644
index 0000000000..3f983028f2
--- /dev/null
+++ b/main/linux-grsec/xsa39-pvops-0001-xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
@@ -0,0 +1,253 @@
1From 7dd7ce44593a8c4c715fa665027af8e07245c8cf Mon Sep 17 00:00:00 2001
2From: Ian Campbell <ian.campbell@citrix.com>
3Date: Fri, 11 Jan 2013 14:26:29 +0000
4Subject: [PATCH 1/4] xen/netback: shutdown the ring if it contains garbage.
5
6A buggy or malicious frontend should not be able to confuse netback.
7If we spot anything which is not as it should be then shutdown the
8device and don't try to continue with the ring in a potentially
9hostile state. Well behaved and non-hostile frontends will not be
10penalised.
11
12As well as making the existing checks for such errors fatal also add a
13new check that ensures that there isn't an insane number of requests
14on the ring (i.e. more than would fit in the ring). If the ring
15contains garbage then previously it was possible to loop over this
16insane number, getting an error each time and therefore not generating
17any more pending requests and therefore not exiting the loop in
18xen_netbk_tx_build_gops for an extended period.
19
20Also turn various netdev_dbg calls which now precipitate a fatal error
21into netdev_err, they are rate limited because the device is shutdown
22afterwards.
23
24This fixes at least one known DoS/softlockup of the backend domain.
25
26Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
27Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
28Acked-by: Jan Beulich <JBeulich@suse.com>
29---
30 drivers/net/xen-netback/common.h | 3 ++
31 drivers/net/xen-netback/interface.c | 23 ++++++++-----
32 drivers/net/xen-netback/netback.c | 63 +++++++++++++++++++++++++---------
33 3 files changed, 63 insertions(+), 26 deletions(-)
34
35diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
36index 94b79c3..9d7f172 100644
37--- a/drivers/net/xen-netback/common.h
38+++ b/drivers/net/xen-netback/common.h
39@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
40 /* Notify xenvif that ring now has space to send an skb to the frontend */
41 void xenvif_notify_tx_completion(struct xenvif *vif);
42
43+/* Prevent the device from generating any further traffic. */
44+void xenvif_carrier_off(struct xenvif *vif);
45+
46 /* Returns number of ring slots required to send an skb to the frontend */
47 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
48
49diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
50index b7d41f8..b8c5193 100644
51--- a/drivers/net/xen-netback/interface.c
52+++ b/drivers/net/xen-netback/interface.c
53@@ -343,17 +343,22 @@ err:
54 return err;
55 }
56
57-void xenvif_disconnect(struct xenvif *vif)
58+void xenvif_carrier_off(struct xenvif *vif)
59 {
60 struct net_device *dev = vif->dev;
61- if (netif_carrier_ok(dev)) {
62- rtnl_lock();
63- netif_carrier_off(dev); /* discard queued packets */
64- if (netif_running(dev))
65- xenvif_down(vif);
66- rtnl_unlock();
67- xenvif_put(vif);
68- }
69+
70+ rtnl_lock();
71+ netif_carrier_off(dev); /* discard queued packets */
72+ if (netif_running(dev))
73+ xenvif_down(vif);
74+ rtnl_unlock();
75+ xenvif_put(vif);
76+}
77+
78+void xenvif_disconnect(struct xenvif *vif)
79+{
80+ if (netif_carrier_ok(vif->dev))
81+ xenvif_carrier_off(vif);
82
83 atomic_dec(&vif->refcnt);
84 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
85diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
86index f2d6b78..1a449f9 100644
87--- a/drivers/net/xen-netback/netback.c
88+++ b/drivers/net/xen-netback/netback.c
89@@ -888,6 +888,13 @@ static void netbk_tx_err(struct xenvif *vif,
90 xenvif_put(vif);
91 }
92
93+static void netbk_fatal_tx_err(struct xenvif *vif)
94+{
95+ netdev_err(vif->dev, "fatal error; disabling device\n");
96+ xenvif_carrier_off(vif);
97+ xenvif_put(vif);
98+}
99+
100 static int netbk_count_requests(struct xenvif *vif,
101 struct xen_netif_tx_request *first,
102 struct xen_netif_tx_request *txp,
103@@ -901,19 +908,22 @@ static int netbk_count_requests(struct xenvif *vif,
104
105 do {
106 if (frags >= work_to_do) {
107- netdev_dbg(vif->dev, "Need more frags\n");
108+ netdev_err(vif->dev, "Need more frags\n");
109+ netbk_fatal_tx_err(vif);
110 return -frags;
111 }
112
113 if (unlikely(frags >= MAX_SKB_FRAGS)) {
114- netdev_dbg(vif->dev, "Too many frags\n");
115+ netdev_err(vif->dev, "Too many frags\n");
116+ netbk_fatal_tx_err(vif);
117 return -frags;
118 }
119
120 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
121 sizeof(*txp));
122 if (txp->size > first->size) {
123- netdev_dbg(vif->dev, "Frags galore\n");
124+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
125+ netbk_fatal_tx_err(vif);
126 return -frags;
127 }
128
129@@ -921,8 +931,9 @@ static int netbk_count_requests(struct xenvif *vif,
130 frags++;
131
132 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
133- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
134+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
135 txp->offset, txp->size);
136+ netbk_fatal_tx_err(vif);
137 return -frags;
138 }
139 } while ((txp++)->flags & XEN_NETTXF_more_data);
140@@ -1095,7 +1106,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
141
142 do {
143 if (unlikely(work_to_do-- <= 0)) {
144- netdev_dbg(vif->dev, "Missing extra info\n");
145+ netdev_err(vif->dev, "Missing extra info\n");
146+ netbk_fatal_tx_err(vif);
147 return -EBADR;
148 }
149
150@@ -1104,8 +1116,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
151 if (unlikely(!extra.type ||
152 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
153 vif->tx.req_cons = ++cons;
154- netdev_dbg(vif->dev,
155+ netdev_err(vif->dev,
156 "Invalid extra type: %d\n", extra.type);
157+ netbk_fatal_tx_err(vif);
158 return -EINVAL;
159 }
160
161@@ -1121,13 +1134,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
162 struct xen_netif_extra_info *gso)
163 {
164 if (!gso->u.gso.size) {
165- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
166+ netdev_err(vif->dev, "GSO size must not be zero.\n");
167+ netbk_fatal_tx_err(vif);
168 return -EINVAL;
169 }
170
171 /* Currently only TCPv4 S.O. is supported. */
172 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
173- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
174+ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
175+ netbk_fatal_tx_err(vif);
176 return -EINVAL;
177 }
178
179@@ -1264,9 +1279,26 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
180
181 /* Get a netif from the list with work to do. */
182 vif = poll_net_schedule_list(netbk);
183+ /*
184+ * This can sometimes happen because the test of
185+ * list_empty(net_schedule_list) at the top of the
186+ * loop is unlocked. Just go back and have another
187+ * look.
188+ */
189 if (!vif)
190 continue;
191
192+ if (vif->tx.sring->req_prod - vif->tx.req_cons >
193+ XEN_NETIF_TX_RING_SIZE) {
194+ netdev_err(vif->dev,
195+ "Impossible number of requests. "
196+ "req_prod %d, req_cons %d, size %ld\n",
197+ vif->tx.sring->req_prod, vif->tx.req_cons,
198+ XEN_NETIF_TX_RING_SIZE);
199+ netbk_fatal_tx_err(vif);
200+ continue;
201+ }
202+
203 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
204 if (!work_to_do) {
205 xenvif_put(vif);
206@@ -1294,17 +1326,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
207 work_to_do = xen_netbk_get_extras(vif, extras,
208 work_to_do);
209 idx = vif->tx.req_cons;
210- if (unlikely(work_to_do < 0)) {
211- netbk_tx_err(vif, &txreq, idx);
212+ if (unlikely(work_to_do < 0))
213 continue;
214- }
215 }
216
217 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
218- if (unlikely(ret < 0)) {
219- netbk_tx_err(vif, &txreq, idx - ret);
220+ if (unlikely(ret < 0))
221 continue;
222- }
223+
224 idx += ret;
225
226 if (unlikely(txreq.size < ETH_HLEN)) {
227@@ -1316,11 +1345,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
228
229 /* No crossing a page as the payload mustn't fragment. */
230 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
231- netdev_dbg(vif->dev,
232+ netdev_err(vif->dev,
233 "txreq.offset: %x, size: %u, end: %lu\n",
234 txreq.offset, txreq.size,
235 (txreq.offset&~PAGE_MASK) + txreq.size);
236- netbk_tx_err(vif, &txreq, idx);
237+ netbk_fatal_tx_err(vif);
238 continue;
239 }
240
241@@ -1348,8 +1377,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
242 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
243
244 if (netbk_set_skb_gso(vif, skb, gso)) {
245+ /* Failure in netbk_set_skb_gso is fatal. */
246 kfree_skb(skb);
247- netbk_tx_err(vif, &txreq, idx);
248 continue;
249 }
250 }
251--
2521.7.2.5
253
diff --git a/main/linux-grsec/xsa39-pvops-0002-xen-netback-don-t-leak-pages-on-failure-in-xen_netbk.patch b/main/linux-grsec/xsa39-pvops-0002-xen-netback-don-t-leak-pages-on-failure-in-xen_netbk.patch
new file mode 100644
index 0000000000..686f38bb7a
--- /dev/null
+++ b/main/linux-grsec/xsa39-pvops-0002-xen-netback-don-t-leak-pages-on-failure-in-xen_netbk.patch
@@ -0,0 +1,132 @@
1From 90420631d2b78aca28c94beb66b25447e57a8dd4 Mon Sep 17 00:00:00 2001
2From: Ian Campbell <ian.campbell@citrix.com>
3Date: Mon, 14 Jan 2013 12:20:04 +0000
4Subject: [PATCH 2/4] xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
5
6Signed-off-by: Matthew Daley <mattjd@gmail.com>
7Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
8Acked-by: Ian Campbell <ian.campbell@citrix.com>
9Acked-by: Jan Beulich <JBeulich@suse.com>
10---
11 drivers/net/xen-netback/netback.c | 38 ++++++++++++------------------------
12 1 files changed, 13 insertions(+), 25 deletions(-)
13
14diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
15index 1a449f9..975241e 100644
16--- a/drivers/net/xen-netback/netback.c
17+++ b/drivers/net/xen-netback/netback.c
18@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
19 atomic_dec(&netbk->netfront_count);
20 }
21
22-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
23+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
24+ u8 status);
25 static void make_tx_response(struct xenvif *vif,
26 struct xen_netif_tx_request *txp,
27 s8 st);
28@@ -1007,30 +1008,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
29 {
30 struct gnttab_copy *gop = *gopp;
31 u16 pending_idx = *((u16 *)skb->data);
32- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
33- struct xenvif *vif = pending_tx_info[pending_idx].vif;
34- struct xen_netif_tx_request *txp;
35 struct skb_shared_info *shinfo = skb_shinfo(skb);
36 int nr_frags = shinfo->nr_frags;
37 int i, err, start;
38
39 /* Check status of header. */
40 err = gop->status;
41- if (unlikely(err)) {
42- pending_ring_idx_t index;
43- index = pending_index(netbk->pending_prod++);
44- txp = &pending_tx_info[pending_idx].req;
45- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
46- netbk->pending_ring[index] = pending_idx;
47- xenvif_put(vif);
48- }
49+ if (unlikely(err))
50+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
51
52 /* Skip first skb fragment if it is on same page as header fragment. */
53 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
54
55 for (i = start; i < nr_frags; i++) {
56 int j, newerr;
57- pending_ring_idx_t index;
58
59 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
60
61@@ -1039,16 +1030,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
62 if (likely(!newerr)) {
63 /* Had a previous error? Invalidate this fragment. */
64 if (unlikely(err))
65- xen_netbk_idx_release(netbk, pending_idx);
66+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
67 continue;
68 }
69
70 /* Error on this fragment: respond to client with an error. */
71- txp = &netbk->pending_tx_info[pending_idx].req;
72- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
73- index = pending_index(netbk->pending_prod++);
74- netbk->pending_ring[index] = pending_idx;
75- xenvif_put(vif);
76+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
77
78 /* Not the first error? Preceding frags already invalidated. */
79 if (err)
80@@ -1056,10 +1043,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
81
82 /* First error: invalidate header and preceding fragments. */
83 pending_idx = *((u16 *)skb->data);
84- xen_netbk_idx_release(netbk, pending_idx);
85+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
86 for (j = start; j < i; j++) {
87 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
88- xen_netbk_idx_release(netbk, pending_idx);
89+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
90 }
91
92 /* Remember the error: invalidate all subsequent fragments. */
93@@ -1093,7 +1080,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
94
95 /* Take an extra reference to offset xen_netbk_idx_release */
96 get_page(netbk->mmap_pages[pending_idx]);
97- xen_netbk_idx_release(netbk, pending_idx);
98+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
99 }
100 }
101
102@@ -1477,7 +1464,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
103 txp->size -= data_len;
104 } else {
105 /* Schedule a response immediately. */
106- xen_netbk_idx_release(netbk, pending_idx);
107+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
108 }
109
110 if (txp->flags & XEN_NETTXF_csum_blank)
111@@ -1529,7 +1516,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
112 xen_netbk_tx_submit(netbk);
113 }
114
115-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
116+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
117+ u8 status)
118 {
119 struct xenvif *vif;
120 struct pending_tx_info *pending_tx_info;
121@@ -1543,7 +1531,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
122
123 vif = pending_tx_info->vif;
124
125- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
126+ make_tx_response(vif, &pending_tx_info->req, status);
127
128 index = pending_index(netbk->pending_prod++);
129 netbk->pending_ring[index] = pending_idx;
130--
1311.7.2.5
132
diff --git a/main/linux-grsec/xsa39-pvops-0003-xen-netback-free-already-allocated-memory-on-failure.patch b/main/linux-grsec/xsa39-pvops-0003-xen-netback-free-already-allocated-memory-on-failure.patch
new file mode 100644
index 0000000000..1c71801958
--- /dev/null
+++ b/main/linux-grsec/xsa39-pvops-0003-xen-netback-free-already-allocated-memory-on-failure.patch
@@ -0,0 +1,47 @@
1From b6b1f17aa44acfe1024968bafb1d1fe7704a749a Mon Sep 17 00:00:00 2001
2From: Ian Campbell <ian.campbell@citrix.com>
3Date: Mon, 14 Jan 2013 12:51:22 +0000
4Subject: [PATCH 3/4] xen/netback: free already allocated memory on failure in xen_netbk_get_requests
5
6Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
7---
8 drivers/net/xen-netback/netback.c | 16 +++++++++++++++-
9 1 files changed, 15 insertions(+), 1 deletions(-)
10
11diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
12index 975241e..1a99288 100644
13--- a/drivers/net/xen-netback/netback.c
14+++ b/drivers/net/xen-netback/netback.c
15@@ -978,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
16 pending_idx = netbk->pending_ring[index];
17 page = xen_netbk_alloc_page(netbk, skb, pending_idx);
18 if (!page)
19- return NULL;
20+ goto err;
21
22 gop->source.u.ref = txp->gref;
23 gop->source.domid = vif->domid;
24@@ -1000,6 +1000,20 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
25 }
26
27 return gop;
28+err:
29+ /*
30+ * Unwind, freeing all pages and sending error
31+ * responses.
32+ */
33+ while (i-- > start) {
34+ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
35+ XEN_NETIF_RSP_ERROR);
36+ }
37+ /* The head too, if necessary. */
38+ if (start)
39+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
40+
41+ return NULL;
42 }
43
44 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
45--
461.7.2.5
47
diff --git a/main/linux-grsec/xsa39-pvops-0004-netback-correct-netbk_tx_err-to-handle-wrap-around.patch b/main/linux-grsec/xsa39-pvops-0004-netback-correct-netbk_tx_err-to-handle-wrap-around.patch
new file mode 100644
index 0000000000..c76a2c40eb
--- /dev/null
+++ b/main/linux-grsec/xsa39-pvops-0004-netback-correct-netbk_tx_err-to-handle-wrap-around.patch
@@ -0,0 +1,27 @@
1From ea5e3c1e8fd9ffe6080e01af7769a9fa420cc62e Mon Sep 17 00:00:00 2001
2From: Ian Campbell <ian.campbell@citrix.com>
3Date: Mon, 14 Jan 2013 13:32:31 +0000
4Subject: [PATCH 4/4] netback: correct netbk_tx_err to handle wrap around.
5
6Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
7Acked-by: Jan Beulich <JBeulich@suse.com>
8---
9 drivers/net/xen-netback/netback.c | 2 +-
10 1 files changed, 1 insertions(+), 1 deletions(-)
11
12diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
13index 1a99288..28d5e06 100644
14--- a/drivers/net/xen-netback/netback.c
15+++ b/drivers/net/xen-netback/netback.c
16@@ -880,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
17
18 do {
19 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
20- if (cons >= end)
21+ if (cons == end)
22 break;
23 txp = RING_GET_REQUEST(&vif->tx, cons++);
24 } while (1);
25--
261.7.2.5
27