author     Natanael Copa <ncopa@alpinelinux.org>   2010-03-23 09:40:14 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>   2010-03-23 09:43:05 +0000
commit     9ecc46adcb899a119c17101dd3c2aa029445a2ac (patch)
tree       b64e6a00b0b5f32ced0f48eec3f51e785e376419
parent     47bfd5f590ebebd0204adf9dc358253c06d59725 (diff)
download   alpine_aports-9ecc46adcb899a119c17101dd3c2aa029445a2ac.tar.bz2
           alpine_aports-9ecc46adcb899a119c17101dd3c2aa029445a2ac.tar.xz
           alpine_aports-9ecc46adcb899a119c17101dd3c2aa029445a2ac.zip
main/linux-grsec: cleanup unused patches
(cherry picked from commit aec9c8c2ded84cc042069aab670bf5cf17c516b3)
-rw-r--r--  main/linux-grsec/net-2.6.git-87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9.patch   109
-rw-r--r--  main/linux-grsec/xfrm-flow-cache-grsec.patch                                 1154
2 files changed, 0 insertions, 1263 deletions
diff --git a/main/linux-grsec/net-2.6.git-87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9.patch b/main/linux-grsec/net-2.6.git-87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9.patch
deleted file mode 100644
index 7cc9bf7896..0000000000
--- a/main/linux-grsec/net-2.6.git-87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9.patch
+++ /dev/null
@@ -1,109 +0,0 @@
1From 87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9 Mon Sep 17 00:00:00 2001
2From: Herbert Xu <herbert@gondor.apana.org.au>
3Date: Tue, 2 Mar 2010 02:51:56 +0000
4Subject: [PATCH] ipsec: Fix bogus bundle flowi
5
6When I merged the bundle creation code, I introduced a bogus
7flowi value in the bundle. Instead of getting from the caller,
8it was instead set to the flow in the route object, which is
9totally different.
10
11The end result is that the bundles we created never match, and
12we instead end up with an ever growing bundle list.
13
14Thanks to Jamal for finding this problem.
15
16Reported-by: Jamal Hadi Salim <hadi@cyberus.ca>
17Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
18Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
19Acked-by: Jamal Hadi Salim <hadi@cyberus.ca>
20Signed-off-by: David S. Miller <davem@davemloft.net>
21---
22 include/net/xfrm.h | 3 ++-
23 net/ipv4/xfrm4_policy.c | 5 +++--
24 net/ipv6/xfrm6_policy.c | 3 ++-
25 net/xfrm/xfrm_policy.c | 7 ++++---
26 4 files changed, 11 insertions(+), 7 deletions(-)
27
28diff --git a/include/net/xfrm.h b/include/net/xfrm.h
29index a7df327..d74e080 100644
30--- a/include/net/xfrm.h
31+++ b/include/net/xfrm.h
32@@ -275,7 +275,8 @@ struct xfrm_policy_afinfo {
33 struct dst_entry *dst,
34 int nfheader_len);
35 int (*fill_dst)(struct xfrm_dst *xdst,
36- struct net_device *dev);
37+ struct net_device *dev,
38+ struct flowi *fl);
39 };
40
41 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
42diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
43index 67107d6..e4a1483 100644
44--- a/net/ipv4/xfrm4_policy.c
45+++ b/net/ipv4/xfrm4_policy.c
46@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
47 return 0;
48 }
49
50-static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
51+static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
52+ struct flowi *fl)
53 {
54 struct rtable *rt = (struct rtable *)xdst->route;
55
56- xdst->u.rt.fl = rt->fl;
57+ xdst->u.rt.fl = *fl;
58
59 xdst->u.dst.dev = dev;
60 dev_hold(dev);
61diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
62index dbdc696..ae18165 100644
63--- a/net/ipv6/xfrm6_policy.c
64+++ b/net/ipv6/xfrm6_policy.c
65@@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
66 return 0;
67 }
68
69-static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
70+static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
71+ struct flowi *fl)
72 {
73 struct rt6_info *rt = (struct rt6_info*)xdst->route;
74
75diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
76index 34a5ef8..843e066 100644
77--- a/net/xfrm/xfrm_policy.c
78+++ b/net/xfrm/xfrm_policy.c
79@@ -1372,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
80 return err;
81 }
82
83-static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
84+static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
85+ struct flowi *fl)
86 {
87 struct xfrm_policy_afinfo *afinfo =
88 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
89@@ -1381,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
90 if (!afinfo)
91 return -EINVAL;
92
93- err = afinfo->fill_dst(xdst, dev);
94+ err = afinfo->fill_dst(xdst, dev, fl);
95
96 xfrm_policy_put_afinfo(afinfo);
97
98@@ -1486,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
99 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
100 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
101
102- err = xfrm_fill_dst(xdst, dev);
103+ err = xfrm_fill_dst(xdst, dev, fl);
104 if (err)
105 goto free_dst;
106
107--
1081.7.0.2
109
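
The commit message of the deleted patch above describes the bug it backported a fix for: the bundle was keyed on the flowi stored in the route object instead of the flow key supplied by the caller, so cached bundles never matched later lookups and the bundle list grew without bound. A minimal standalone C sketch of that idea (illustrative names only, not the kernel code itself):

/*
 * Standalone sketch (not kernel code) of the bug the deleted patch fixed:
 * the bundle's flow key must come from the caller's lookup key, not from
 * whatever flowi happens to be stored in the route, or later lookups never
 * match the cached bundle. All struct and function names are illustrative.
 */
#include <stdio.h>
#include <string.h>

struct flowi {            /* minimal stand-in for the kernel's flow key */
    unsigned int saddr;
    unsigned int daddr;
};

struct bundle {
    struct flowi fl;      /* key the bundle is cached under */
};

/* Old behaviour: copy the flowi from the route object (often stale). */
static void fill_dst_old(struct bundle *b, const struct flowi *route_fl)
{
    b->fl = *route_fl;
}

/* Fixed behaviour: copy the caller's lookup key into the bundle. */
static void fill_dst_new(struct bundle *b, const struct flowi *caller_fl)
{
    b->fl = *caller_fl;
}

static int bundle_matches(const struct bundle *b, const struct flowi *key)
{
    return memcmp(&b->fl, key, sizeof(*key)) == 0;
}

int main(void)
{
    struct flowi lookup_key = { .saddr = 0x0a000001, .daddr = 0x0a000002 };
    struct flowi route_key  = { .saddr = 0,          .daddr = 0          };
    struct bundle old, fixed;

    fill_dst_old(&old, &route_key);      /* bundle keyed on the wrong flow */
    fill_dst_new(&fixed, &lookup_key);   /* bundle keyed on the real flow  */

    printf("old bundle matches later lookup: %d\n",
           bundle_matches(&old, &lookup_key));   /* 0 -> list keeps growing */
    printf("fixed bundle matches later lookup: %d\n",
           bundle_matches(&fixed, &lookup_key)); /* 1 -> bundle is reused   */
    return 0;
}

This is why the upstream fix threads the caller's struct flowi through every per-family fill_dst() callback rather than reading it back out of the route.
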
diff --git a/main/linux-grsec/xfrm-flow-cache-grsec.patch b/main/linux-grsec/xfrm-flow-cache-grsec.patch
deleted file mode 100644
index 881623d8ae..0000000000
--- a/main/linux-grsec/xfrm-flow-cache-grsec.patch
+++ /dev/null
@@ -1,1154 +0,0 @@
1From 3519d7c86a6e87584d25f3292b53d3ce865a659e Mon Sep 17 00:00:00 2001
2From: Natanael Copa <ncopa@alpinelinux.org>
3Date: Mon, 15 Mar 2010 15:31:37 +0000
4Subject: [PATCH] xfrm: flow cache2
5
6---
7 include/net/flow.h | 39 ++++-
8 include/net/netns/xfrm.h | 4 +
9 include/net/xfrm.h | 1 +
10 net/core/flow.c | 342 ++++++++++++++++++--------------------
11 net/ipv6/inet6_connection_sock.c | 6 +-
12 net/xfrm/xfrm_policy.c | 271 +++++++++++++++++++++---------
13 6 files changed, 394 insertions(+), 269 deletions(-)
14
15diff --git a/include/net/flow.h b/include/net/flow.h
16index 809970b..814a9d2 100644
17--- a/include/net/flow.h
18+++ b/include/net/flow.h
19@@ -8,6 +8,9 @@
20 #define _NET_FLOW_H
21
22 #include <linux/in6.h>
23+#include <linux/notifier.h>
24+#include <linux/timer.h>
25+#include <linux/slab.h>
26 #include <asm/atomic.h>
27
28 struct flowi {
29@@ -86,13 +89,37 @@ struct flowi {
30
31 struct net;
32 struct sock;
33-typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
34- u8 dir, void **objp, atomic_t **obj_refp);
35
36-extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
37- u8 dir, flow_resolve_t resolver);
38-extern void flow_cache_flush(void);
39-extern atomic_t flow_cache_genid;
40+struct flow_cache_percpu;
41+struct flow_cache_entry;
42+
43+struct flow_cache {
44+ u32 hash_shift;
45+ u32 order;
46+ struct flow_cache_percpu * percpu;
47+ struct notifier_block hotcpu_notifier;
48+ int low_watermark;
49+ int high_watermark;
50+ struct timer_list rnd_timer;
51+ struct kmem_cache * flow_cachep;
52+};
53+
54+struct flow_cache_entry {
55+ struct flow_cache_entry *next;
56+ struct flowi key;
57+ u16 family;
58+ u8 dir;
59+};
60+
61+extern struct flow_cache_entry *flow_cache_lookup(
62+ struct flow_cache *cache, struct flowi *key,
63+ u16 family, u8 dir);
64+extern void flow_cache_entry_put(struct flow_cache_entry *fce);
65+
66+void flow_cache_flush(struct flow_cache *fc,
67+ void (*flush)(struct flow_cache *fc, struct flow_cache_entry *fce));
68+extern int flow_cache_init(struct flow_cache *cache, size_t entry_size);
69+extern void flow_cache_fini(struct flow_cache *cache);
70
71 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
72 {
73diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
74index 1ba9127..4bb72c4 100644
75--- a/include/net/netns/xfrm.h
76+++ b/include/net/netns/xfrm.h
77@@ -41,6 +41,10 @@ struct netns_xfrm {
78 struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
79 unsigned int policy_count[XFRM_POLICY_MAX * 2];
80 struct work_struct policy_hash_work;
81+ atomic_t policy_genid;
82+ struct hlist_head policy_gc_list;
83+ struct work_struct policy_gc_work;
84+ struct flow_cache flow_cache;
85
86 struct sock *nlsk;
87
88diff --git a/include/net/xfrm.h b/include/net/xfrm.h
89index 223e90a..5cd4e29 100644
90--- a/include/net/xfrm.h
91+++ b/include/net/xfrm.h
92@@ -487,6 +487,7 @@ struct xfrm_policy
93 struct xfrm_lifetime_cfg lft;
94 struct xfrm_lifetime_cur curlft;
95 struct dst_entry *bundles;
96+ atomic_t bundles_genid;
97 struct xfrm_policy_walk_entry walk;
98 u8 type;
99 u8 action;
100diff --git a/net/core/flow.c b/net/core/flow.c
101index 5b27992..e3782c2 100644
102--- a/net/core/flow.c
103+++ b/net/core/flow.c
104@@ -25,114 +25,85 @@
105 #include <asm/atomic.h>
106 #include <linux/security.h>
107
108-struct flow_cache_entry {
109- struct flow_cache_entry *next;
110- u16 family;
111- u8 dir;
112- u32 genid;
113- struct flowi key;
114- void *object;
115- atomic_t *object_ref;
116-};
117-
118-atomic_t flow_cache_genid = ATOMIC_INIT(0);
119-
120-static u32 flow_hash_shift;
121-#define flow_hash_size (1 << flow_hash_shift)
122-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
123-
124-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
125-
126-static struct kmem_cache *flow_cachep __read_mostly;
127
128-static int flow_lwm, flow_hwm;
129-
130-struct flow_percpu_info {
131- int hash_rnd_recalc;
132- u32 hash_rnd;
133- int count;
134+struct flow_cache_percpu {
135+ struct flow_cache_entry ** hash_table;
136+ int hash_count;
137+ u32 hash_rnd;
138+ int hash_rnd_recalc;
139+ struct tasklet_struct flush_tasklet;
140 };
141-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
142-
143-#define flow_hash_rnd_recalc(cpu) \
144- (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
145-#define flow_hash_rnd(cpu) \
146- (per_cpu(flow_hash_info, cpu).hash_rnd)
147-#define flow_count(cpu) \
148- (per_cpu(flow_hash_info, cpu).count)
149-
150-static struct timer_list flow_hash_rnd_timer;
151-
152-#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
153
154 struct flow_flush_info {
155- atomic_t cpuleft;
156- struct completion completion;
157+ void (*flush)(struct flow_cache *fc, struct flow_cache_entry *fce);
158+ struct flow_cache * cache;
159+ atomic_t cpuleft;
160+ struct completion completion;
161 };
162-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
163
164-#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
165+#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
166+#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
167
168 static void flow_cache_new_hashrnd(unsigned long arg)
169 {
170+ struct flow_cache *fc = (struct flow_cache *) arg;
171 int i;
172
173 for_each_possible_cpu(i)
174- flow_hash_rnd_recalc(i) = 1;
175+ per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;
176
177- flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
178- add_timer(&flow_hash_rnd_timer);
179+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
180+ add_timer(&fc->rnd_timer);
181 }
182
183-static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
184-{
185- if (fle->object)
186- atomic_dec(fle->object_ref);
187- kmem_cache_free(flow_cachep, fle);
188- flow_count(cpu)--;
189-}
190-
191-static void __flow_cache_shrink(int cpu, int shrink_to)
192+static void __flow_cache_shrink(struct flow_cache *fc,
193+ struct flow_cache_percpu *fcp,
194+ int shrink_to)
195 {
196 struct flow_cache_entry *fle, **flp;
197 int i;
198
199- for (i = 0; i < flow_hash_size; i++) {
200+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
201 int k = 0;
202
203- flp = &flow_table(cpu)[i];
204+ flp = &fcp->hash_table[i];
205 while ((fle = *flp) != NULL && k < shrink_to) {
206 k++;
207 flp = &fle->next;
208 }
209 while ((fle = *flp) != NULL) {
210 *flp = fle->next;
211- flow_entry_kill(cpu, fle);
212+
213+ kmem_cache_free(fc->flow_cachep, fle);
214+ fcp->hash_count--;
215 }
216 }
217 }
218
219-static void flow_cache_shrink(int cpu)
220+static void flow_cache_shrink(struct flow_cache *fc,
221+ struct flow_cache_percpu *fcp)
222 {
223- int shrink_to = flow_lwm / flow_hash_size;
224+ int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
225
226- __flow_cache_shrink(cpu, shrink_to);
227+ __flow_cache_shrink(fc, fcp, shrink_to);
228 }
229
230-static void flow_new_hash_rnd(int cpu)
231+static void flow_new_hash_rnd(struct flow_cache *fc,
232+ struct flow_cache_percpu *fcp)
233 {
234- get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
235- flow_hash_rnd_recalc(cpu) = 0;
236-
237- __flow_cache_shrink(cpu, 0);
238+ get_random_bytes(&fcp->hash_rnd, sizeof(u32));
239+ fcp->hash_rnd_recalc = 0;
240+ __flow_cache_shrink(fc, fcp, 0);
241 }
242
243-static u32 flow_hash_code(struct flowi *key, int cpu)
244+static u32 flow_hash_code(struct flow_cache *fc,
245+ struct flow_cache_percpu *fcp,
246+ struct flowi *key)
247 {
248 u32 *k = (u32 *) key;
249
250- return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
251- (flow_hash_size - 1));
252+ return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
253+ & (flow_cache_hash_size(fc) - 1));
254 }
255
256 #if (BITS_PER_LONG == 64)
257@@ -165,128 +136,100 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
258 return 0;
259 }
260
261-void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
262- flow_resolve_t resolver)
263+struct flow_cache_entry *flow_cache_lookup(struct flow_cache *fc,
264+ struct flowi *key,
265+ u16 family, u8 dir)
266 {
267 struct flow_cache_entry *fle, **head;
268+ struct flow_cache_percpu *fcp;
269 unsigned int hash;
270- int cpu;
271
272 local_bh_disable();
273- cpu = smp_processor_id();
274+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
275
276 fle = NULL;
277 /* Packet really early in init? Making flow_cache_init a
278 * pre-smp initcall would solve this. --RR */
279- if (!flow_table(cpu))
280+ if (!fcp->hash_table)
281 goto nocache;
282
283- if (flow_hash_rnd_recalc(cpu))
284- flow_new_hash_rnd(cpu);
285- hash = flow_hash_code(key, cpu);
286+ if (fcp->hash_rnd_recalc)
287+ flow_new_hash_rnd(fc, fcp);
288+
289+ hash = flow_hash_code(fc, fcp, key);
290
291- head = &flow_table(cpu)[hash];
292+ head = &fcp->hash_table[hash];
293 for (fle = *head; fle; fle = fle->next) {
294 if (fle->family == family &&
295 fle->dir == dir &&
296 flow_key_compare(key, &fle->key) == 0) {
297- if (fle->genid == atomic_read(&flow_cache_genid)) {
298- void *ret = fle->object;
299-
300- if (ret)
301- atomic_inc(fle->object_ref);
302- local_bh_enable();
303-
304- return ret;
305- }
306- break;
307- }
308- }
309-
310- if (!fle) {
311- if (flow_count(cpu) > flow_hwm)
312- flow_cache_shrink(cpu);
313-
314- fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
315- if (fle) {
316- fle->next = *head;
317- *head = fle;
318- fle->family = family;
319- fle->dir = dir;
320- memcpy(&fle->key, key, sizeof(*key));
321- fle->object = NULL;
322- flow_count(cpu)++;
323+ return fle;
324 }
325 }
326
327-nocache:
328- {
329- int err;
330- void *obj;
331- atomic_t *obj_ref;
332-
333- err = resolver(net, key, family, dir, &obj, &obj_ref);
334+ if (fcp->hash_count > fc->high_watermark)
335+ flow_cache_shrink(fc, fcp);
336
337- if (fle && !err) {
338- fle->genid = atomic_read(&flow_cache_genid);
339+ fle = kmem_cache_zalloc(fc->flow_cachep, GFP_ATOMIC);
340+ if (!fle)
341+ goto nocache;
342
343- if (fle->object)
344- atomic_dec(fle->object_ref);
345+ fle->next = *head;
346+ *head = fle;
347+ fle->family = family;
348+ fle->dir = dir;
349+ memcpy(&fle->key, key, sizeof(*key));
350+ fcp->hash_count++;
351+ return fle;
352
353- fle->object = obj;
354- fle->object_ref = obj_ref;
355- if (obj)
356- atomic_inc(fle->object_ref);
357- }
358- local_bh_enable();
359+nocache:
360+ local_bh_enable();
361+ return NULL;
362+}
363
364- if (err)
365- obj = ERR_PTR(err);
366- return obj;
367- }
368+void flow_cache_entry_put(struct flow_cache_entry *fce)
369+{
370+ local_bh_enable();
371 }
372
373 static void flow_cache_flush_tasklet(unsigned long data)
374 {
375- struct flow_flush_info *info = (void *)data;
376+ struct flow_flush_info *info = (void *) data;
377+ struct flow_cache *fc = (void *) info->cache;
378+ struct flow_cache_percpu *fcp;
379 int i;
380- int cpu;
381
382- cpu = smp_processor_id();
383- for (i = 0; i < flow_hash_size; i++) {
384- struct flow_cache_entry *fle;
385+ if (info->flush == NULL)
386+ goto done;
387
388- fle = flow_table(cpu)[i];
389- for (; fle; fle = fle->next) {
390- unsigned genid = atomic_read(&flow_cache_genid);
391-
392- if (!fle->object || fle->genid == genid)
393- continue;
394+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
395+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
396+ struct flow_cache_entry *fle;
397
398- fle->object = NULL;
399- atomic_dec(fle->object_ref);
400- }
401+ fle = fcp->hash_table[i];
402+ for (; fle; fle = fle->next)
403+ info->flush(fc, fle);
404 }
405
406+done:
407 if (atomic_dec_and_test(&info->cpuleft))
408 complete(&info->completion);
409 }
410
411-static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
412 static void flow_cache_flush_per_cpu(void *data)
413 {
414 struct flow_flush_info *info = data;
415- int cpu;
416 struct tasklet_struct *tasklet;
417+ int cpu;
418
419 cpu = smp_processor_id();
420-
421- tasklet = flow_flush_tasklet(cpu);
422- tasklet->data = (unsigned long)info;
423+ tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
424+ tasklet->data = (unsigned long) data;
425 tasklet_schedule(tasklet);
426 }
427
428-void flow_cache_flush(void)
429+void flow_cache_flush(struct flow_cache *fc,
430+ void (*flush)(struct flow_cache *fc, struct flow_cache_entry *fce))
431 {
432 struct flow_flush_info info;
433 static DEFINE_MUTEX(flow_flush_sem);
434@@ -294,6 +237,8 @@ void flow_cache_flush(void)
435 /* Don't want cpus going down or up during this. */
436 get_online_cpus();
437 mutex_lock(&flow_flush_sem);
438+ info.cache = fc;
439+ info.flush = flush;
440 atomic_set(&info.cpuleft, num_online_cpus());
441 init_completion(&info.completion);
442
443@@ -307,62 +252,99 @@ void flow_cache_flush(void)
444 put_online_cpus();
445 }
446
447-static void __init flow_cache_cpu_prepare(int cpu)
448+static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
449+ struct flow_cache_percpu *fcp)
450+{
451+ fcp->hash_table = (struct flow_cache_entry **)
452+ __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
453+ fcp->hash_rnd_recalc = 1;
454+ fcp->hash_count = 0;
455+
456+ tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
457+}
458+
459+static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
460+ unsigned long action,
461+ void *hcpu)
462+{
463+ struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
464+ int cpu = (unsigned long) hcpu;
465+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
466+
467+ switch (action) {
468+ case CPU_UP_PREPARE:
469+ case CPU_UP_PREPARE_FROZEN:
470+ flow_cache_cpu_prepare(fc, fcp);
471+ if (!fcp->hash_table)
472+ return NOTIFY_BAD;
473+ break;
474+ case CPU_UP_CANCELED:
475+ case CPU_UP_CANCELED_FROZEN:
476+ case CPU_DEAD:
477+ case CPU_DEAD_FROZEN:
478+ if (fcp->hash_table) {
479+ __flow_cache_shrink(fc, fcp, 0);
480+ free_pages((unsigned long) fcp->hash_table, fc->order);
481+ fcp->hash_table = NULL;
482+ }
483+ break;
484+ }
485+ return NOTIFY_OK;
486+}
487+
488+int flow_cache_init(struct flow_cache *fc, size_t entry_size)
489 {
490- struct tasklet_struct *tasklet;
491 unsigned long order;
492+ int i, r;
493+
494+ BUG_ON(entry_size < sizeof(struct flow_cache_entry));
495+ fc->flow_cachep = kmem_cache_create("flow_cache",
496+ entry_size,
497+ 0, SLAB_PANIC,
498+ NULL);
499+ fc->hash_shift = 10;
500+ fc->low_watermark = 2 * flow_cache_hash_size(fc);
501+ fc->high_watermark = 4 * flow_cache_hash_size(fc);
502+ fc->percpu = alloc_percpu(struct flow_cache_percpu);
503
504 for (order = 0;
505 (PAGE_SIZE << order) <
506- (sizeof(struct flow_cache_entry *)*flow_hash_size);
507+ (sizeof(struct flow_cache_entry *) * flow_cache_hash_size(fc));
508 order++)
509 /* NOTHING */;
510+ fc->order = order;
511
512- flow_table(cpu) = (struct flow_cache_entry **)
513- __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
514- if (!flow_table(cpu))
515- panic("NET: failed to allocate flow cache order %lu\n", order);
516+ setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, (unsigned long) fc);
517+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
518+ add_timer(&fc->rnd_timer);
519
520- flow_hash_rnd_recalc(cpu) = 1;
521- flow_count(cpu) = 0;
522+ for_each_online_cpu(i) {
523+ r = flow_cache_cpu(&fc->hotcpu_notifier,
524+ CPU_UP_PREPARE, (void*) i);
525+ if (r != NOTIFY_OK)
526+ panic("NET: failed to allocate flow cache order %lu\n", order);
527+ }
528
529- tasklet = flow_flush_tasklet(cpu);
530- tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
531-}
532+ fc->hotcpu_notifier = (struct notifier_block){
533+ .notifier_call = flow_cache_cpu,
534+ };
535+ register_hotcpu_notifier(&fc->hotcpu_notifier);
536
537-static int flow_cache_cpu(struct notifier_block *nfb,
538- unsigned long action,
539- void *hcpu)
540-{
541- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
542- __flow_cache_shrink((unsigned long)hcpu, 0);
543- return NOTIFY_OK;
544+ return 0;
545 }
546
547-static int __init flow_cache_init(void)
548+void flow_cache_fini(struct flow_cache *fc)
549 {
550 int i;
551
552- flow_cachep = kmem_cache_create("flow_cache",
553- sizeof(struct flow_cache_entry),
554- 0, SLAB_PANIC,
555- NULL);
556- flow_hash_shift = 10;
557- flow_lwm = 2 * flow_hash_size;
558- flow_hwm = 4 * flow_hash_size;
559-
560- setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
561- flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
562- add_timer(&flow_hash_rnd_timer);
563+ del_timer(&fc->rnd_timer);
564+ unregister_hotcpu_notifier(&fc->hotcpu_notifier);
565
566 for_each_possible_cpu(i)
567- flow_cache_cpu_prepare(i);
568+ flow_cache_cpu(&fc->hotcpu_notifier, CPU_DEAD, (void*) i);
569
570- hotcpu_notifier(flow_cache_cpu, 0);
571- return 0;
572+ free_percpu(fc->percpu);
573+ kmem_cache_destroy(fc->flow_cachep);
574 }
575
576-module_init(flow_cache_init);
577-
578-EXPORT_SYMBOL(flow_cache_genid);
579 EXPORT_SYMBOL(flow_cache_lookup);
580diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
581index cc4797d..399853e 100644
582--- a/net/ipv6/inet6_connection_sock.c
583+++ b/net/ipv6/inet6_connection_sock.c
584@@ -151,8 +151,9 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
585
586 #ifdef CONFIG_XFRM
587 {
588+ struct net *net = sock_net(sk);
589 struct rt6_info *rt = (struct rt6_info *)dst;
590- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
591+ rt->rt6i_flow_cache_genid = atomic_read(&net->xfrm.policy_genid);
592 }
593 #endif
594 }
595@@ -166,8 +167,9 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
596
597 #ifdef CONFIG_XFRM
598 if (dst) {
599+ struct net *net = sock_net(sk);
600 struct rt6_info *rt = (struct rt6_info *)dst;
601- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
602+ if (rt->rt6i_flow_cache_genid != atomic_read(&net->xfrm.policy_genid)) {
603 sk->sk_dst_cache = NULL;
604 dst_release(dst);
605 dst = NULL;
606diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
607index cb81ca3..82b01c3 100644
608--- a/net/xfrm/xfrm_policy.c
609+++ b/net/xfrm/xfrm_policy.c
610@@ -44,7 +44,6 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
611
612 static struct kmem_cache *xfrm_dst_cache __read_mostly;
613
614-static HLIST_HEAD(xfrm_policy_gc_list);
615 static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
616
617 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
618@@ -53,6 +52,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst);
619
620 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
621 int dir);
622+static int stale_bundle(struct dst_entry *dst);
623
624 static inline int
625 __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
626@@ -216,6 +216,35 @@ expired:
627 xfrm_pol_put(xp);
628 }
629
630+struct xfrm_flow_cache_entry {
631+ struct flow_cache_entry fce;
632+ struct xfrm_policy *policy;
633+ struct xfrm_dst *dst;
634+ u32 policy_genid, bundles_genid;
635+};
636+#define XFRM_CACHE_NO_POLICY ((struct xfrm_policy *) -1)
637+
638+void xfrm_flow_cache_entry_validate(struct flow_cache *fc,
639+ struct flow_cache_entry *fce)
640+{
641+ struct net *net = container_of(fc, struct net, xfrm.flow_cache);
642+ struct xfrm_flow_cache_entry *xfc =
643+ container_of(fce, struct xfrm_flow_cache_entry, fce);
644+
645+ if (xfc->policy_genid != atomic_read(&net->xfrm.policy_genid))
646+ goto invalid;
647+ if (xfc->policy == NULL || xfc->policy == XFRM_CACHE_NO_POLICY)
648+ return;
649+ if (xfc->policy->walk.dead)
650+ goto invalid;
651+ if (xfc->bundles_genid != atomic_read(&xfc->policy->bundles_genid))
652+ goto invalid_dst;
653+ return;
654+invalid:
655+ xfc->policy = NULL;
656+invalid_dst:
657+ xfc->dst = NULL;
658+}
659
660 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
661 * SPD calls.
662@@ -269,27 +298,26 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
663 if (del_timer(&policy->timer))
664 atomic_dec(&policy->refcnt);
665
666- if (atomic_read(&policy->refcnt) > 1)
667- flow_cache_flush();
668-
669 xfrm_pol_put(policy);
670 }
671
672 static void xfrm_policy_gc_task(struct work_struct *work)
673 {
674+ struct net *net = container_of(work, struct net, xfrm.policy_gc_work);
675 struct xfrm_policy *policy;
676 struct hlist_node *entry, *tmp;
677 struct hlist_head gc_list;
678
679 spin_lock_bh(&xfrm_policy_gc_lock);
680- gc_list.first = xfrm_policy_gc_list.first;
681- INIT_HLIST_HEAD(&xfrm_policy_gc_list);
682+ gc_list.first = net->xfrm.policy_gc_list.first;
683+ INIT_HLIST_HEAD(&net->xfrm.policy_gc_list);
684 spin_unlock_bh(&xfrm_policy_gc_lock);
685
686+ flow_cache_flush(&net->xfrm.flow_cache, xfrm_flow_cache_entry_validate);
687+
688 hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
689 xfrm_policy_gc_kill(policy);
690 }
691-static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
692
693 /* Rule must be locked. Release descentant resources, announce
694 * entry dead. The rule must be unlinked from lists to the moment.
695@@ -297,6 +325,7 @@ static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
696
697 static void xfrm_policy_kill(struct xfrm_policy *policy)
698 {
699+ struct net *net = xp_net(policy);
700 int dead;
701
702 write_lock_bh(&policy->lock);
703@@ -310,10 +339,10 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
704 }
705
706 spin_lock_bh(&xfrm_policy_gc_lock);
707- hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
708+ hlist_add_head(&policy->bydst, &net->xfrm.policy_gc_list);
709 spin_unlock_bh(&xfrm_policy_gc_lock);
710
711- schedule_work(&xfrm_policy_gc_work);
712+ schedule_work(&net->xfrm.policy_gc_work);
713 }
714
715 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
716@@ -586,7 +615,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
717 hlist_add_head(&policy->bydst, chain);
718 xfrm_pol_hold(policy);
719 net->xfrm.policy_count[dir]++;
720- atomic_inc(&flow_cache_genid);
721+ atomic_inc(&net->xfrm.policy_genid);
722 if (delpol)
723 __xfrm_policy_unlink(delpol, dir);
724 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
725@@ -619,11 +648,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
726 gc_list = dst;
727
728 policy->bundles = NULL;
729+ atomic_inc(&policy->bundles_genid);
730 }
731 write_unlock(&policy->lock);
732 }
733 read_unlock_bh(&xfrm_policy_lock);
734
735+ flow_cache_flush(&net->xfrm.flow_cache, NULL);
736 while (gc_list) {
737 struct dst_entry *dst = gc_list;
738
739@@ -669,7 +700,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
740 write_unlock_bh(&xfrm_policy_lock);
741
742 if (ret && delete) {
743- atomic_inc(&flow_cache_genid);
744+ atomic_inc(&net->xfrm.policy_genid);
745 xfrm_policy_kill(ret);
746 }
747 return ret;
748@@ -710,7 +741,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
749 write_unlock_bh(&xfrm_policy_lock);
750
751 if (ret && delete) {
752- atomic_inc(&flow_cache_genid);
753+ atomic_inc(&net->xfrm.policy_genid);
754 xfrm_policy_kill(ret);
755 }
756 return ret;
757@@ -824,7 +855,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
758 }
759
760 }
761- atomic_inc(&flow_cache_genid);
762+ atomic_inc(&net->xfrm.policy_genid);
763 out:
764 write_unlock_bh(&xfrm_policy_lock);
765 return err;
766@@ -977,32 +1008,18 @@ fail:
767 return ret;
768 }
769
770-static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
771- u8 dir, void **objp, atomic_t **obj_refp)
772+static struct xfrm_policy *xfrm_policy_lookup(
773+ struct net *net, struct flowi *fl,
774+ u16 family, u8 dir)
775 {
776+#ifdef CONFIG_XFRM_SUB_POLICY
777 struct xfrm_policy *pol;
778- int err = 0;
779
780-#ifdef CONFIG_XFRM_SUB_POLICY
781 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
782- if (IS_ERR(pol)) {
783- err = PTR_ERR(pol);
784- pol = NULL;
785- }
786- if (pol || err)
787- goto end;
788+ if (pol != NULL)
789+ return pol;
790 #endif
791- pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
792- if (IS_ERR(pol)) {
793- err = PTR_ERR(pol);
794- pol = NULL;
795- }
796-#ifdef CONFIG_XFRM_SUB_POLICY
797-end:
798-#endif
799- if ((*objp = (void *) pol) != NULL)
800- *obj_refp = &pol->refcnt;
801- return err;
802+ return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
803 }
804
805 static inline int policy_to_flow_dir(int dir)
806@@ -1083,12 +1100,14 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
807
808 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
809 {
810+ struct net *net = xp_net(pol);
811+
812 write_lock_bh(&xfrm_policy_lock);
813 pol = __xfrm_policy_unlink(pol, dir);
814 write_unlock_bh(&xfrm_policy_lock);
815 if (pol) {
816 if (dir < XFRM_POLICY_MAX)
817- atomic_inc(&flow_cache_genid);
818+ atomic_inc(&net->xfrm.policy_genid);
819 xfrm_policy_kill(pol);
820 return 0;
821 }
822@@ -1512,13 +1531,34 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
823 #endif
824 }
825
826-static int stale_bundle(struct dst_entry *dst);
827-
828 /* Main function: finds/creates a bundle for given flow.
829 *
830 * At the moment we eat a raw IP route. Mostly to speed up lookups
831 * on interfaces with disabled IPsec.
832 */
833+
834+static void xfrm_flow_cache_update(struct net *net, struct flowi *key,
835+ u16 family, u8 dir,
836+ struct xfrm_policy *pol,
837+ struct xfrm_dst *dst)
838+{
839+ struct flow_cache_entry *fce;
840+ struct xfrm_flow_cache_entry *xf;
841+
842+ fce = flow_cache_lookup(&net->xfrm.flow_cache,
843+ key, family, dir);
844+ if (fce == NULL)
845+ return;
846+
847+ xf = container_of(fce, struct xfrm_flow_cache_entry, fce);
848+ xf->policy_genid = atomic_read(&net->xfrm.policy_genid);
849+ xf->policy = pol;
850+ if (dst != NULL)
851+ xf->bundles_genid = atomic_read(&pol->bundles_genid);
852+ xf->dst = dst;
853+ flow_cache_entry_put(fce);
854+}
855+
856 int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
857 struct sock *sk, int flags)
858 {
859@@ -1537,8 +1577,10 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
860 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
861
862 restart:
863- genid = atomic_read(&flow_cache_genid);
864+ family = dst_orig->ops->family;
865+ genid = atomic_read(&net->xfrm.policy_genid);
866 policy = NULL;
867+ dst = NULL;
868 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
869 pols[pi] = NULL;
870 npols = 0;
871@@ -1555,24 +1597,51 @@ restart:
872 }
873
874 if (!policy) {
875+ struct flow_cache_entry *fce;
876+ struct xfrm_flow_cache_entry *xf;
877+
878 /* To accelerate a bit... */
879 if ((dst_orig->flags & DST_NOXFRM) ||
880 !net->xfrm.policy_count[XFRM_POLICY_OUT])
881 goto nopol;
882
883- policy = flow_cache_lookup(net, fl, dst_orig->ops->family,
884- dir, xfrm_policy_lookup);
885- err = PTR_ERR(policy);
886- if (IS_ERR(policy)) {
887- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
888- goto dropdst;
889+ fce = flow_cache_lookup(&net->xfrm.flow_cache,
890+ fl, family, dir);
891+ if (fce == NULL)
892+ goto no_cache;
893+
894+ xf = container_of(fce, struct xfrm_flow_cache_entry, fce);
895+ xfrm_flow_cache_entry_validate(&net->xfrm.flow_cache, fce);
896+ if (xf->policy != NULL) {
897+ policy = xf->policy;
898+ if (policy != XFRM_CACHE_NO_POLICY)
899+ xfrm_pol_hold(policy);
900+ if (xf->dst != NULL)
901+ dst = dst_clone((struct dst_entry *) xf->dst);
902+ }
903+ flow_cache_entry_put(fce);
904+ if (policy == XFRM_CACHE_NO_POLICY)
905+ goto nopol;
906+ if (dst && !xfrm_bundle_ok(policy, (struct xfrm_dst *) dst, fl, family, 0)) {
907+ dst_release(dst);
908+ dst = NULL;
909 }
910 }
911+no_cache:
912+ if (!policy) {
913+ policy = xfrm_policy_lookup(net, fl, family, dir);
914+ if (!policy) {
915+ xfrm_flow_cache_update(
916+ net, fl, family, dir,
917+ XFRM_CACHE_NO_POLICY, NULL);
918+ goto nopol;
919+ }
920+ }
921+ if (IS_ERR(policy)) {
922+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
923+ goto dropdst;
924+ }
925
926- if (!policy)
927- goto nopol;
928-
929- family = dst_orig->ops->family;
930 pols[0] = policy;
931 npols ++;
932 xfrm_nr += pols[0]->xfrm_nr;
933@@ -1583,6 +1652,9 @@ restart:
934
935 policy->curlft.use_time = get_seconds();
936
937+ if (dst)
938+ goto dst_found;
939+
940 switch (policy->action) {
941 default:
942 case XFRM_POLICY_BLOCK:
943@@ -1593,18 +1665,11 @@ restart:
944
945 case XFRM_POLICY_ALLOW:
946 #ifndef CONFIG_XFRM_SUB_POLICY
947- if (policy->xfrm_nr == 0) {
948- /* Flow passes not transformed. */
949- xfrm_pol_put(policy);
950- return 0;
951- }
952+ if (policy->xfrm_nr == 0)
953+ goto no_transform;
954 #endif
955
956- /* Try to find matching bundle.
957- *
958- * LATER: help from flow cache. It is optional, this
959- * is required only for output policy.
960- */
961+ /* Try to find matching bundle the hard way. */
962 dst = xfrm_find_bundle(fl, policy, family);
963 if (IS_ERR(dst)) {
964 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
965@@ -1644,12 +1709,8 @@ restart:
966 * they are searched. See above not-transformed bypass
967 * is surrounded by non-sub policy configuration, too.
968 */
969- if (xfrm_nr == 0) {
970- /* Flow passes not transformed. */
971- xfrm_pols_put(pols, npols);
972- return 0;
973- }
974-
975+ if (xfrm_nr == 0)
976+ goto no_transform;
977 #endif
978 nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
979
980@@ -1680,7 +1741,7 @@ restart:
981 goto error;
982 }
983 if (nx == -EAGAIN ||
984- genid != atomic_read(&flow_cache_genid)) {
985+ genid != atomic_read(&net->xfrm.policy_genid)) {
986 xfrm_pols_put(pols, npols);
987 goto restart;
988 }
989@@ -1691,11 +1752,8 @@ restart:
990 goto error;
991 }
992 }
993- if (nx == 0) {
994- /* Flow passes not transformed. */
995- xfrm_pols_put(pols, npols);
996- return 0;
997- }
998+ if (nx == 0)
999+ goto no_transform;
1000
1001 dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
1002 err = PTR_ERR(dst);
1003@@ -1744,6 +1802,9 @@ restart:
1004 dst_hold(dst);
1005 write_unlock_bh(&policy->lock);
1006 }
1007+ xfrm_flow_cache_update(net, fl, family, dir,
1008+ policy, (struct xfrm_dst *) dst);
1009+dst_found:
1010 *dst_p = dst;
1011 dst_release(dst_orig);
1012 xfrm_pols_put(pols, npols);
1013@@ -1761,7 +1822,12 @@ nopol:
1014 if (flags & XFRM_LOOKUP_ICMP)
1015 goto dropdst;
1016 return 0;
1017+no_transform:
1018+ /* Flow passes not transformed. */
1019+ xfrm_pols_put(pols, npols);
1020+ return 0;
1021 }
1022+
1023 EXPORT_SYMBOL(__xfrm_lookup);
1024
1025 int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
1026@@ -1919,10 +1985,35 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1027 }
1028 }
1029
1030- if (!pol)
1031- pol = flow_cache_lookup(net, &fl, family, fl_dir,
1032- xfrm_policy_lookup);
1033-
1034+ if (!pol) {
1035+ struct flow_cache_entry *fce;
1036+ struct xfrm_flow_cache_entry *xf;
1037+
1038+ fce = flow_cache_lookup(&net->xfrm.flow_cache,
1039+ &fl, family, dir);
1040+ if (fce != NULL) {
1041+ xf = container_of(fce, struct xfrm_flow_cache_entry, fce);
1042+ xfrm_flow_cache_entry_validate(&net->xfrm.flow_cache, fce);
1043+ if (xf->policy != NULL) {
1044+ pol = xf->policy;
1045+ if (pol != XFRM_CACHE_NO_POLICY)
1046+ xfrm_pol_hold(pol);
1047+ else
1048+ pol = NULL;
1049+ } else {
1050+ pol = xfrm_policy_lookup(net, &fl, family, dir);
1051+ if (!IS_ERR(pol)) {
1052+ if (pol)
1053+ xf->policy = pol;
1054+ else
1055+ xf->policy = XFRM_CACHE_NO_POLICY;
1056+ }
1057+ xf->dst = NULL;
1058+ xf->policy_genid = atomic_read(&net->xfrm.policy_genid);
1059+ }
1060+ flow_cache_entry_put(fce);
1061+ }
1062+ }
1063 if (IS_ERR(pol)) {
1064 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
1065 return 0;
1066@@ -2121,6 +2212,7 @@ static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_ent
1067 dstp = &dst->next;
1068 }
1069 }
1070+ atomic_inc(&pol->bundles_genid);
1071 write_unlock(&pol->lock);
1072 }
1073
1074@@ -2148,6 +2240,7 @@ static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
1075 }
1076 read_unlock_bh(&xfrm_policy_lock);
1077
1078+ flow_cache_flush(&net->xfrm.flow_cache, NULL);
1079 while (gc_list) {
1080 struct dst_entry *dst = gc_list;
1081 gc_list = dst->next;
1082@@ -2428,6 +2521,9 @@ static int __net_init xfrm_policy_init(struct net *net)
1083
1084 INIT_LIST_HEAD(&net->xfrm.policy_all);
1085 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
1086+ INIT_HLIST_HEAD(&net->xfrm.policy_gc_list);
1087+ INIT_WORK(&net->xfrm.policy_gc_work, xfrm_policy_gc_task);
1088+ flow_cache_init(&net->xfrm.flow_cache, sizeof(struct xfrm_flow_cache_entry));
1089 if (net_eq(net, &init_net))
1090 register_netdevice_notifier(&xfrm_dev_notifier);
1091 return 0;
1092@@ -2461,7 +2557,7 @@ static void xfrm_policy_fini(struct net *net)
1093 audit_info.sessionid = -1;
1094 audit_info.secid = 0;
1095 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
1096- flush_work(&xfrm_policy_gc_work);
1097+ flush_work(&net->xfrm.policy_gc_work);
1098
1099 WARN_ON(!list_empty(&net->xfrm.policy_all));
1100
1101@@ -2479,6 +2575,8 @@ static void xfrm_policy_fini(struct net *net)
1102 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
1103 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
1104 xfrm_hash_free(net->xfrm.policy_byidx, sz);
1105+
1106+ flow_cache_fini(&net->xfrm.flow_cache);
1107 }
1108
1109 static int __net_init xfrm_net_init(struct net *net)
1110@@ -2685,8 +2783,9 @@ static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
1111 static int xfrm_policy_migrate(struct xfrm_policy *pol,
1112 struct xfrm_migrate *m, int num_migrate)
1113 {
1114+ struct net *net = xp_net(pol);
1115 struct xfrm_migrate *mp;
1116- struct dst_entry *dst;
1117+ struct dst_entry *gc_list = NULL, *tail;
1118 int i, j, n = 0;
1119
1120 write_lock_bh(&pol->lock);
1121@@ -2711,15 +2810,25 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
1122 sizeof(pol->xfrm_vec[i].saddr));
1123 pol->xfrm_vec[i].encap_family = mp->new_family;
1124 /* flush bundles */
1125- while ((dst = pol->bundles) != NULL) {
1126- pol->bundles = dst->next;
1127- dst_free(dst);
1128- }
1129+ tail = pol->bundles;
1130+ while (tail->next)
1131+ tail = tail->next;
1132+ tail->next = gc_list;
1133+ gc_list = pol->bundles;
1134+ pol->bundles = NULL;
1135+ atomic_inc(&pol->bundles_genid);
1136 }
1137 }
1138-
1139 write_unlock_bh(&pol->lock);
1140
1141+ flow_cache_flush(&net->xfrm.flow_cache, NULL);
1142+ while (gc_list) {
1143+ struct dst_entry *dst = gc_list;
1144+
1145+ gc_list = dst->next;
1146+ dst_free(dst);
1147+ }
1148+
1149 if (!n)
1150 return -ENODATA;
1151
1152--
11531.7.0.2
1154
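
The second deleted patch reworked the flow cache into a per-namespace structure whose lookup hands back an embedded flow_cache_entry; the xfrm code recovers its containing entry with container_of, fills in the cached policy and bundle generation counters, then releases it with flow_cache_entry_put(). A rough userspace sketch of that embed-and-put pattern, with assumed names and a toy hash rather than the kernel's jhash-based one:

/*
 * Userspace sketch (illustrative only) of the lookup/put pattern used by the
 * deleted flow-cache patch: the generic cache stores only flow_cache_entry,
 * and the user of the cache embeds that entry in its own larger structure.
 * In the kernel patch, lookup ran with bottom halves disabled and the put
 * re-enabled them; here put is a no-op placeholder.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define HASH_SIZE 16

struct flowi { unsigned int saddr, daddr; };

struct flow_cache_entry {
    struct flow_cache_entry *next;
    struct flowi key;
};

struct xfrm_flow_cache_entry {        /* cache user embeds the generic entry */
    struct flow_cache_entry fce;
    int policy_id;                    /* stand-in for the cached policy ref  */
};

static struct flow_cache_entry *hash_table[HASH_SIZE];

static unsigned int flow_hash(const struct flowi *key)
{
    return (key->saddr ^ key->daddr) & (HASH_SIZE - 1);
}

/* Find or insert an entry for this key; the caller fills in its own fields. */
static struct flow_cache_entry *flow_cache_lookup(const struct flowi *key)
{
    struct flow_cache_entry **head = &hash_table[flow_hash(key)];
    struct flow_cache_entry *fle;
    struct xfrm_flow_cache_entry *xfc;

    for (fle = *head; fle; fle = fle->next)
        if (memcmp(&fle->key, key, sizeof(*key)) == 0)
            return fle;

    xfc = calloc(1, sizeof(*xfc));
    if (!xfc)
        return NULL;
    xfc->fce.key = *key;
    xfc->fce.next = *head;
    *head = &xfc->fce;
    return &xfc->fce;
}

static void flow_cache_entry_put(struct flow_cache_entry *fce)
{
    (void)fce;  /* in the kernel patch this re-enabled bottom halves */
}

int main(void)
{
    struct flowi key = { .saddr = 1, .daddr = 2 };
    struct flow_cache_entry *fce = flow_cache_lookup(&key);

    if (fce) {
        struct xfrm_flow_cache_entry *xfc =
            container_of(fce, struct xfrm_flow_cache_entry, fce);
        xfc->policy_id = 42;          /* cache the resolved policy */
        flow_cache_entry_put(fce);
        printf("cached policy id: %d\n", xfc->policy_id);
    }
    return 0;
}

Embedding the generic entry is what lets the xfrm layer attach its policy pointer, bundle pointer, and generation counters to each cache slot without the core flow cache knowing anything about xfrm.
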