openwrt.git: target/linux/generic-2.6/patches/130-netfilter-ipset.patch
1 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set.h
2 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set.h 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set.h 2006-12-14 03:13:43.000000000 +0100
4 @@ -0,0 +1,489 @@
5 +#ifndef _IP_SET_H
6 +#define _IP_SET_H
7 +
8 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
9 + * Patrick Schaaf <bof@bof.de>
10 + * Martin Josefsson <gandalf@wlug.westbo.se>
11 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
12 + *
13 + * This program is free software; you can redistribute it and/or modify
14 + * it under the terms of the GNU General Public License version 2 as
15 + * published by the Free Software Foundation.
16 + */
17 +
18 +/*
19 + * A sockopt of such quality has hardly ever been seen before on the open
20 + * market! This little beauty, hardly ever used: above 64, so it's
21 + * traditionally used for firewalling, not touched (even once!) by the
22 + * 2.0, 2.2 and 2.4 kernels!
23 + *
24 + * Comes with its own certificate of authenticity, valid anywhere in the
25 + * Free world!
26 + *
27 + * Rusty, 19.4.2000
28 + */
29 +#define SO_IP_SET 83
30 +
31 +/*
32 + * Heavily modified by Joakim Axelsson 08.03.2002
33 + * - Made it more module-based
34 + *
35 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
36 + * - bindings added
37 + * - in order to "deal with" backward compatibility, renamed to ipset
38 + */
39 +
40 +/*
41 + * Used so that the kernel module and ipset-binary can match their versions
42 + */
43 +#define IP_SET_PROTOCOL_VERSION 2
44 +
45 +#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
46 +
47 +/* Let's work with our own typedef for representing an IP address.
48 + * We hope to make the code more portable, possibly to IPv6...
49 + *
50 + * The representation works in HOST byte order, because most set types
51 + * will perform arithmetic operations and compare operations.
52 + *
53 + * For now the type is a uint32_t.
54 + *
55 + * Make sure to ONLY use the functions when translating and parsing
56 + * in order to keep the host byte order and make it more portable:
57 + * parse_ip()
58 + * parse_mask()
59 + * parse_ipandmask()
60 + * ip_tostring()
61 + * (Joakim: where are they???)
62 + */
63 +
64 +typedef uint32_t ip_set_ip_t;
65 +
66 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
67 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
68 + */
69 +typedef uint16_t ip_set_id_t;
70 +
71 +#define IP_SET_INVALID_ID 65535
72 +
73 +/* How deep we follow bindings */
74 +#define IP_SET_MAX_BINDINGS 6
75 +
76 +/*
77 + * Option flags for kernel operations (ipt_set_info)
78 + */
79 +#define IPSET_SRC 0x01 /* Source match/add */
80 +#define IPSET_DST 0x02 /* Destination match/add */
81 +#define IPSET_MATCH_INV 0x04 /* Inverse matching */
82 +
83 +/*
84 + * Set types (flavours)
85 + */
86 +#define IPSET_TYPE_IP 0 /* IP address type of set */
87 +#define IPSET_TYPE_PORT 1 /* Port type of set */
88 +
89 +/* Reserved keywords */
90 +#define IPSET_TOKEN_DEFAULT ":default:"
91 +#define IPSET_TOKEN_ALL ":all:"
92 +
93 +/* SO_IP_SET operation constants, and their request struct types.
94 + *
95 + * Operation ids:
96 + * 0-99: commands with version checking
97 + * 100-199: add/del/test/bind/unbind
98 + * 200-299: list, save, restore
99 + */
100 +
101 +/* Single shot operations:
102 + * version, create, destroy, flush, rename and swap
103 + *
104 + * Sets are identified by name.
105 + */
106 +
107 +#define IP_SET_REQ_STD \
108 + unsigned op; \
109 + unsigned version; \
110 + char name[IP_SET_MAXNAMELEN]
111 +
112 +#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
113 +struct ip_set_req_create {
114 + IP_SET_REQ_STD;
115 + char typename[IP_SET_MAXNAMELEN];
116 +};
117 +
118 +#define IP_SET_OP_DESTROY 0x00000002 /* Remove an (empty) set */
119 +struct ip_set_req_std {
120 + IP_SET_REQ_STD;
121 +};
122 +
123 +#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
124 +/* Uses ip_set_req_std */
125 +
126 +#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
127 +/* Uses ip_set_req_create */
128 +
129 +#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
130 +/* Uses ip_set_req_create */
131 +
132 +union ip_set_name_index {
133 + char name[IP_SET_MAXNAMELEN];
134 + ip_set_id_t index;
135 +};
136 +
137 +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
138 +struct ip_set_req_get_set {
139 + unsigned op;
140 + unsigned version;
141 + union ip_set_name_index set;
142 +};
143 +
144 +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
145 +/* Uses ip_set_req_get_set */
146 +
147 +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
148 +struct ip_set_req_version {
149 + unsigned op;
150 + unsigned version;
151 +};
152 +
153 +/* Double shot operations:
154 + * add, del, test, bind and unbind.
155 + *
156 + * First we query the kernel to get the index and type of the target set,
157 + * then issue the command. The validity of the IP is checked in the kernel
158 + * in order to minimize sockopt operations.
159 + */
160 +
161 +/* Get minimal set data for add/del/test/bind/unbind IP */
162 +#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
163 +struct ip_set_req_adt_get {
164 + unsigned op;
165 + unsigned version;
166 + union ip_set_name_index set;
167 + char typename[IP_SET_MAXNAMELEN];
168 +};
169 +
170 +#define IP_SET_REQ_BYINDEX \
171 + unsigned op; \
172 + ip_set_id_t index;
173 +
174 +struct ip_set_req_adt {
175 + IP_SET_REQ_BYINDEX;
176 +};
177 +
178 +#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
179 +/* Uses ip_set_req_adt, with type specific addage */
180 +
181 +#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
182 +/* Uses ip_set_req_adt, with type specific addage */
183 +
184 +#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
185 +/* Uses ip_set_req_adt, with type specific addage */
186 +
187 +#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
188 +/* Uses ip_set_req_bind, with type specific addage */
189 +struct ip_set_req_bind {
190 + IP_SET_REQ_BYINDEX;
191 + char binding[IP_SET_MAXNAMELEN];
192 +};
193 +
194 +#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
195 +/* Uses ip_set_req_bind, with type specific addage
196 + * index = 0 means unbinding for all sets */
197 +
198 +#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
199 +/* Uses ip_set_req_bind, with type specific addage */
200 +
201 +/* Multiple shot operations: list, save, restore.
202 + *
203 + * - check kernel version and query the max number of sets
204 + * - get the basic information on all sets
205 + * and size required for the next step
206 + * - get actual set data: header, data, bindings
207 + */
208 +
209 +/* Get max_sets and the index of a queried set
210 + */
211 +#define IP_SET_OP_MAX_SETS 0x00000020
212 +struct ip_set_req_max_sets {
213 + unsigned op;
214 + unsigned version;
215 + ip_set_id_t max_sets; /* max_sets */
216 + ip_set_id_t sets; /* real number of sets */
217 + union ip_set_name_index set; /* index of set if name used */
218 +};
219 +
220 +/* Get the id and name of the sets plus size for next step */
221 +#define IP_SET_OP_LIST_SIZE 0x00000201
222 +#define IP_SET_OP_SAVE_SIZE 0x00000202
223 +struct ip_set_req_setnames {
224 + unsigned op;
225 + ip_set_id_t index; /* set to list/save */
226 + size_t size; /* size to get setdata/bindings */
227 + /* followed by sets number of struct ip_set_name_list */
228 +};
229 +
230 +struct ip_set_name_list {
231 + char name[IP_SET_MAXNAMELEN];
232 + char typename[IP_SET_MAXNAMELEN];
233 + ip_set_id_t index;
234 + ip_set_id_t id;
235 +};
236 +
237 +/* The actual list operation */
238 +#define IP_SET_OP_LIST 0x00000203
239 +struct ip_set_req_list {
240 + IP_SET_REQ_BYINDEX;
241 + /* sets number of struct ip_set_list in reply */
242 +};
243 +
244 +struct ip_set_list {
245 + ip_set_id_t index;
246 + ip_set_id_t binding;
247 + u_int32_t ref;
248 + size_t header_size; /* Set header data of header_size */
249 + size_t members_size; /* Set members data of members_size */
250 + size_t bindings_size; /* Set bindings data of bindings_size */
251 +};
252 +
253 +struct ip_set_hash_list {
254 + ip_set_ip_t ip;
255 + ip_set_id_t binding;
256 +};
257 +
258 +/* The save operation */
259 +#define IP_SET_OP_SAVE 0x00000204
260 +/* Uses ip_set_req_list, in the reply replaced by
261 + * sets number of struct ip_set_save plus a marker
262 + * ip_set_save followed by ip_set_hash_save structures.
263 + */
264 +struct ip_set_save {
265 + ip_set_id_t index;
266 + ip_set_id_t binding;
267 + size_t header_size; /* Set header data of header_size */
268 + size_t members_size; /* Set members data of members_size */
269 +};
270 +
271 +/* At restoring, ip == 0 means default binding for the given set: */
272 +struct ip_set_hash_save {
273 + ip_set_ip_t ip;
274 + ip_set_id_t id;
275 + ip_set_id_t binding;
276 +};
277 +
278 +/* The restore operation */
279 +#define IP_SET_OP_RESTORE 0x00000205
280 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
281 + * plus a marker ip_set_restore, followed by ip_set_hash_save
282 + * structures.
283 + */
284 +struct ip_set_restore {
285 + char name[IP_SET_MAXNAMELEN];
286 + char typename[IP_SET_MAXNAMELEN];
287 + ip_set_id_t index;
288 + size_t header_size; /* Create data of header_size */
289 + size_t members_size; /* Set members data of members_size */
290 +};
291 +
292 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
293 +{
294 + return 4 * ((((b - a + 8) / 8) + 3) / 4);
295 +}
296 +
297 +#ifdef __KERNEL__
298 +
299 +#define ip_set_printk(format, args...) \
300 + do { \
301 + printk("%s: %s: ", __FILE__, __FUNCTION__); \
302 + printk(format "\n" , ## args); \
303 + } while (0)
304 +
305 +#if defined(IP_SET_DEBUG)
306 +#define DP(format, args...) \
307 + do { \
308 + printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
309 + printk(format "\n" , ## args); \
310 + } while (0)
311 +#define IP_SET_ASSERT(x) \
312 + do { \
313 + if (!(x)) \
314 + printk("IP_SET_ASSERT: %s:%i(%s)\n", \
315 + __FILE__, __LINE__, __FUNCTION__); \
316 + } while (0)
317 +#else
318 +#define DP(format, args...)
319 +#define IP_SET_ASSERT(x)
320 +#endif
321 +
322 +struct ip_set;
323 +
324 +/*
325 + * The ip_set_type definition - one per set type, e.g. "ipmap".
326 + *
327 + * Each individual set has a pointer, set->type, going to one
328 + * of these structures. Function pointers inside the structure implement
329 + * the real behaviour of the sets.
330 + *
331 + * If not mentioned differently, the implementation behind the function
332 + * pointers of a set_type, is expected to return 0 if ok, and a negative
333 + * errno (e.g. -EINVAL) on error.
334 + */
335 +struct ip_set_type {
336 + struct list_head list; /* next in list of set types */
337 +
338 + /* test for IP in set (kernel: iptables -m set src|dst)
339 + * return 0 if not in set, 1 if in set.
340 + */
341 + int (*testip_kernel) (struct ip_set *set,
342 + const struct sk_buff * skb,
343 + u_int32_t flags,
344 + ip_set_ip_t *ip);
345 +
346 + /* test for IP in set (userspace: ipset -T set IP)
347 + * return 0 if not in set, 1 if in set.
348 + */
349 + int (*testip) (struct ip_set *set,
350 + const void *data, size_t size,
351 + ip_set_ip_t *ip);
352 +
353 + /*
354 + * Size of the data structure passed in when
355 + * adding/deleting/testing an entry.
356 + */
357 + size_t reqsize;
358 +
359 + /* Add IP into set (userspace: ipset -A set IP)
360 + * Return -EEXIST if the address is already in the set,
361 + * and -ERANGE if the address lies outside the set bounds.
362 + * If the address was not already in the set, 0 is returned.
363 + */
364 + int (*addip) (struct ip_set *set,
365 + const void *data, size_t size,
366 + ip_set_ip_t *ip);
367 +
368 + /* Add IP into set (kernel: iptables ... -j SET set src|dst)
369 + * Return -EEXIST if the address is already in the set,
370 + * and -ERANGE if the address lies outside the set bounds.
371 + * If the address was not already in the set, 0 is returned.
372 + */
373 + int (*addip_kernel) (struct ip_set *set,
374 + const struct sk_buff * skb,
375 + u_int32_t flags,
376 + ip_set_ip_t *ip);
377 +
378 + /* remove IP from set (userspace: ipset -D set --entry x)
379 + * Return -EEXIST if the address is NOT in the set,
380 + * and -ERANGE if the address lies outside the set bounds.
381 + * If the address really was in the set, 0 is returned.
382 + */
383 + int (*delip) (struct ip_set *set,
384 + const void *data, size_t size,
385 + ip_set_ip_t *ip);
386 +
387 + /* remove IP from set (kernel: iptables ... -j SET --entry x)
388 + * Return -EEXIST if the address is NOT in the set,
389 + * and -ERANGE if the address lies outside the set bounds.
390 + * If the address really was in the set, 0 is returned.
391 + */
392 + int (*delip_kernel) (struct ip_set *set,
393 + const struct sk_buff * skb,
394 + u_int32_t flags,
395 + ip_set_ip_t *ip);
396 +
397 + /* new set creation - allocate type specific items
398 + */
399 + int (*create) (struct ip_set *set,
400 + const void *data, size_t size);
401 +
402 + /* retry the operation after successfully tweaking the set
403 + */
404 + int (*retry) (struct ip_set *set);
405 +
406 + /* set destruction - free type specific items
407 + * There is no return value.
408 + * Can be called only when child sets are destroyed.
409 + */
410 + void (*destroy) (struct ip_set *set);
411 +
412 + /* set flushing - reset all bits in the set, or something similar.
413 + * There is no return value.
414 + */
415 + void (*flush) (struct ip_set *set);
416 +
417 + /* Listing: size needed for header
418 + */
419 + size_t header_size;
420 +
421 + /* Listing: Get the header
422 + *
423 + * Fill in the information in "data".
424 + * This function is always run after list_header_size() under a
425 + * writelock on the set. Therefore the length of "data" is always
426 + * correct.
427 + */
428 + void (*list_header) (const struct ip_set *set,
429 + void *data);
430 +
431 + /* Listing: Get the size for the set members
432 + */
433 + int (*list_members_size) (const struct ip_set *set);
434 +
435 + /* Listing: Get the set members
436 + *
437 + * Fill in the information in "data".
438 + * This function is always run after list_member_size() under a
439 + * writelock on the set. Therefore the length of "data" is always
440 + * correct.
441 + */
442 + void (*list_members) (const struct ip_set *set,
443 + void *data);
444 +
445 + char typename[IP_SET_MAXNAMELEN];
446 + char typecode;
447 + int protocol_version;
448 +
449 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
450 + struct module *me;
451 +};
452 +
453 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
454 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
455 +
456 +/* A generic ipset */
457 +struct ip_set {
458 + char name[IP_SET_MAXNAMELEN]; /* the name of the set */
459 + rwlock_t lock; /* lock for concurrency control */
460 + ip_set_id_t id; /* set id for swapping */
461 + ip_set_id_t binding; /* default binding for the set */
462 + atomic_t ref; /* in kernel and in hash references */
463 + struct ip_set_type *type; /* the set types */
464 + void *data; /* pooltype specific data */
465 +};
466 +
467 +/* Structure to bind set elements to sets */
468 +struct ip_set_hash {
469 + struct list_head list; /* list of clashing entries in hash */
470 + ip_set_ip_t ip; /* ip from set */
471 + ip_set_id_t id; /* set id */
472 + ip_set_id_t binding; /* set we bind the element to */
473 +};
474 +
475 +/* register and unregister set references */
476 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
477 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
478 +extern void ip_set_put(ip_set_id_t id);
479 +
480 +/* API for iptables set match, and SET target */
481 +extern void ip_set_addip_kernel(ip_set_id_t id,
482 + const struct sk_buff *skb,
483 + const u_int32_t *flags);
484 +extern void ip_set_delip_kernel(ip_set_id_t id,
485 + const struct sk_buff *skb,
486 + const u_int32_t *flags);
487 +extern int ip_set_testip_kernel(ip_set_id_t id,
488 + const struct sk_buff *skb,
489 + const u_int32_t *flags);
490 +
491 +#endif /* __KERNEL__ */
492 +
493 +#endif /*_IP_SET_H*/
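
The header above defines a getsockopt()-based control protocol rather than new syscalls: every operation is a fixed-size request struct tagged with an op code and, for the version-checked commands, the protocol version. As an illustration only (not part of the patch), a minimal userspace version check could look like the sketch below; it assumes the raw IPv4 socket and the SOL_IP level used by the ipset userspace tool, so it needs root/CAP_NET_RAW.

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/netfilter_ipv4/ip_set.h>

    /* Returns the kernel's IP_SET_PROTOCOL_VERSION, or -1 on error. */
    int ipset_kernel_version(void)
    {
            struct ip_set_req_version req;
            socklen_t size = sizeof(req);
            int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
            int version = -1;

            if (sockfd < 0)
                    return -1;
            memset(&req, 0, sizeof(req));
            req.op = IP_SET_OP_VERSION;
            req.version = IP_SET_PROTOCOL_VERSION;
            if (getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size) == 0)
                    version = req.version;
            close(sockfd);
            return version;
    }
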
494 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_iphash.h
495 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_iphash.h 1970-01-01 01:00:00.000000000 +0100
496 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_iphash.h 2006-12-14 03:13:43.000000000 +0100
497 @@ -0,0 +1,30 @@
498 +#ifndef __IP_SET_IPHASH_H
499 +#define __IP_SET_IPHASH_H
500 +
501 +#include <linux/netfilter_ipv4/ip_set.h>
502 +
503 +#define SETTYPE_NAME "iphash"
504 +#define MAX_RANGE 0x0000FFFF
505 +
506 +struct ip_set_iphash {
507 + ip_set_ip_t *members; /* the iphash proper */
508 + uint32_t initval; /* initval for jhash_1word */
509 + uint32_t prime; /* prime for double hashing */
510 + uint32_t hashsize; /* hash size */
511 + uint16_t probes; /* max number of probes */
512 + uint16_t resize; /* resize factor in percent */
513 + ip_set_ip_t netmask; /* netmask */
514 +};
515 +
516 +struct ip_set_req_iphash_create {
517 + uint32_t hashsize;
518 + uint16_t probes;
519 + uint16_t resize;
520 + ip_set_ip_t netmask;
521 +};
522 +
523 +struct ip_set_req_iphash {
524 + ip_set_ip_t ip;
525 +};
526 +
527 +#endif /* __IP_SET_IPHASH_H */
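
The iphash fields describe a bounded-probe hash table. The lookup sketch below is an assumption about how they fit together (it is not lifted from ip_set_iphash.c): the address is masked with netmask, hashed with jhash_1word() and initval, and on collision the probe advances by multiples of prime, at most probes times; only when all probes fail does the real code grow the table by the resize percentage and retry.

    /* Illustrative lookup; the exact field usage is an assumption. */
    static int iphash_contains(const struct ip_set_iphash *map, ip_set_ip_t ip)
    {
            ip_set_ip_t masked = ip & map->netmask;
            uint32_t hash = jhash_1word(masked, map->initval);
            uint16_t i;

            for (i = 0; i < map->probes; i++) {
                    uint32_t slot = (hash + i * map->prime) % map->hashsize;

                    if (map->members[slot] == masked)
                            return 1;       /* found */
                    if (!map->members[slot])
                            return 0;       /* empty slot: not present */
            }
            return 0;       /* every probe collided */
    }
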
528 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_ipmap.h
529 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_ipmap.h 1970-01-01 01:00:00.000000000 +0100
530 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_ipmap.h 2006-12-14 03:13:43.000000000 +0100
531 @@ -0,0 +1,56 @@
532 +#ifndef __IP_SET_IPMAP_H
533 +#define __IP_SET_IPMAP_H
534 +
535 +#include <linux/netfilter_ipv4/ip_set.h>
536 +
537 +#define SETTYPE_NAME "ipmap"
538 +#define MAX_RANGE 0x0000FFFF
539 +
540 +struct ip_set_ipmap {
541 + void *members; /* the ipmap proper */
542 + ip_set_ip_t first_ip; /* host byte order, included in range */
543 + ip_set_ip_t last_ip; /* host byte order, included in range */
544 + ip_set_ip_t netmask; /* subnet netmask */
545 + ip_set_ip_t sizeid; /* size of set in IPs */
546 + u_int16_t hosts; /* number of hosts in a subnet */
547 +};
548 +
549 +struct ip_set_req_ipmap_create {
550 + ip_set_ip_t from;
551 + ip_set_ip_t to;
552 + ip_set_ip_t netmask;
553 +};
554 +
555 +struct ip_set_req_ipmap {
556 + ip_set_ip_t ip;
557 +};
558 +
559 +unsigned int
560 +mask_to_bits(ip_set_ip_t mask)
561 +{
562 + unsigned int bits = 32;
563 + ip_set_ip_t maskaddr;
564 +
565 + if (mask == 0xFFFFFFFF)
566 + return bits;
567 +
568 + maskaddr = 0xFFFFFFFE;
569 + while (--bits >= 0 && maskaddr != mask)
570 + maskaddr <<= 1;
571 +
572 + return bits;
573 +}
574 +
575 +ip_set_ip_t
576 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
577 +{
578 + ip_set_ip_t mask = 0xFFFFFFFE;
579 +
580 + *bits = 32;
581 + while (--(*bits) >= 0 && mask && (to & mask) != from)
582 + mask <<= 1;
583 +
584 + return mask;
585 +}
586 +
587 +#endif /* __IP_SET_IPMAP_H */
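
range_to_mask() and mask_to_bits() only make sense for aligned ranges and contiguous masks: both loops compare an unsigned counter against ">= 0", which is always true, so termination relies entirely on the mask matching (mask_to_bits() would spin forever on a non-prefix mask). A worked example, not part of the patch, for the range 192.168.1.0-192.168.1.255:

    static void ipmap_example(void)
    {
            unsigned int bits;
            ip_set_ip_t from = 0xC0A80100;  /* 192.168.1.0, host byte order */
            ip_set_ip_t to   = 0xC0A801FF;  /* 192.168.1.255 */
            ip_set_ip_t mask = range_to_mask(from, to, &bits);

            /* mask == 0xFFFFFF00, bits == 24, and mask_to_bits(mask) == 24 */
    }
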
588 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_iptree.h
589 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_iptree.h 1970-01-01 01:00:00.000000000 +0100
590 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_iptree.h 2006-12-14 03:13:43.000000000 +0100
591 @@ -0,0 +1,39 @@
592 +#ifndef __IP_SET_IPTREE_H
593 +#define __IP_SET_IPTREE_H
594 +
595 +#include <linux/netfilter_ipv4/ip_set.h>
596 +
597 +#define SETTYPE_NAME "iptree"
598 +#define MAX_RANGE 0x0000FFFF
599 +
600 +struct ip_set_iptreed {
601 + unsigned long expires[255]; /* x.x.x.ADDR */
602 +};
603 +
604 +struct ip_set_iptreec {
605 + struct ip_set_iptreed *tree[255]; /* x.x.ADDR.* */
606 +};
607 +
608 +struct ip_set_iptreeb {
609 + struct ip_set_iptreec *tree[255]; /* x.ADDR.*.* */
610 +};
611 +
612 +struct ip_set_iptree {
613 + unsigned int timeout;
614 + unsigned int gc_interval;
615 +#ifdef __KERNEL__
616 + struct timer_list gc;
617 + struct ip_set_iptreeb *tree[255]; /* ADDR.*.*.* */
618 +#endif
619 +};
620 +
621 +struct ip_set_req_iptree_create {
622 + unsigned int timeout;
623 +};
624 +
625 +struct ip_set_req_iptree {
626 + ip_set_ip_t ip;
627 + unsigned int timeout;
628 +};
629 +
630 +#endif /* __IP_SET_IPTREE_H */
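
The iptree type is a four-level table indexed by the octets of the address, with a per-address expiry time (in jiffies) at the leaves and a garbage-collection timer that prunes expired entries; note the level arrays are declared with 255 entries although an octet can take 256 distinct values. A membership test is essentially the kernel-side pointer chase below (an assumption, not copied from ip_set_iptree.c).

    /* Sketch only; the real code also compares the expiry against jiffies. */
    static int iptree_contains(const struct ip_set_iptree *map, ip_set_ip_t ip)
    {
            unsigned char a = ip >> 24, b = ip >> 16, c = ip >> 8, d = ip;
            struct ip_set_iptreeb *btree = map->tree[a];
            struct ip_set_iptreec *ctree;
            struct ip_set_iptreed *dtree;

            if (!btree || !(ctree = btree->tree[b]) || !(dtree = ctree->tree[c]))
                    return 0;
            return dtree->expires[d] != 0;  /* 0 means "never added" */
    }
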
631 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_jhash.h
632 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_jhash.h 1970-01-01 01:00:00.000000000 +0100
633 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_jhash.h 2006-12-14 03:13:43.000000000 +0100
634 @@ -0,0 +1,148 @@
635 +#ifndef _LINUX_IPSET_JHASH_H
636 +#define _LINUX_IPSET_JHASH_H
637 +
638 +/* This is a copy of linux/jhash.h but the types u32/u8 are changed
639 + * to __u32/__u8 so that the header file can be included into
640 + * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
641 + */
642 +
643 +/* jhash.h: Jenkins hash support.
644 + *
645 + * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
646 + *
647 + * http://burtleburtle.net/bob/hash/
648 + *
649 + * These are the credits from Bob's sources:
650 + *
651 + * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
652 + * hash(), hash2(), hash3, and mix() are externally useful functions.
653 + * Routines to test the hash are included if SELF_TEST is defined.
654 + * You can use this free for any purpose. It has no warranty.
655 + *
656 + * Copyright (C) 2003 David S. Miller (davem@redhat.com)
657 + *
658 + * I've modified Bob's hash to be useful in the Linux kernel, and
659 + * any bugs present are surely my fault. -DaveM
660 + */
661 +
662 +/* NOTE: Arguments are modified. */
663 +#define __jhash_mix(a, b, c) \
664 +{ \
665 + a -= b; a -= c; a ^= (c>>13); \
666 + b -= c; b -= a; b ^= (a<<8); \
667 + c -= a; c -= b; c ^= (b>>13); \
668 + a -= b; a -= c; a ^= (c>>12); \
669 + b -= c; b -= a; b ^= (a<<16); \
670 + c -= a; c -= b; c ^= (b>>5); \
671 + a -= b; a -= c; a ^= (c>>3); \
672 + b -= c; b -= a; b ^= (a<<10); \
673 + c -= a; c -= b; c ^= (b>>15); \
674 +}
675 +
676 +/* The golden ratio: an arbitrary value */
677 +#define JHASH_GOLDEN_RATIO 0x9e3779b9
678 +
679 +/* The most generic version, hashes an arbitrary sequence
680 + * of bytes. No alignment or length assumptions are made about
681 + * the input key.
682 + */
683 +static inline __u32 jhash(void *key, __u32 length, __u32 initval)
684 +{
685 + __u32 a, b, c, len;
686 + __u8 *k = key;
687 +
688 + len = length;
689 + a = b = JHASH_GOLDEN_RATIO;
690 + c = initval;
691 +
692 + while (len >= 12) {
693 + a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
694 + b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
695 + c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
696 +
697 + __jhash_mix(a,b,c);
698 +
699 + k += 12;
700 + len -= 12;
701 + }
702 +
703 + c += length;
704 + switch (len) {
705 + case 11: c += ((__u32)k[10]<<24);
706 + case 10: c += ((__u32)k[9]<<16);
707 + case 9 : c += ((__u32)k[8]<<8);
708 + case 8 : b += ((__u32)k[7]<<24);
709 + case 7 : b += ((__u32)k[6]<<16);
710 + case 6 : b += ((__u32)k[5]<<8);
711 + case 5 : b += k[4];
712 + case 4 : a += ((__u32)k[3]<<24);
713 + case 3 : a += ((__u32)k[2]<<16);
714 + case 2 : a += ((__u32)k[1]<<8);
715 + case 1 : a += k[0];
716 + };
717 +
718 + __jhash_mix(a,b,c);
719 +
720 + return c;
721 +}
722 +
723 +/* A special optimized version that handles 1 or more __u32s.
724 + * The length parameter here is the number of __u32s in the key.
725 + */
726 +static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
727 +{
728 + __u32 a, b, c, len;
729 +
730 + a = b = JHASH_GOLDEN_RATIO;
731 + c = initval;
732 + len = length;
733 +
734 + while (len >= 3) {
735 + a += k[0];
736 + b += k[1];
737 + c += k[2];
738 + __jhash_mix(a, b, c);
739 + k += 3; len -= 3;
740 + }
741 +
742 + c += length * 4;
743 +
744 + switch (len) {
745 + case 2 : b += k[1];
746 + case 1 : a += k[0];
747 + };
748 +
749 + __jhash_mix(a,b,c);
750 +
751 + return c;
752 +}
753 +
754 +
755 +/* Special ultra-optimized versions that know they are hashing exactly
756 + * 3, 2 or 1 word(s).
757 + *
758 + * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
759 + * done at the end is not done here.
760 + */
761 +static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
762 +{
763 + a += JHASH_GOLDEN_RATIO;
764 + b += JHASH_GOLDEN_RATIO;
765 + c += initval;
766 +
767 + __jhash_mix(a, b, c);
768 +
769 + return c;
770 +}
771 +
772 +static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
773 +{
774 + return jhash_3words(a, b, 0, initval);
775 +}
776 +
777 +static inline __u32 jhash_1word(__u32 a, __u32 initval)
778 +{
779 + return jhash_3words(a, 0, 0, initval);
780 +}
781 +
782 +#endif /* _LINUX_IPSET_JHASH_H */
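
These helpers are what the set types and ip_set.c itself use for bucket selection. For example, the bindings hash further down keys each (set id, IP) pair with a random seed, which in terms of the functions above is simply:

    /* Bucket selection as used by the bindings hash in ip_set.c below. */
    static uint32_t binding_bucket(ip_set_id_t id, ip_set_ip_t ip,
                                   uint32_t seed, uint32_t buckets)
    {
            return jhash_2words(id, ip, seed) % buckets;
    }
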
783 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_macipmap.h
784 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_macipmap.h 1970-01-01 01:00:00.000000000 +0100
785 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_macipmap.h 2006-12-14 03:13:43.000000000 +0100
786 @@ -0,0 +1,38 @@
787 +#ifndef __IP_SET_MACIPMAP_H
788 +#define __IP_SET_MACIPMAP_H
789 +
790 +#include <linux/netfilter_ipv4/ip_set.h>
791 +
792 +#define SETTYPE_NAME "macipmap"
793 +#define MAX_RANGE 0x0000FFFF
794 +
795 +/* general flags */
796 +#define IPSET_MACIP_MATCHUNSET 1
797 +
798 +/* per ip flags */
799 +#define IPSET_MACIP_ISSET 1
800 +
801 +struct ip_set_macipmap {
802 + void *members; /* the macipmap proper */
803 + ip_set_ip_t first_ip; /* host byte order, included in range */
804 + ip_set_ip_t last_ip; /* host byte order, included in range */
805 + u_int32_t flags;
806 +};
807 +
808 +struct ip_set_req_macipmap_create {
809 + ip_set_ip_t from;
810 + ip_set_ip_t to;
811 + u_int32_t flags;
812 +};
813 +
814 +struct ip_set_req_macipmap {
815 + ip_set_ip_t ip;
816 + unsigned char ethernet[ETH_ALEN];
817 +};
818 +
819 +struct ip_set_macip {
820 + unsigned short flags;
821 + unsigned char ethernet[ETH_ALEN];
822 +};
823 +
824 +#endif /* __IP_SET_MACIPMAP_H */
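
macipmap pairs every address of the range with an optional MAC. The members area is, by all appearances, a flat array of struct ip_set_macip indexed by the offset from first_ip; the sketch below makes that assumption explicit (it is not copied from ip_set_macipmap.c). IPSET_MACIP_ISSET marks entries that carry a MAC, and IPSET_MACIP_MATCHUNSET presumably lets in-range addresses without a stored MAC match as well.

    /* Entry addressing sketch; the flat-array layout is an assumption. */
    static struct ip_set_macip *
    macipmap_entry(const struct ip_set_macipmap *map, ip_set_ip_t ip)
    {
            struct ip_set_macip *table = map->members;

            if (ip < map->first_ip || ip > map->last_ip)
                    return NULL;
            return &table[ip - map->first_ip];
    }
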
825 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_malloc.h
826 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_malloc.h 1970-01-01 01:00:00.000000000 +0100
827 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_malloc.h 2006-12-14 03:13:43.000000000 +0100
828 @@ -0,0 +1,42 @@
829 +#ifndef _IP_SET_MALLOC_H
830 +#define _IP_SET_MALLOC_H
831 +
832 +#ifdef __KERNEL__
833 +
834 +/* Memory allocation and deallocation */
835 +static size_t max_malloc_size = 0;
836 +
837 +static inline void init_max_malloc_size(void)
838 +{
839 +#define CACHE(x) max_malloc_size = x;
840 +#include <linux/kmalloc_sizes.h>
841 +#undef CACHE
842 +}
843 +
844 +static inline void * ip_set_malloc_atomic(size_t bytes)
845 +{
846 + if (bytes > max_malloc_size)
847 + return __vmalloc(bytes, GFP_ATOMIC, PAGE_KERNEL);
848 + else
849 + return kmalloc(bytes, GFP_ATOMIC);
850 +}
851 +
852 +static inline void * ip_set_malloc(size_t bytes)
853 +{
854 + if (bytes > max_malloc_size)
855 + return vmalloc(bytes);
856 + else
857 + return kmalloc(bytes, GFP_KERNEL);
858 +}
859 +
860 +static inline void ip_set_free(void * data, size_t bytes)
861 +{
862 + if (bytes > max_malloc_size)
863 + vfree(data);
864 + else
865 + kfree(data);
866 +}
867 +
868 +#endif /* __KERNEL__ */
869 +
870 +#endif /*_IP_SET_MALLOC_H*/
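
The CACHE() trick works because kmalloc_sizes.h lists the slab sizes in increasing order: with CACHE(x) redefined as a plain assignment, including the file leaves max_malloc_size equal to the largest size kmalloc() can serve (e.g. 131072 bytes with 4 KB pages), and anything bigger is routed to vmalloc()/vfree(). Because ip_set_free() re-derives the allocator from the size, callers must pass the same byte count to both halves, as in the illustrative pair below (the element count n is made up).

    static ip_set_ip_t *alloc_members(size_t n)
    {
            return ip_set_malloc(n * sizeof(ip_set_ip_t));
    }

    static void free_members(ip_set_ip_t *members, size_t n)
    {
            /* same size as the allocation, so kfree()/vfree() is chosen correctly */
            ip_set_free(members, n * sizeof(ip_set_ip_t));
    }
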
871 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_nethash.h
872 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_nethash.h 1970-01-01 01:00:00.000000000 +0100
873 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_nethash.h 2006-12-14 03:13:43.000000000 +0100
874 @@ -0,0 +1,55 @@
875 +#ifndef __IP_SET_NETHASH_H
876 +#define __IP_SET_NETHASH_H
877 +
878 +#include <linux/netfilter_ipv4/ip_set.h>
879 +
880 +#define SETTYPE_NAME "nethash"
881 +#define MAX_RANGE 0x0000FFFF
882 +
883 +struct ip_set_nethash {
884 + ip_set_ip_t *members; /* the nethash proper */
885 + uint32_t initval; /* initval for jhash_1word */
886 + uint32_t prime; /* prime for double hashing */
887 + uint32_t hashsize; /* hash size */
888 + uint16_t probes; /* max number of probes */
889 + uint16_t resize; /* resize factor in percent */
890 + unsigned char cidr[30]; /* CIDR sizes */
891 +};
892 +
893 +struct ip_set_req_nethash_create {
894 + uint32_t hashsize;
895 + uint16_t probes;
896 + uint16_t resize;
897 +};
898 +
899 +struct ip_set_req_nethash {
900 + ip_set_ip_t ip;
901 + unsigned char cidr;
902 +};
903 +
904 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
905 +
906 +static inline ip_set_ip_t
907 +pack(ip_set_ip_t ip, unsigned char cidr)
908 +{
909 + ip_set_ip_t addr, *paddr = &addr;
910 + unsigned char n, t, *a;
911 +
912 + addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
913 +#ifdef __KERNEL__
914 + DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
915 +#endif
916 + n = cidr / 8;
917 + t = cidr % 8;
918 + a = &((unsigned char *)paddr)[n];
919 + *a = *a /(1 << (8 - t)) + shifts[t];
920 +#ifdef __KERNEL__
921 + DP("n: %u, t: %u, a: %u", n, t, *a);
922 + DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
923 + HIPQUAD(ip), cidr, NIPQUAD(addr));
924 +#endif
925 +
926 + return ntohl(addr);
927 +}
928 +
929 +#endif /* __IP_SET_NETHASH_H */
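
nethash stores networks rather than plain addresses: cidr[] records which prefix lengths are present, and pack() folds an address down to a given prefix while encoding the prefix length itself in the first host byte (via shifts[]), so the same network added as /8 and /16 lands in different slots. Testing a packet address therefore tries every recorded prefix length, roughly as below; hash_contains() is a hypothetical stand-in for the double-hash probing sketched for iphash earlier, and treating a zero cidr[] entry as unused is also an assumption.

    /* Sketch only; hash_contains() is hypothetical, not part of the patch. */
    static int nethash_test(const struct ip_set_nethash *map, ip_set_ip_t ip)
    {
            int i;

            for (i = 0; i < 30 && map->cidr[i]; i++) {
                    ip_set_ip_t net = pack(ip, map->cidr[i]);

                    if (hash_contains(map, net))
                            return 1;
            }
            return 0;
    }
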
930 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_portmap.h
931 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_portmap.h 1970-01-01 01:00:00.000000000 +0100
932 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_portmap.h 2006-12-14 03:13:43.000000000 +0100
933 @@ -0,0 +1,25 @@
934 +#ifndef __IP_SET_PORTMAP_H
935 +#define __IP_SET_PORTMAP_H
936 +
937 +#include <linux/netfilter_ipv4/ip_set.h>
938 +
939 +#define SETTYPE_NAME "portmap"
940 +#define MAX_RANGE 0x0000FFFF
941 +#define INVALID_PORT (MAX_RANGE + 1)
942 +
943 +struct ip_set_portmap {
944 + void *members; /* the portmap proper */
945 + ip_set_ip_t first_port; /* host byte order, included in range */
946 + ip_set_ip_t last_port; /* host byte order, included in range */
947 +};
948 +
949 +struct ip_set_req_portmap_create {
950 + ip_set_ip_t from;
951 + ip_set_ip_t to;
952 +};
953 +
954 +struct ip_set_req_portmap {
955 + ip_set_ip_t port;
956 +};
957 +
958 +#endif /* __IP_SET_PORTMAP_H */
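
portmap is the simplest type: one bit per port in [first_port, last_port], so membership is a range check plus a bit test. The sketch below is illustrative, not copied from ip_set_portmap.c.

    static int portmap_contains(const struct ip_set_portmap *map, ip_set_ip_t port)
    {
            if (port < map->first_port || port > map->last_port)
                    return 0;       /* out of range: cannot be in the set */
            return test_bit(port - map->first_port, map->members) != 0;
    }
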
959 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_prime.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_prime.h
960 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ip_set_prime.h 1970-01-01 01:00:00.000000000 +0100
961 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ip_set_prime.h 2006-12-14 03:13:43.000000000 +0100
962 @@ -0,0 +1,34 @@
963 +#ifndef __IP_SET_PRIME_H
964 +#define __IP_SET_PRIME_H
965 +
966 +static inline unsigned make_prime_bound(unsigned nr)
967 +{
968 + unsigned long long nr64 = nr;
969 + unsigned long long x = 1;
970 + nr = 1;
971 + while (x <= nr64) { x <<= 2; nr <<= 1; }
972 + return nr;
973 +}
974 +
975 +static inline int make_prime_check(unsigned nr)
976 +{
977 + unsigned x = 3;
978 + unsigned b = make_prime_bound(nr);
979 + while (x <= b) {
980 + if (0 == (nr % x)) return 0;
981 + x += 2;
982 + }
983 + return 1;
984 +}
985 +
986 +static unsigned make_prime(unsigned nr)
987 +{
988 + if (0 == (nr & 1)) nr--;
989 + while (nr > 1) {
990 + if (make_prime_check(nr)) return nr;
991 + nr -= 2;
992 + }
993 + return 2;
994 +}
995 +
996 +#endif /* __IP_SET_PRIME_H */
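
make_prime() returns the largest prime not greater than its (odd-adjusted) argument, and make_prime_bound() only has to reach the square root: for 1021 it yields 32, so trial division stops at 31. Worked example for a 1024-slot table: 1023 = 3 * 11 * 31 is rejected, so make_prime(1024) == 1021, which is presumably what the hash set types store in their "prime" field for double hashing.

    /* Example: derive the double-hashing step for a 1024-slot table. */
    static uint32_t example_prime(void)
    {
            return make_prime(1024);        /* == 1021 */
    }
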
997 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.19.dev/include/linux/netfilter_ipv4/ipt_set.h
998 --- linux-2.6.19.old/include/linux/netfilter_ipv4/ipt_set.h 1970-01-01 01:00:00.000000000 +0100
999 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/ipt_set.h 2006-12-14 03:13:43.000000000 +0100
1000 @@ -0,0 +1,21 @@
1001 +#ifndef _IPT_SET_H
1002 +#define _IPT_SET_H
1003 +
1004 +#include <linux/netfilter_ipv4/ip_set.h>
1005 +
1006 +struct ipt_set_info {
1007 + ip_set_id_t index;
1008 + u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1009 +};
1010 +
1011 +/* match info */
1012 +struct ipt_set_info_match {
1013 + struct ipt_set_info match_set;
1014 +};
1015 +
1016 +struct ipt_set_info_target {
1017 + struct ipt_set_info add_set;
1018 + struct ipt_set_info del_set;
1019 +};
1020 +
1021 +#endif /*_IPT_SET_H*/
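
The flags[] array is what lets a single rule follow up to IP_SET_MAX_BINDINGS bindings: entry i says which address (source and/or destination, optionally inverted) to use at binding level i, and a zero entry terminates the walk, as the loops in ip_set_testip_kernel() and friends below show. As an illustration only, for something like "iptables -m set --set foo src,dst" the iptables plugin would fill the match info roughly as follows; foo_index is a placeholder for the index obtained via IP_SET_OP_GET_BYNAME.

    /* Hypothetical fill-in of the match info. */
    static void fill_match(struct ipt_set_info_match *info, ip_set_id_t foo_index)
    {
            memset(info, 0, sizeof(*info));
            info->match_set.index = foo_index;
            info->match_set.flags[0] = IPSET_SRC;   /* level 0: source address */
            info->match_set.flags[1] = IPSET_DST;   /* level 1: destination, after
                                                       following the binding */
            /* flags[2] stays 0 and terminates the walk */
    }
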
1022 diff -urN linux-2.6.19.old/include/linux/netfilter_ipv4/listhelp.h linux-2.6.19.dev/include/linux/netfilter_ipv4/listhelp.h
1023 --- linux-2.6.19.old/include/linux/netfilter_ipv4/listhelp.h 1970-01-01 01:00:00.000000000 +0100
1024 +++ linux-2.6.19.dev/include/linux/netfilter_ipv4/listhelp.h 2006-12-14 03:13:43.000000000 +0100
1025 @@ -0,0 +1,123 @@
1026 +#ifndef _LISTHELP_H
1027 +#define _LISTHELP_H
1028 +#include <linux/list.h>
1029 +
1030 +/* Header to do a more comprehensive job than linux/list.h; assumes the
1031 + list_head is the first entry in the structure. */
1032 +
1033 +/* Return pointer to first true entry, if any, or NULL. A macro
1034 + required to allow inlining of cmpfn. */
1035 +#define LIST_FIND(head, cmpfn, type, args...) \
1036 +({ \
1037 + const struct list_head *__i, *__j = NULL; \
1038 + \
1039 + ASSERT_READ_LOCK(head); \
1040 + list_for_each(__i, (head)) \
1041 + if (cmpfn((const type)__i , ## args)) { \
1042 + __j = __i; \
1043 + break; \
1044 + } \
1045 + (type)__j; \
1046 +})
1047 +
1048 +#define LIST_FIND_W(head, cmpfn, type, args...) \
1049 +({ \
1050 + const struct list_head *__i, *__j = NULL; \
1051 + \
1052 + ASSERT_WRITE_LOCK(head); \
1053 + list_for_each(__i, (head)) \
1054 + if (cmpfn((type)__i , ## args)) { \
1055 + __j = __i; \
1056 + break; \
1057 + } \
1058 + (type)__j; \
1059 +})
1060 +
1061 +/* Just like LIST_FIND but we search backwards */
1062 +#define LIST_FIND_B(head, cmpfn, type, args...) \
1063 +({ \
1064 + const struct list_head *__i, *__j = NULL; \
1065 + \
1066 + ASSERT_READ_LOCK(head); \
1067 + list_for_each_prev(__i, (head)) \
1068 + if (cmpfn((const type)__i , ## args)) { \
1069 + __j = __i; \
1070 + break; \
1071 + } \
1072 + (type)__j; \
1073 +})
1074 +
1075 +static inline int
1076 +__list_cmp_same(const void *p1, const void *p2) { return p1 == p2; }
1077 +
1078 +/* Is this entry in the list? */
1079 +static inline int
1080 +list_inlist(struct list_head *head, const void *entry)
1081 +{
1082 + return LIST_FIND(head, __list_cmp_same, void *, entry) != NULL;
1083 +}
1084 +
1085 +/* Delete from list. */
1086 +#ifdef CONFIG_NETFILTER_DEBUG
1087 +#define LIST_DELETE(head, oldentry) \
1088 +do { \
1089 + ASSERT_WRITE_LOCK(head); \
1090 + if (!list_inlist(head, oldentry)) \
1091 + printk("LIST_DELETE: %s:%u `%s'(%p) not in %s.\n", \
1092 + __FILE__, __LINE__, #oldentry, oldentry, #head); \
1093 + else list_del((struct list_head *)oldentry); \
1094 +} while(0)
1095 +#else
1096 +#define LIST_DELETE(head, oldentry) list_del((struct list_head *)oldentry)
1097 +#endif
1098 +
1099 +/* Append. */
1100 +static inline void
1101 +list_append(struct list_head *head, void *new)
1102 +{
1103 + ASSERT_WRITE_LOCK(head);
1104 + list_add((new), (head)->prev);
1105 +}
1106 +
1107 +/* Prepend. */
1108 +static inline void
1109 +list_prepend(struct list_head *head, void *new)
1110 +{
1111 + ASSERT_WRITE_LOCK(head);
1112 + list_add(new, head);
1113 +}
1114 +
1115 +/* Insert according to ordering function; insert before first true. */
1116 +#define LIST_INSERT(head, new, cmpfn) \
1117 +do { \
1118 + struct list_head *__i; \
1119 + ASSERT_WRITE_LOCK(head); \
1120 + list_for_each(__i, (head)) \
1121 + if (cmpfn((new), (typeof (new))__i)) \
1122 + break; \
1123 + list_add((struct list_head *)(new), __i->prev); \
1124 +} while(0)
1125 +
1126 +/* If the field after the list_head is a nul-terminated string, you
1127 + can use these functions. */
1128 +static inline int __list_cmp_name(const void *i, const char *name)
1129 +{
1130 + return strcmp(name, i+sizeof(struct list_head)) == 0;
1131 +}
1132 +
1133 +/* Returns false if same name already in list, otherwise does insert. */
1134 +static inline int
1135 +list_named_insert(struct list_head *head, void *new)
1136 +{
1137 + if (LIST_FIND(head, __list_cmp_name, void *,
1138 + new + sizeof(struct list_head)))
1139 + return 0;
1140 + list_prepend(head, new);
1141 + return 1;
1142 +}
1143 +
1144 +/* Find this named element in the list. */
1145 +#define list_named_find(head, name) \
1146 +LIST_FIND(head, __list_cmp_name, void *, name)
1147 +
1148 +#endif /*_LISTHELP_H*/
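
The "named" helpers at the end rely on a specific layout: the embedded list_head must be the first member and the NUL-terminated name must immediately follow it. struct ip_set_type above keeps its typename near the end, so ip_set.c below searches the type list with its own comparison function (set_type_equal) instead. A made-up example of the intended layout (the includer must also provide ASSERT_READ_LOCK/ASSERT_WRITE_LOCK, as ip_set.c does):

    /* Illustrative only; struct named_thing is invented for this sketch. */
    struct named_thing {
            struct list_head list;          /* must come first */
            char name[32];                  /* must immediately follow */
            int payload;
    };

    static LIST_HEAD(things);

    static struct named_thing *find_thing(const char *name)
    {
            return list_named_find(&things, name);
    }
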
1149 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ip_set.c linux-2.6.19.dev/net/ipv4/netfilter/ip_set.c
1150 --- linux-2.6.19.old/net/ipv4/netfilter/ip_set.c 1970-01-01 01:00:00.000000000 +0100
1151 +++ linux-2.6.19.dev/net/ipv4/netfilter/ip_set.c 2006-12-14 03:13:43.000000000 +0100
1152 @@ -0,0 +1,1989 @@
1153 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1154 + * Patrick Schaaf <bof@bof.de>
1155 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1156 + *
1157 + * This program is free software; you can redistribute it and/or modify
1158 + * it under the terms of the GNU General Public License version 2 as
1159 + * published by the Free Software Foundation.
1160 + */
1161 +
1162 +/* Kernel module for IP set management */
1163 +
1164 +#include <linux/autoconf.h>
1165 +#include <linux/module.h>
1166 +#include <linux/moduleparam.h>
1167 +#include <linux/kmod.h>
1168 +#include <linux/ip.h>
1169 +#include <linux/skbuff.h>
1170 +#include <linux/random.h>
1171 +#include <linux/jhash.h>
1172 +#include <linux/netfilter_ipv4/ip_tables.h>
1173 +#include <linux/errno.h>
1174 +#include <asm/uaccess.h>
1175 +#include <asm/bitops.h>
1176 +#include <asm/semaphore.h>
1177 +#include <linux/spinlock.h>
1178 +#include <linux/vmalloc.h>
1179 +
1180 +#define ASSERT_READ_LOCK(x) /* don't use that */
1181 +#define ASSERT_WRITE_LOCK(x)
1182 +#include <linux/netfilter_ipv4/listhelp.h>
1183 +#include <linux/netfilter_ipv4/ip_set.h>
1184 +
1185 +static struct list_head set_type_list; /* all registered sets */
1186 +static struct ip_set **ip_set_list; /* all individual sets */
1187 +static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
1188 +static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
1189 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1190 +static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
1191 +static struct list_head *ip_set_hash; /* hash of bindings */
1192 +static unsigned int ip_set_hash_random; /* random seed */
1193 +
1194 +/*
1195 + * Sets are identified either by the index in ip_set_list or by id.
1196 + * The id never changes and is used to find a key in the hash.
1197 + * The index may change by swapping and is used at all other places
1198 + * (set/SET netfilter modules, binding value, etc.)
1199 + *
1200 + * Userspace requests are serialized by ip_set_mutex and sets can
1201 + * be deleted only from userspace. Therefore ip_set_list locking
1202 + * must obey the following rules:
1203 + *
1204 + * - kernel requests: read and write locking mandatory
1205 + * - user requests: read locking optional, write locking mandatory
1206 + */
1207 +
1208 +static inline void
1209 +__ip_set_get(ip_set_id_t index)
1210 +{
1211 + atomic_inc(&ip_set_list[index]->ref);
1212 +}
1213 +
1214 +static inline void
1215 +__ip_set_put(ip_set_id_t index)
1216 +{
1217 + atomic_dec(&ip_set_list[index]->ref);
1218 +}
1219 +
1220 +/*
1221 + * Binding routines
1222 + */
1223 +
1224 +static inline int
1225 +ip_hash_cmp(const struct ip_set_hash *set_hash,
1226 + ip_set_id_t id, ip_set_ip_t ip)
1227 +{
1228 + return set_hash->id == id && set_hash->ip == ip;
1229 +}
1230 +
1231 +static ip_set_id_t
1232 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
1233 +{
1234 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1235 + % ip_set_bindings_hash_size;
1236 + struct ip_set_hash *set_hash;
1237 +
1238 + ASSERT_READ_LOCK(&ip_set_lock);
1239 + IP_SET_ASSERT(ip_set_list[id]);
1240 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1241 +
1242 + set_hash = LIST_FIND(&ip_set_hash[key], ip_hash_cmp,
1243 + struct ip_set_hash *, id, ip);
1244 +
1245 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1246 + HIPQUAD(ip),
1247 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1248 +
1249 + return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
1250 +}
1251 +
1252 +static inline void
1253 +__set_hash_del(struct ip_set_hash *set_hash)
1254 +{
1255 + ASSERT_WRITE_LOCK(&ip_set_lock);
1256 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1257 +
1258 + __ip_set_put(set_hash->binding);
1259 + list_del(&set_hash->list);
1260 + kfree(set_hash);
1261 +}
1262 +
1263 +static int
1264 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
1265 +{
1266 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1267 + % ip_set_bindings_hash_size;
1268 + struct ip_set_hash *set_hash;
1269 +
1270 + IP_SET_ASSERT(ip_set_list[id]);
1271 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1272 + write_lock_bh(&ip_set_lock);
1273 + set_hash = LIST_FIND(&ip_set_hash[key], ip_hash_cmp,
1274 + struct ip_set_hash *, id, ip);
1275 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1276 + HIPQUAD(ip),
1277 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1278 +
1279 + if (set_hash != NULL)
1280 + __set_hash_del(set_hash);
1281 + write_unlock_bh(&ip_set_lock);
1282 + return 0;
1283 +}
1284 +
1285 +static int
1286 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
1287 +{
1288 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1289 + % ip_set_bindings_hash_size;
1290 + struct ip_set_hash *set_hash;
1291 + int ret = 0;
1292 +
1293 + IP_SET_ASSERT(ip_set_list[id]);
1294 + IP_SET_ASSERT(ip_set_list[binding]);
1295 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1296 + HIPQUAD(ip), ip_set_list[binding]->name);
1297 + write_lock_bh(&ip_set_lock);
1298 + set_hash = LIST_FIND(&ip_set_hash[key], ip_hash_cmp,
1299 + struct ip_set_hash *, id, ip);
1300 + if (!set_hash) {
1301 + set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_KERNEL);
1302 + if (!set_hash) {
1303 + ret = -ENOMEM;
1304 + goto unlock;
1305 + }
1306 + INIT_LIST_HEAD(&set_hash->list);
1307 + set_hash->id = id;
1308 + set_hash->ip = ip;
1309 + list_add(&ip_set_hash[key], &set_hash->list);
1310 + } else {
1311 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1312 + DP("overwrite binding: %s",
1313 + ip_set_list[set_hash->binding]->name);
1314 + __ip_set_put(set_hash->binding);
1315 + }
1316 + set_hash->binding = binding;
1317 + __ip_set_get(set_hash->binding);
1318 + unlock:
1319 + write_unlock_bh(&ip_set_lock);
1320 + return ret;
1321 +}
1322 +
1323 +#define FOREACH_HASH_DO(fn, args...) \
1324 +({ \
1325 + ip_set_id_t __key; \
1326 + struct ip_set_hash *__set_hash; \
1327 + \
1328 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1329 + list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
1330 + fn(__set_hash , ## args); \
1331 + } \
1332 +})
1333 +
1334 +#define FOREACH_HASH_RW_DO(fn, args...) \
1335 +({ \
1336 + ip_set_id_t __key; \
1337 + struct ip_set_hash *__set_hash, *__n; \
1338 + \
1339 + ASSERT_WRITE_LOCK(&ip_set_lock); \
1340 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1341 + list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
1342 + fn(__set_hash , ## args); \
1343 + } \
1344 +})
1345 +
1346 +/* Add, del and test set entries from kernel */
1347 +
1348 +#define follow_bindings(index, set, ip) \
1349 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
1350 + || (index = (set)->binding) != IP_SET_INVALID_ID)
1351 +
1352 +int
1353 +ip_set_testip_kernel(ip_set_id_t index,
1354 + const struct sk_buff *skb,
1355 + const u_int32_t *flags)
1356 +{
1357 + struct ip_set *set;
1358 + ip_set_ip_t ip;
1359 + int res, i = 0;
1360 +
1361 + IP_SET_ASSERT(flags[i]);
1362 + read_lock_bh(&ip_set_lock);
1363 + do {
1364 + set = ip_set_list[index];
1365 + IP_SET_ASSERT(set);
1366 + DP("set %s, index %u", set->name, index);
1367 + read_lock_bh(&set->lock);
1368 + res = set->type->testip_kernel(set, skb, flags[i], &ip);
1369 + read_unlock_bh(&set->lock);
1370 + } while (res > 0
1371 + && flags[++i]
1372 + && follow_bindings(index, set, ip));
1373 + read_unlock_bh(&ip_set_lock);
1374 +
1375 + return res;
1376 +}
1377 +
1378 +void
1379 +ip_set_addip_kernel(ip_set_id_t index,
1380 + const struct sk_buff *skb,
1381 + const u_int32_t *flags)
1382 +{
1383 + struct ip_set *set;
1384 + ip_set_ip_t ip;
1385 + int res, i= 0;
1386 +
1387 + IP_SET_ASSERT(flags[i]);
1388 + retry:
1389 + read_lock_bh(&ip_set_lock);
1390 + do {
1391 + set = ip_set_list[index];
1392 + IP_SET_ASSERT(set);
1393 + DP("set %s, index %u", set->name, index);
1394 + write_lock_bh(&set->lock);
1395 + res = set->type->addip_kernel(set, skb, flags[i], &ip);
1396 + write_unlock_bh(&set->lock);
1397 + } while ((res == 0 || res == -EEXIST)
1398 + && flags[++i]
1399 + && follow_bindings(index, set, ip));
1400 + read_unlock_bh(&ip_set_lock);
1401 +
1402 + if (res == -EAGAIN
1403 + && set->type->retry
1404 + && (res = set->type->retry(set)) == 0)
1405 + goto retry;
1406 +}
1407 +
1408 +void
1409 +ip_set_delip_kernel(ip_set_id_t index,
1410 + const struct sk_buff *skb,
1411 + const u_int32_t *flags)
1412 +{
1413 + struct ip_set *set;
1414 + ip_set_ip_t ip;
1415 + int res, i = 0;
1416 +
1417 + IP_SET_ASSERT(flags[i]);
1418 + read_lock_bh(&ip_set_lock);
1419 + do {
1420 + set = ip_set_list[index];
1421 + IP_SET_ASSERT(set);
1422 + DP("set %s, index %u", set->name, index);
1423 + write_lock_bh(&set->lock);
1424 + res = set->type->delip_kernel(set, skb, flags[i], &ip);
1425 + write_unlock_bh(&set->lock);
1426 + } while ((res == 0 || res == -EEXIST)
1427 + && flags[++i]
1428 + && follow_bindings(index, set, ip));
1429 + read_unlock_bh(&ip_set_lock);
1430 +}
1431 +
1432 +/* Register and deregister settype */
1433 +
1434 +static inline int
1435 +set_type_equal(const struct ip_set_type *set_type, const char *str2)
1436 +{
1437 + return !strncmp(set_type->typename, str2, IP_SET_MAXNAMELEN - 1);
1438 +}
1439 +
1440 +static inline struct ip_set_type *
1441 +find_set_type(const char *name)
1442 +{
1443 + return LIST_FIND(&set_type_list,
1444 + set_type_equal,
1445 + struct ip_set_type *,
1446 + name);
1447 +}
1448 +
1449 +int
1450 +ip_set_register_set_type(struct ip_set_type *set_type)
1451 +{
1452 + int ret = 0;
1453 +
1454 + if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
1455 + ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
1456 + set_type->typename,
1457 + set_type->protocol_version,
1458 + IP_SET_PROTOCOL_VERSION);
1459 + return -EINVAL;
1460 + }
1461 +
1462 + write_lock_bh(&ip_set_lock);
1463 + if (find_set_type(set_type->typename)) {
1464 + /* Duplicate! */
1465 + ip_set_printk("'%s' already registered!",
1466 + set_type->typename);
1467 + ret = -EINVAL;
1468 + goto unlock;
1469 + }
1470 + if (!try_module_get(THIS_MODULE)) {
1471 + ret = -EFAULT;
1472 + goto unlock;
1473 + }
1474 + list_append(&set_type_list, set_type);
1475 + DP("'%s' registered.", set_type->typename);
1476 + unlock:
1477 + write_unlock_bh(&ip_set_lock);
1478 + return ret;
1479 +}
1480 +
1481 +void
1482 +ip_set_unregister_set_type(struct ip_set_type *set_type)
1483 +{
1484 + write_lock_bh(&ip_set_lock);
1485 + if (!find_set_type(set_type->typename)) {
1486 + ip_set_printk("'%s' not registered?",
1487 + set_type->typename);
1488 + goto unlock;
1489 + }
1490 + LIST_DELETE(&set_type_list, set_type);
1491 + module_put(THIS_MODULE);
1492 + DP("'%s' unregistered.", set_type->typename);
1493 + unlock:
1494 + write_unlock_bh(&ip_set_lock);
1495 +
1496 +}
1497 +
1498 +/*
1499 + * Userspace routines
1500 + */
1501 +
1502 +/*
1503 + * Find set by name, reference it once. The reference makes sure the
1504 + * thing pointed to does not go away under our feet. Drop the reference
1505 + * later, using ip_set_put().
1506 + */
1507 +ip_set_id_t
1508 +ip_set_get_byname(const char *name)
1509 +{
1510 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1511 +
1512 + down(&ip_set_app_mutex);
1513 + for (i = 0; i < ip_set_max; i++) {
1514 + if (ip_set_list[i] != NULL
1515 + && strcmp(ip_set_list[i]->name, name) == 0) {
1516 + __ip_set_get(i);
1517 + index = i;
1518 + break;
1519 + }
1520 + }
1521 + up(&ip_set_app_mutex);
1522 + return index;
1523 +}
1524 +
1525 +/*
1526 + * Find set by index, reference it once. The reference makes sure the
1527 + * thing pointed to does not go away under our feet. Drop the reference
1528 + * later, using ip_set_put().
1529 + */
1530 +ip_set_id_t
1531 +ip_set_get_byindex(ip_set_id_t index)
1532 +{
1533 + down(&ip_set_app_mutex);
1534 +
1535 + if (index >= ip_set_max)
1536 + return IP_SET_INVALID_ID;
1537 +
1538 + if (ip_set_list[index])
1539 + __ip_set_get(index);
1540 + else
1541 + index = IP_SET_INVALID_ID;
1542 +
1543 + up(&ip_set_app_mutex);
1544 + return index;
1545 +}
1546 +
1547 +/*
1548 + * If the given set pointer points to a valid set, decrement
1549 + * reference count by 1. The caller shall not assume the index
1550 + * to be valid, after calling this function.
1551 + */
1552 +void ip_set_put(ip_set_id_t index)
1553 +{
1554 + down(&ip_set_app_mutex);
1555 + if (ip_set_list[index])
1556 + __ip_set_put(index);
1557 + up(&ip_set_app_mutex);
1558 +}
1559 +
1560 +/* Find a set by name or index */
1561 +static ip_set_id_t
1562 +ip_set_find_byname(const char *name)
1563 +{
1564 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1565 +
1566 + for (i = 0; i < ip_set_max; i++) {
1567 + if (ip_set_list[i] != NULL
1568 + && strcmp(ip_set_list[i]->name, name) == 0) {
1569 + index = i;
1570 + break;
1571 + }
1572 + }
1573 + return index;
1574 +}
1575 +
1576 +static ip_set_id_t
1577 +ip_set_find_byindex(ip_set_id_t index)
1578 +{
1579 + if (index >= ip_set_max || ip_set_list[index] == NULL)
1580 + index = IP_SET_INVALID_ID;
1581 +
1582 + return index;
1583 +}
1584 +
1585 +/*
1586 + * Add, del, test, bind and unbind
1587 + */
1588 +
1589 +static inline int
1590 +__ip_set_testip(struct ip_set *set,
1591 + const void *data,
1592 + size_t size,
1593 + ip_set_ip_t *ip)
1594 +{
1595 + int res;
1596 +
1597 + read_lock_bh(&set->lock);
1598 + res = set->type->testip(set, data, size, ip);
1599 + read_unlock_bh(&set->lock);
1600 +
1601 + return res;
1602 +}
1603 +
1604 +static int
1605 +__ip_set_addip(ip_set_id_t index,
1606 + const void *data,
1607 + size_t size)
1608 +{
1609 + struct ip_set *set = ip_set_list[index];
1610 + ip_set_ip_t ip;
1611 + int res;
1612 +
1613 + IP_SET_ASSERT(set);
1614 + do {
1615 + write_lock_bh(&set->lock);
1616 + res = set->type->addip(set, data, size, &ip);
1617 + write_unlock_bh(&set->lock);
1618 + } while (res == -EAGAIN
1619 + && set->type->retry
1620 + && (res = set->type->retry(set)) == 0);
1621 +
1622 + return res;
1623 +}
1624 +
1625 +static int
1626 +ip_set_addip(ip_set_id_t index,
1627 + const void *data,
1628 + size_t size)
1629 +{
1630 +
1631 + return __ip_set_addip(index,
1632 + data + sizeof(struct ip_set_req_adt),
1633 + size - sizeof(struct ip_set_req_adt));
1634 +}
1635 +
1636 +static int
1637 +ip_set_delip(ip_set_id_t index,
1638 + const void *data,
1639 + size_t size)
1640 +{
1641 + struct ip_set *set = ip_set_list[index];
1642 + ip_set_ip_t ip;
1643 + int res;
1644 +
1645 + IP_SET_ASSERT(set);
1646 + write_lock_bh(&set->lock);
1647 + res = set->type->delip(set,
1648 + data + sizeof(struct ip_set_req_adt),
1649 + size - sizeof(struct ip_set_req_adt),
1650 + &ip);
1651 + write_unlock_bh(&set->lock);
1652 +
1653 + return res;
1654 +}
1655 +
1656 +static int
1657 +ip_set_testip(ip_set_id_t index,
1658 + const void *data,
1659 + size_t size)
1660 +{
1661 + struct ip_set *set = ip_set_list[index];
1662 + ip_set_ip_t ip;
1663 + int res;
1664 +
1665 + IP_SET_ASSERT(set);
1666 + res = __ip_set_testip(set,
1667 + data + sizeof(struct ip_set_req_adt),
1668 + size - sizeof(struct ip_set_req_adt),
1669 + &ip);
1670 +
1671 + return (res > 0 ? -EEXIST : res);
1672 +}
1673 +
1674 +static int
1675 +ip_set_bindip(ip_set_id_t index,
1676 + const void *data,
1677 + size_t size)
1678 +{
1679 + struct ip_set *set = ip_set_list[index];
1680 + struct ip_set_req_bind *req_bind;
1681 + ip_set_id_t binding;
1682 + ip_set_ip_t ip;
1683 + int res;
1684 +
1685 + IP_SET_ASSERT(set);
1686 + if (size < sizeof(struct ip_set_req_bind))
1687 + return -EINVAL;
1688 +
1689 + req_bind = (struct ip_set_req_bind *) data;
1690 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1691 +
1692 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1693 + /* Default binding of a set */
1694 + char *binding_name;
1695 +
1696 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1697 + return -EINVAL;
1698 +
1699 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1700 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1701 +
1702 + binding = ip_set_find_byname(binding_name);
1703 + if (binding == IP_SET_INVALID_ID)
1704 + return -ENOENT;
1705 +
1706 + write_lock_bh(&ip_set_lock);
1707 + /* Sets as binding values are referenced */
1708 + if (set->binding != IP_SET_INVALID_ID)
1709 + __ip_set_put(set->binding);
1710 + set->binding = binding;
1711 + __ip_set_get(set->binding);
1712 + write_unlock_bh(&ip_set_lock);
1713 +
1714 + return 0;
1715 + }
1716 + binding = ip_set_find_byname(req_bind->binding);
1717 + if (binding == IP_SET_INVALID_ID)
1718 + return -ENOENT;
1719 +
1720 + res = __ip_set_testip(set,
1721 + data + sizeof(struct ip_set_req_bind),
1722 + size - sizeof(struct ip_set_req_bind),
1723 + &ip);
1724 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1725 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1726 +
1727 + if (res >= 0)
1728 + res = ip_set_hash_add(set->id, ip, binding);
1729 +
1730 + return res;
1731 +}
1732 +
1733 +#define FOREACH_SET_DO(fn, args...) \
1734 +({ \
1735 + ip_set_id_t __i; \
1736 + struct ip_set *__set; \
1737 + \
1738 + for (__i = 0; __i < ip_set_max; __i++) { \
1739 + __set = ip_set_list[__i]; \
1740 + if (__set != NULL) \
1741 + fn(__set , ##args); \
1742 + } \
1743 +})
1744 +
1745 +static inline void
1746 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
1747 +{
1748 + if (set_hash->id == id)
1749 + __set_hash_del(set_hash);
1750 +}
1751 +
1752 +static inline void
1753 +__unbind_default(struct ip_set *set)
1754 +{
1755 + if (set->binding != IP_SET_INVALID_ID) {
1756 + /* Sets as binding values are referenced */
1757 + __ip_set_put(set->binding);
1758 + set->binding = IP_SET_INVALID_ID;
1759 + }
1760 +}
1761 +
1762 +static int
1763 +ip_set_unbindip(ip_set_id_t index,
1764 + const void *data,
1765 + size_t size)
1766 +{
1767 + struct ip_set *set;
1768 + struct ip_set_req_bind *req_bind;
1769 + ip_set_ip_t ip;
1770 + int res;
1771 +
1772 + DP("");
1773 + if (size < sizeof(struct ip_set_req_bind))
1774 + return -EINVAL;
1775 +
1776 + req_bind = (struct ip_set_req_bind *) data;
1777 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1778 +
1779 + DP("%u %s", index, req_bind->binding);
1780 + if (index == IP_SET_INVALID_ID) {
1781 + /* unbind :all: */
1782 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1783 + /* Default binding of sets */
1784 + write_lock_bh(&ip_set_lock);
1785 + FOREACH_SET_DO(__unbind_default);
1786 + write_unlock_bh(&ip_set_lock);
1787 + return 0;
1788 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1789 + /* Flush all bindings of all sets*/
1790 + write_lock_bh(&ip_set_lock);
1791 + FOREACH_HASH_RW_DO(__set_hash_del);
1792 + write_unlock_bh(&ip_set_lock);
1793 + return 0;
1794 + }
1795 + DP("unreachable reached!");
1796 + return -EINVAL;
1797 + }
1798 +
1799 + set = ip_set_list[index];
1800 + IP_SET_ASSERT(set);
1801 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1802 + /* Default binding of set */
1803 + ip_set_id_t binding = ip_set_find_byindex(set->binding);
1804 +
1805 + if (binding == IP_SET_INVALID_ID)
1806 + return -ENOENT;
1807 +
1808 + write_lock_bh(&ip_set_lock);
1809 + /* Sets in hash values are referenced */
1810 + __ip_set_put(set->binding);
1811 + set->binding = IP_SET_INVALID_ID;
1812 + write_unlock_bh(&ip_set_lock);
1813 +
1814 + return 0;
1815 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1816 + /* Flush all bindings */
1817 +
1818 + write_lock_bh(&ip_set_lock);
1819 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
1820 + write_unlock_bh(&ip_set_lock);
1821 + return 0;
1822 + }
1823 +
1824 + res = __ip_set_testip(set,
1825 + data + sizeof(struct ip_set_req_bind),
1826 + size - sizeof(struct ip_set_req_bind),
1827 + &ip);
1828 +
1829 + DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
1830 + if (res >= 0)
1831 + res = ip_set_hash_del(set->id, ip);
1832 +
1833 + return res;
1834 +}
1835 +
1836 +static int
1837 +ip_set_testbind(ip_set_id_t index,
1838 + const void *data,
1839 + size_t size)
1840 +{
1841 + struct ip_set *set = ip_set_list[index];
1842 + struct ip_set_req_bind *req_bind;
1843 + ip_set_id_t binding;
1844 + ip_set_ip_t ip;
1845 + int res;
1846 +
1847 + IP_SET_ASSERT(set);
1848 + if (size < sizeof(struct ip_set_req_bind))
1849 + return -EINVAL;
1850 +
1851 + req_bind = (struct ip_set_req_bind *) data;
1852 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1853 +
1854 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1855 + /* Default binding of set */
1856 + char *binding_name;
1857 +
1858 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1859 + return -EINVAL;
1860 +
1861 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1862 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1863 +
1864 + binding = ip_set_find_byname(binding_name);
1865 + if (binding == IP_SET_INVALID_ID)
1866 + return -ENOENT;
1867 +
1868 + res = (set->binding == binding) ? -EEXIST : 0;
1869 +
1870 + return res;
1871 + }
1872 + binding = ip_set_find_byname(req_bind->binding);
1873 + if (binding == IP_SET_INVALID_ID)
1874 + return -ENOENT;
1875 +
1876 +
1877 + res = __ip_set_testip(set,
1878 + data + sizeof(struct ip_set_req_bind),
1879 + size - sizeof(struct ip_set_req_bind),
1880 + &ip);
1881 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1882 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1883 +
1884 + if (res >= 0)
1885 + res = (ip_set_find_in_hash(set->id, ip) == binding)
1886 + ? -EEXIST : 0;
1887 +
1888 + return res;
1889 +}
1890 +
1891 +static struct ip_set_type *
1892 +find_set_type_rlock(const char *typename)
1893 +{
1894 + struct ip_set_type *type;
1895 +
1896 + read_lock_bh(&ip_set_lock);
1897 + type = find_set_type(typename);
1898 + if (type == NULL)
1899 + read_unlock_bh(&ip_set_lock);
1900 +
1901 + return type;
1902 +}
1903 +
1904 +static int
1905 +find_free_id(const char *name,
1906 + ip_set_id_t *index,
1907 + ip_set_id_t *id)
1908 +{
1909 + ip_set_id_t i;
1910 +
1911 + *id = IP_SET_INVALID_ID;
1912 + for (i = 0; i < ip_set_max; i++) {
1913 + if (ip_set_list[i] == NULL) {
1914 + if (*id == IP_SET_INVALID_ID)
1915 + *id = *index = i;
1916 + } else if (strcmp(name, ip_set_list[i]->name) == 0)
1917 + /* Name clash */
1918 + return -EEXIST;
1919 + }
1920 + if (*id == IP_SET_INVALID_ID)
1921 + /* No free slot remaining */
1922 + return -ERANGE;
1923 + /* Check that index is usable as id (swapping) */
1924 + check:
1925 + for (i = 0; i < ip_set_max; i++) {
1926 + if (ip_set_list[i] != NULL
1927 + && ip_set_list[i]->id == *id) {
1928 + *id = i;
1929 + goto check;
1930 + }
1931 + }
1932 + return 0;
1933 +}
1934 +
1935 +/*
1936 + * Create a set
1937 + */
1938 +static int
1939 +ip_set_create(const char *name,
1940 + const char *typename,
1941 + ip_set_id_t restore,
1942 + const void *data,
1943 + size_t size)
1944 +{
1945 + struct ip_set *set;
1946 + ip_set_id_t index, id;
1947 + int res = 0;
1948 +
1949 + DP("setname: %s, typename: %s, id: %u", name, typename, restore);
1950 + /*
1951 + * First, and without any locks, allocate and initialize
1952 + * a normal base set structure.
1953 + */
1954 + set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
1955 + if (!set)
1956 + return -ENOMEM;
1957 + set->lock = RW_LOCK_UNLOCKED;
1958 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
1959 + set->binding = IP_SET_INVALID_ID;
1960 + atomic_set(&set->ref, 0);
1961 +
1962 + /*
1963 + * Next, take the &ip_set_lock, check that we know the type,
1964 + * and take a reference on the type, to make sure it
1965 + * stays available while constructing our new set.
1966 + *
1967 + * After referencing the type, we drop the &ip_set_lock,
1968 + * and let the new set construction run without locks.
1969 + */
1970 + set->type = find_set_type_rlock(typename);
1971 + if (set->type == NULL) {
1972 + /* Try loading the module */
1973 + char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
1974 + strcpy(modulename, "ip_set_");
1975 + strcat(modulename, typename);
1976 + DP("try to load %s", modulename);
1977 + request_module(modulename);
1978 + set->type = find_set_type_rlock(typename);
1979 + }
1980 + if (set->type == NULL) {
1981 + ip_set_printk("no set type '%s', set '%s' not created",
1982 + typename, name);
1983 + res = -ENOENT;
1984 + goto out;
1985 + }
1986 + if (!try_module_get(set->type->me)) {
1987 + read_unlock_bh(&ip_set_lock);
1988 + res = -EFAULT;
1989 + goto out;
1990 + }
1991 + read_unlock_bh(&ip_set_lock);
1992 +
1993 + /*
1994 + * Without holding any locks, create private part.
1995 + */
1996 + res = set->type->create(set, data, size);
1997 + if (res != 0)
1998 + goto put_out;
1999 +
2000 + /* BTW, res==0 here. */
2001 +
2002 + /*
2003 + * Here, we have a valid, constructed set. &ip_set_lock again,
2004 + * find free id/index and check that it is not already in
2005 + * ip_set_list.
2006 + */
2007 + write_lock_bh(&ip_set_lock);
2008 + if ((res = find_free_id(set->name, &index, &id)) != 0) {
2009 + DP("no free id!");
2010 + goto cleanup;
2011 + }
2012 +
2013 + /* Make sure restore gets the same index */
2014 + if (restore != IP_SET_INVALID_ID && index != restore) {
2015 + DP("Can't restore, sets are screwed up");
2016 + res = -ERANGE;
2017 + goto cleanup;
2018 + }
2019 +
2020 + /*
2021 + * Finally! Add our shiny new set to the list, and be done.
2022 + */
2023 + DP("create: '%s' created with index %u, id %u!", set->name, index, id);
2024 + set->id = id;
2025 + ip_set_list[index] = set;
2026 + write_unlock_bh(&ip_set_lock);
2027 + return res;
2028 +
2029 + cleanup:
2030 + write_unlock_bh(&ip_set_lock);
2031 + set->type->destroy(set);
2032 + put_out:
2033 + module_put(set->type->me);
2034 + out:
2035 + kfree(set);
2036 + return res;
2037 +}
2038 +
2039 +/*
2040 + * Destroy a given existing set
2041 + */
2042 +static void
2043 +ip_set_destroy_set(ip_set_id_t index)
2044 +{
2045 + struct ip_set *set = ip_set_list[index];
2046 +
2047 + IP_SET_ASSERT(set);
2048 + DP("set: %s", set->name);
2049 + write_lock_bh(&ip_set_lock);
2050 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2051 + if (set->binding != IP_SET_INVALID_ID)
2052 + __ip_set_put(set->binding);
2053 + ip_set_list[index] = NULL;
2054 + write_unlock_bh(&ip_set_lock);
2055 +
2056 + /* Must call it without holding any lock */
2057 + set->type->destroy(set);
2058 + module_put(set->type->me);
2059 + kfree(set);
2060 +}
2061 +
2062 +/*
2063 + * Destroy a set - or all sets
2064 + * Sets must not be referenced/used.
2065 + */
2066 +static int
2067 +ip_set_destroy(ip_set_id_t index)
2068 +{
2069 + ip_set_id_t i;
2070 +
2071 + /* ref modification always protected by the mutex */
2072 + if (index != IP_SET_INVALID_ID) {
2073 + if (atomic_read(&ip_set_list[index]->ref))
2074 + return -EBUSY;
2075 + ip_set_destroy_set(index);
2076 + } else {
2077 + for (i = 0; i < ip_set_max; i++) {
2078 + if (ip_set_list[i] != NULL
2079 + && (atomic_read(&ip_set_list[i]->ref)))
2080 + return -EBUSY;
2081 + }
2082 +
2083 + for (i = 0; i < ip_set_max; i++) {
2084 + if (ip_set_list[i] != NULL)
2085 + ip_set_destroy_set(i);
2086 + }
2087 + }
2088 + return 0;
2089 +}
2090 +
2091 +static void
2092 +ip_set_flush_set(struct ip_set *set)
2093 +{
2094 + DP("set: %s %u", set->name, set->id);
2095 +
2096 + write_lock_bh(&set->lock);
2097 + set->type->flush(set);
2098 + write_unlock_bh(&set->lock);
2099 +}
2100 +
2101 +/*
2102 + * Flush data in a set - or in all sets
2103 + */
2104 +static int
2105 +ip_set_flush(ip_set_id_t index)
2106 +{
2107 + if (index != IP_SET_INVALID_ID) {
2108 + IP_SET_ASSERT(ip_set_list[index]);
2109 + ip_set_flush_set(ip_set_list[index]);
2110 + } else
2111 + FOREACH_SET_DO(ip_set_flush_set);
2112 +
2113 + return 0;
2114 +}
2115 +
2116 +/* Rename a set */
2117 +static int
2118 +ip_set_rename(ip_set_id_t index, const char *name)
2119 +{
2120 + struct ip_set *set = ip_set_list[index];
2121 + ip_set_id_t i;
2122 + int res = 0;
2123 +
2124 + DP("set: %s to %s", set->name, name);
2125 + write_lock_bh(&ip_set_lock);
2126 + for (i = 0; i < ip_set_max; i++) {
2127 + if (ip_set_list[i] != NULL
2128 + && strncmp(ip_set_list[i]->name,
2129 + name,
2130 + IP_SET_MAXNAMELEN - 1) == 0) {
2131 + res = -EEXIST;
2132 + goto unlock;
2133 + }
2134 + }
2135 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
2136 + unlock:
2137 + write_unlock_bh(&ip_set_lock);
2138 + return res;
2139 +}
2140 +
2141 +/*
2142 + * Swap two sets so that name/index points to the other.
2143 + * References are also swapped.
2144 + */
2145 +static int
2146 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
2147 +{
2148 + struct ip_set *from = ip_set_list[from_index];
2149 + struct ip_set *to = ip_set_list[to_index];
2150 + char from_name[IP_SET_MAXNAMELEN];
2151 + u_int32_t from_ref;
2152 +
2153 + DP("set: %s to %s", from->name, to->name);
2154 + /* Type can't be changed. Artificial restriction. */
2155 + if (from->type->typecode != to->type->typecode)
2156 + return -ENOEXEC;
2157 +
2158 + /* No magic here: ref munging protected by the mutex */
2159 + write_lock_bh(&ip_set_lock);
2160 + strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
2161 + from_ref = atomic_read(&from->ref);
2162 +
2163 + strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
2164 + atomic_set(&from->ref, atomic_read(&to->ref));
2165 + strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
2166 + atomic_set(&to->ref, from_ref);
2167 +
2168 + ip_set_list[from_index] = to;
2169 + ip_set_list[to_index] = from;
2170 +
2171 + write_unlock_bh(&ip_set_lock);
2172 + return 0;
2173 +}
2174 +
2175 +/*
2176 + * List set data
2177 + */
2178 +
2179 +static inline void
2180 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
2181 + ip_set_id_t id, size_t *size)
2182 +{
2183 + if (set_hash->id == id)
2184 + *size += sizeof(struct ip_set_hash_list);
2185 +}
2186 +
2187 +static inline void
2188 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
2189 + ip_set_id_t id, size_t *size)
2190 +{
2191 + if (set_hash->id == id)
2192 + *size += sizeof(struct ip_set_hash_save);
2193 +}
2194 +
2195 +static inline void
2196 +__set_hash_bindings(struct ip_set_hash *set_hash,
2197 + ip_set_id_t id, void *data, int *used)
2198 +{
2199 + if (set_hash->id == id) {
2200 + struct ip_set_hash_list *hash_list =
2201 + (struct ip_set_hash_list *)(data + *used);
2202 +
2203 + hash_list->ip = set_hash->ip;
2204 + hash_list->binding = set_hash->binding;
2205 + *used += sizeof(struct ip_set_hash_list);
2206 + }
2207 +}
2208 +
2209 +static int ip_set_list_set(ip_set_id_t index,
2210 + void *data,
2211 + int *used,
2212 + int len)
2213 +{
2214 + struct ip_set *set = ip_set_list[index];
2215 + struct ip_set_list *set_list;
2216 +
2217 + /* Pointer to our header */
2218 + set_list = (struct ip_set_list *) (data + *used);
2219 +
2220 + DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
2221 +
2222 + /* Get and ensure header size */
2223 + if (*used + sizeof(struct ip_set_list) > len)
2224 + goto not_enough_mem;
2225 + *used += sizeof(struct ip_set_list);
2226 +
2227 + read_lock_bh(&set->lock);
2228 + /* Get and ensure set specific header size */
2229 + set_list->header_size = set->type->header_size;
2230 + if (*used + set_list->header_size > len)
2231 + goto unlock_set;
2232 +
2233 + /* Fill in the header */
2234 + set_list->index = index;
2235 + set_list->binding = set->binding;
2236 + set_list->ref = atomic_read(&set->ref);
2237 +
2238 + /* Fill in set specific header data */
2239 + set->type->list_header(set, data + *used);
2240 + *used += set_list->header_size;
2241 +
2242 + /* Get and ensure set specific members size */
2243 + set_list->members_size = set->type->list_members_size(set);
2244 + if (*used + set_list->members_size > len)
2245 + goto unlock_set;
2246 +
2247 + /* Fill in set specific members data */
2248 + set->type->list_members(set, data + *used);
2249 + *used += set_list->members_size;
2250 + read_unlock_bh(&set->lock);
2251 +
2252 + /* Bindings */
2253 +
2254 + /* Get and ensure set specific bindings size */
2255 + set_list->bindings_size = 0;
2256 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2257 + set->id, &set_list->bindings_size);
2258 + if (*used + set_list->bindings_size > len)
2259 + goto not_enough_mem;
2260 +
2261 + /* Fill in set specific bindings data */
2262 + FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
2263 +
2264 + return 0;
2265 +
2266 + unlock_set:
2267 + read_unlock_bh(&set->lock);
2268 + not_enough_mem:
2269 + DP("not enough mem, try again");
2270 + return -EAGAIN;
2271 +}
2272 +
2273 +/*
2274 + * Save sets
2275 + */
2276 +static int ip_set_save_set(ip_set_id_t index,
2277 + void *data,
2278 + int *used,
2279 + int len)
2280 +{
2281 + struct ip_set *set;
2282 + struct ip_set_save *set_save;
2283 +
2284 + /* Pointer to our header */
2285 + set_save = (struct ip_set_save *) (data + *used);
2286 +
2287 + /* Get and ensure header size */
2288 + if (*used + sizeof(struct ip_set_save) > len)
2289 + goto not_enough_mem;
2290 + *used += sizeof(struct ip_set_save);
2291 +
2292 + set = ip_set_list[index];
2293 + DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
2294 + data, data + *used);
2295 +
2296 + read_lock_bh(&set->lock);
2297 + /* Get and ensure set specific header size */
2298 + set_save->header_size = set->type->header_size;
2299 + if (*used + set_save->header_size > len)
2300 + goto unlock_set;
2301 +
2302 + /* Fill in the header */
2303 + set_save->index = index;
2304 + set_save->binding = set->binding;
2305 +
2306 + /* Fill in set specific header data */
2307 + set->type->list_header(set, data + *used);
2308 + *used += set_save->header_size;
2309 +
2310 + DP("set header filled: %s, used: %u %p %p", set->name, *used,
2311 + data, data + *used);
2312 + /* Get and ensure set specific members size */
2313 + set_save->members_size = set->type->list_members_size(set);
2314 + if (*used + set_save->members_size > len)
2315 + goto unlock_set;
2316 +
2317 + /* Fill in set specific members data */
2318 + set->type->list_members(set, data + *used);
2319 + *used += set_save->members_size;
2320 + read_unlock_bh(&set->lock);
2321 + DP("set members filled: %s, used: %u %p %p", set->name, *used,
2322 + data, data + *used);
2323 + return 0;
2324 +
2325 + unlock_set:
2326 + read_unlock_bh(&set->lock);
2327 + not_enough_mem:
2328 + DP("not enough mem, try again");
2329 + return -EAGAIN;
2330 +}
2331 +
2332 +static inline void
2333 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
2334 + ip_set_id_t id,
2335 + void *data,
2336 + int *used,
2337 + int len,
2338 + int *res)
2339 +{
2340 + if (*res == 0
2341 + && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
2342 + struct ip_set_hash_save *hash_save =
2343 + (struct ip_set_hash_save *)(data + *used);
2344 + /* Ensure bindings size */
2345 + if (*used + sizeof(struct ip_set_hash_save) > len) {
2346 + *res = -ENOMEM;
2347 + return;
2348 + }
2349 + hash_save->id = set_hash->id;
2350 + hash_save->ip = set_hash->ip;
2351 + hash_save->binding = set_hash->binding;
2352 + *used += sizeof(struct ip_set_hash_save);
2353 + }
2354 +}
2355 +
2356 +static int ip_set_save_bindings(ip_set_id_t index,
2357 + void *data,
2358 + int *used,
2359 + int len)
2360 +{
2361 + int res = 0;
2362 + struct ip_set_save *set_save;
2363 +
2364 + DP("used %u, len %u", *used, len);
2365 + /* Get and ensure header size */
2366 + if (*used + sizeof(struct ip_set_save) > len)
2367 + return -ENOMEM;
2368 +
2369 + /* Marker */
2370 + set_save = (struct ip_set_save *) (data + *used);
2371 + set_save->index = IP_SET_INVALID_ID;
2372 + *used += sizeof(struct ip_set_save);
2373 +
2374 + DP("marker added used %u, len %u", *used, len);
2375 + /* Fill in bindings data */
2376 + if (index != IP_SET_INVALID_ID)
2377 + /* Sets are identified by id in hash */
2378 + index = ip_set_list[index]->id;
2379 + FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
2380 +
2381 + return res;
2382 +}
2383 +
2384 +/*
2385 + * Restore sets
2386 + */
2387 +static int ip_set_restore(void *data,
2388 + int len)
2389 +{
2390 + int res = 0;
2391 + int line = 0, used = 0, members_size;
2392 + struct ip_set *set;
2393 + struct ip_set_hash_save *hash_save;
2394 + struct ip_set_restore *set_restore;
2395 + ip_set_id_t index;
2396 +
2397 + /* Loop to restore sets */
2398 + while (1) {
2399 + line++;
2400 +
2401 + DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
2402 + /* Get and ensure header size */
2403 + if (used + sizeof(struct ip_set_restore) > len)
2404 + return line;
2405 + set_restore = (struct ip_set_restore *) (data + used);
2406 + used += sizeof(struct ip_set_restore);
2407 +
2408 + /* Ensure data size */
2409 + if (used
2410 + + set_restore->header_size
2411 + + set_restore->members_size > len)
2412 + return line;
2413 +
2414 + /* Check marker */
2415 + if (set_restore->index == IP_SET_INVALID_ID) {
2416 + line--;
2417 + goto bindings;
2418 + }
2419 +
2420 + /* Try to create the set */
2421 + DP("restore %s %s", set_restore->name, set_restore->typename);
2422 + res = ip_set_create(set_restore->name,
2423 + set_restore->typename,
2424 + set_restore->index,
2425 + data + used,
2426 + set_restore->header_size);
2427 +
2428 + if (res != 0)
2429 + return line;
2430 + used += set_restore->header_size;
2431 +
2432 + index = ip_set_find_byindex(set_restore->index);
2433 + DP("index %u, restore_index %u", index, set_restore->index);
2434 + if (index != set_restore->index)
2435 + return line;
2436 + /* Try to restore members data */
2437 + set = ip_set_list[index];
2438 + members_size = 0;
2439 + DP("members_size %u reqsize %u",
2440 + set_restore->members_size, set->type->reqsize);
2441 + while (members_size + set->type->reqsize <=
2442 + set_restore->members_size) {
2443 + line++;
2444 + DP("members: %u, line %u", members_size, line);
2445 + res = __ip_set_addip(index,
2446 + data + used + members_size,
2447 + set->type->reqsize);
2448 + if (!(res == 0 || res == -EEXIST))
2449 + return line;
2450 + members_size += set->type->reqsize;
2451 + }
2452 +
2453 + DP("members_size %u %u",
2454 + set_restore->members_size, members_size);
2455 + if (members_size != set_restore->members_size)
2456 + return line++;
2457 + used += set_restore->members_size;
2458 + }
2459 +
2460 + bindings:
2461 + /* Loop to restore bindings */
2462 + while (used < len) {
2463 + line++;
2464 +
2465 + DP("restore binding, line %u", line);
2466 + /* Get and ensure size */
2467 + if (used + sizeof(struct ip_set_hash_save) > len)
2468 + return line;
2469 + hash_save = (struct ip_set_hash_save *) (data + used);
2470 + used += sizeof(struct ip_set_hash_save);
2471 +
2472 + /* hash_save->id is used to store the index */
2473 + index = ip_set_find_byindex(hash_save->id);
2474 + DP("restore binding index %u, id %u, %u -> %u",
2475 + index, hash_save->id, hash_save->ip, hash_save->binding);
2476 + if (index != hash_save->id)
2477 + return line;
2478 +
2479 + set = ip_set_list[hash_save->id];
2480 + /* Null valued IP means default binding */
2481 + if (hash_save->ip)
2482 + res = ip_set_hash_add(set->id,
2483 + hash_save->ip,
2484 + hash_save->binding);
2485 + else {
2486 + IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
2487 + write_lock_bh(&ip_set_lock);
2488 + set->binding = hash_save->binding;
2489 + __ip_set_get(set->binding);
2490 + write_unlock_bh(&ip_set_lock);
2491 + DP("default binding: %u", set->binding);
2492 + }
2493 + if (res != 0)
2494 + return line;
2495 + }
2496 + if (used != len)
2497 + return line;
2498 +
2499 + return 0;
2500 +}
2501 +
2502 +static int
2503 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
2504 +{
2505 + void *data;
2506 + int res = 0; /* Assume OK */
2507 + unsigned *op;
2508 + struct ip_set_req_adt *req_adt;
2509 + ip_set_id_t index = IP_SET_INVALID_ID;
2510 + int (*adtfn)(ip_set_id_t index,
2511 + const void *data, size_t size);
2512 + struct fn_table {
2513 + int (*fn)(ip_set_id_t index,
2514 + const void *data, size_t size);
2515 + } adtfn_table[] =
2516 + { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
2517 + { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
2518 + };
2519 +
2520 + DP("optval=%d, user=%p, len=%d", optval, user, len);
2521 + if (!capable(CAP_NET_ADMIN))
2522 + return -EPERM;
2523 + if (optval != SO_IP_SET)
2524 + return -EBADF;
2525 + if (len <= sizeof(unsigned)) {
2526 + ip_set_printk("short userdata (want >%zu, got %u)",
2527 + sizeof(unsigned), len);
2528 + return -EINVAL;
2529 + }
2530 + data = vmalloc(len);
2531 + if (!data) {
2532 + DP("out of mem for %u bytes", len);
2533 + return -ENOMEM;
2534 + }
2535 + if (copy_from_user(data, user, len) != 0) {
2536 + res = -EFAULT;
2537 + goto done;
2538 + }
2539 + if (down_interruptible(&ip_set_app_mutex)) {
2540 + res = -EINTR;
2541 + goto done;
2542 + }
2543 +
2544 + op = (unsigned *)data;
2545 + DP("op=%x", *op);
2546 +
2547 + if (*op < IP_SET_OP_VERSION) {
2548 + /* Check the version at the beginning of operations */
2549 + struct ip_set_req_version *req_version =
2550 + (struct ip_set_req_version *) data;
2551 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2552 + res = -EPROTO;
2553 + goto done;
2554 + }
2555 + }
2556 +
2557 + switch (*op) {
2558 + case IP_SET_OP_CREATE:{
2559 + struct ip_set_req_create *req_create
2560 + = (struct ip_set_req_create *) data;
2561 +
2562 + if (len <= sizeof(struct ip_set_req_create)) {
2563 + ip_set_printk("short CREATE data (want >%zu, got %u)",
2564 + sizeof(struct ip_set_req_create), len);
2565 + res = -EINVAL;
2566 + goto done;
2567 + }
2568 + req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
2569 + req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2570 + res = ip_set_create(req_create->name,
2571 + req_create->typename,
2572 + IP_SET_INVALID_ID,
2573 + data + sizeof(struct ip_set_req_create),
2574 + len - sizeof(struct ip_set_req_create));
2575 + goto done;
2576 + }
2577 + case IP_SET_OP_DESTROY:{
2578 + struct ip_set_req_std *req_destroy
2579 + = (struct ip_set_req_std *) data;
2580 +
2581 + if (len != sizeof(struct ip_set_req_std)) {
2582 + ip_set_printk("invalid DESTROY data (want %zu, got %u)",
2583 + sizeof(struct ip_set_req_std), len);
2584 + res = -EINVAL;
2585 + goto done;
2586 + }
2587 + if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
2588 + /* Destroy all sets */
2589 + index = IP_SET_INVALID_ID;
2590 + } else {
2591 + req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
2592 + index = ip_set_find_byname(req_destroy->name);
2593 +
2594 + if (index == IP_SET_INVALID_ID) {
2595 + res = -ENOENT;
2596 + goto done;
2597 + }
2598 + }
2599 +
2600 + res = ip_set_destroy(index);
2601 + goto done;
2602 + }
2603 + case IP_SET_OP_FLUSH:{
2604 + struct ip_set_req_std *req_flush =
2605 + (struct ip_set_req_std *) data;
2606 +
2607 + if (len != sizeof(struct ip_set_req_std)) {
2608 + ip_set_printk("invalid FLUSH data (want %zu, got %u)",
2609 + sizeof(struct ip_set_req_std), len);
2610 + res = -EINVAL;
2611 + goto done;
2612 + }
2613 + if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
2614 + /* Flush all sets */
2615 + index = IP_SET_INVALID_ID;
2616 + } else {
2617 + req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
2618 + index = ip_set_find_byname(req_flush->name);
2619 +
2620 + if (index == IP_SET_INVALID_ID) {
2621 + res = -ENOENT;
2622 + goto done;
2623 + }
2624 + }
2625 + res = ip_set_flush(index);
2626 + goto done;
2627 + }
2628 + case IP_SET_OP_RENAME:{
2629 + struct ip_set_req_create *req_rename
2630 + = (struct ip_set_req_create *) data;
2631 +
2632 + if (len != sizeof(struct ip_set_req_create)) {
2633 + ip_set_printk("invalid RENAME data (want %zu, got %u)",
2634 + sizeof(struct ip_set_req_create), len);
2635 + res = -EINVAL;
2636 + goto done;
2637 + }
2638 +
2639 + req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
2640 + req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2641 +
2642 + index = ip_set_find_byname(req_rename->name);
2643 + if (index == IP_SET_INVALID_ID) {
2644 + res = -ENOENT;
2645 + goto done;
2646 + }
2647 + res = ip_set_rename(index, req_rename->typename);
2648 + goto done;
2649 + }
2650 + case IP_SET_OP_SWAP:{
2651 + struct ip_set_req_create *req_swap
2652 + = (struct ip_set_req_create *) data;
2653 + ip_set_id_t to_index;
2654 +
2655 + if (len != sizeof(struct ip_set_req_create)) {
2656 + ip_set_printk("invalid SWAP data (want %zu, got %u)",
2657 + sizeof(struct ip_set_req_create), len);
2658 + res = -EINVAL;
2659 + goto done;
2660 + }
2661 +
2662 + req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
2663 + req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2664 +
2665 + index = ip_set_find_byname(req_swap->name);
2666 + if (index == IP_SET_INVALID_ID) {
2667 + res = -ENOENT;
2668 + goto done;
2669 + }
2670 + to_index = ip_set_find_byname(req_swap->typename);
2671 + if (to_index == IP_SET_INVALID_ID) {
2672 + res = -ENOENT;
2673 + goto done;
2674 + }
2675 + res = ip_set_swap(index, to_index);
2676 + goto done;
2677 + }
2678 + default:
2679 + break; /* Set identified by id */
2680 + }
2681 +
2682 + /* Here we may have add/del/test/bind/unbind/test_bind operations */
2683 + if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
2684 + res = -EBADMSG;
2685 + goto done;
2686 + }
2687 + adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
2688 +
2689 + if (len < sizeof(struct ip_set_req_adt)) {
2690 + ip_set_printk("short data in adt request (want >=%zu, got %u)",
2691 + sizeof(struct ip_set_req_adt), len);
2692 + res = -EINVAL;
2693 + goto done;
2694 + }
2695 + req_adt = (struct ip_set_req_adt *) data;
2696 +
2697 + /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
2698 + if (!(*op == IP_SET_OP_UNBIND_SET
2699 + && req_adt->index == IP_SET_INVALID_ID)) {
2700 + index = ip_set_find_byindex(req_adt->index);
2701 + if (index == IP_SET_INVALID_ID) {
2702 + res = -ENOENT;
2703 + goto done;
2704 + }
2705 + }
2706 + res = adtfn(index, data, len);
2707 +
2708 + done:
2709 + up(&ip_set_app_mutex);
2710 + vfree(data);
2711 + if (res > 0)
2712 + res = 0;
2713 + DP("final result %d", res);
2714 + return res;
2715 +}
2716 +
2717 +static int
2718 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
2719 +{
2720 + int res = 0;
2721 + unsigned *op;
2722 + ip_set_id_t index = IP_SET_INVALID_ID;
2723 + void *data;
2724 + int copylen = *len;
2725 +
2726 + DP("optval=%d, user=%p, len=%d", optval, user, *len);
2727 + if (!capable(CAP_NET_ADMIN))
2728 + return -EPERM;
2729 + if (optval != SO_IP_SET)
2730 + return -EBADF;
2731 + if (*len < sizeof(unsigned)) {
2732 + ip_set_printk("short userdata (want >=%zu, got %d)",
2733 + sizeof(unsigned), *len);
2734 + return -EINVAL;
2735 + }
2736 + data = vmalloc(*len);
2737 + if (!data) {
2738 + DP("out of mem for %d bytes", *len);
2739 + return -ENOMEM;
2740 + }
2741 + if (copy_from_user(data, user, *len) != 0) {
2742 + res = -EFAULT;
2743 + goto done;
2744 + }
2745 + if (down_interruptible(&ip_set_app_mutex)) {
2746 + res = -EINTR;
2747 + goto done;
2748 + }
2749 +
2750 + op = (unsigned *) data;
2751 + DP("op=%x", *op);
2752 +
2753 + if (*op < IP_SET_OP_VERSION) {
2754 + /* Check the version at the beginning of operations */
2755 + struct ip_set_req_version *req_version =
2756 + (struct ip_set_req_version *) data;
2757 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2758 + res = -EPROTO;
2759 + goto done;
2760 + }
2761 + }
2762 +
2763 + switch (*op) {
2764 + case IP_SET_OP_VERSION: {
2765 + struct ip_set_req_version *req_version =
2766 + (struct ip_set_req_version *) data;
2767 +
2768 + if (*len != sizeof(struct ip_set_req_version)) {
2769 + ip_set_printk("invalid VERSION (want %zu, got %d)",
2770 + sizeof(struct ip_set_req_version),
2771 + *len);
2772 + res = -EINVAL;
2773 + goto done;
2774 + }
2775 +
2776 + req_version->version = IP_SET_PROTOCOL_VERSION;
2777 + res = copy_to_user(user, req_version,
2778 + sizeof(struct ip_set_req_version));
2779 + goto done;
2780 + }
2781 + case IP_SET_OP_GET_BYNAME: {
2782 + struct ip_set_req_get_set *req_get
2783 + = (struct ip_set_req_get_set *) data;
2784 +
2785 + if (*len != sizeof(struct ip_set_req_get_set)) {
2786 + ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
2787 + sizeof(struct ip_set_req_get_set), *len);
2788 + res = -EINVAL;
2789 + goto done;
2790 + }
2791 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2792 + index = ip_set_find_byname(req_get->set.name);
2793 + req_get->set.index = index;
2794 + goto copy;
2795 + }
2796 + case IP_SET_OP_GET_BYINDEX: {
2797 + struct ip_set_req_get_set *req_get
2798 + = (struct ip_set_req_get_set *) data;
2799 +
2800 + if (*len != sizeof(struct ip_set_req_get_set)) {
2801 + ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
2802 + sizeof(struct ip_set_req_get_set), *len);
2803 + res = -EINVAL;
2804 + goto done;
2805 + }
2806 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2807 + index = ip_set_find_byindex(req_get->set.index);
2808 + strncpy(req_get->set.name,
2809 + index == IP_SET_INVALID_ID ? ""
2810 + : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
2811 + goto copy;
2812 + }
2813 + case IP_SET_OP_ADT_GET: {
2814 + struct ip_set_req_adt_get *req_get
2815 + = (struct ip_set_req_adt_get *) data;
2816 +
2817 + if (*len != sizeof(struct ip_set_req_adt_get)) {
2818 + ip_set_printk("invalid ADT_GET (want %zu, got %d)",
2819 + sizeof(struct ip_set_req_adt_get), *len);
2820 + res = -EINVAL;
2821 + goto done;
2822 + }
2823 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2824 + index = ip_set_find_byname(req_get->set.name);
2825 + if (index != IP_SET_INVALID_ID) {
2826 + req_get->set.index = index;
2827 + strncpy(req_get->typename,
2828 + ip_set_list[index]->type->typename,
2829 + IP_SET_MAXNAMELEN - 1);
2830 + } else {
2831 + res = -ENOENT;
2832 + goto done;
2833 + }
2834 + goto copy;
2835 + }
2836 + case IP_SET_OP_MAX_SETS: {
2837 + struct ip_set_req_max_sets *req_max_sets
2838 + = (struct ip_set_req_max_sets *) data;
2839 + ip_set_id_t i;
2840 +
2841 + if (*len != sizeof(struct ip_set_req_max_sets)) {
2842 + ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
2843 + sizeof(struct ip_set_req_max_sets), *len);
2844 + res = -EINVAL;
2845 + goto done;
2846 + }
2847 +
2848 + if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
2849 + req_max_sets->set.index = IP_SET_INVALID_ID;
2850 + } else {
2851 + req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2852 + req_max_sets->set.index =
2853 + ip_set_find_byname(req_max_sets->set.name);
2854 + if (req_max_sets->set.index == IP_SET_INVALID_ID) {
2855 + res = -ENOENT;
2856 + goto done;
2857 + }
2858 + }
2859 + req_max_sets->max_sets = ip_set_max;
2860 + req_max_sets->sets = 0;
2861 + for (i = 0; i < ip_set_max; i++) {
2862 + if (ip_set_list[i] != NULL)
2863 + req_max_sets->sets++;
2864 + }
2865 + goto copy;
2866 + }
2867 + case IP_SET_OP_LIST_SIZE:
2868 + case IP_SET_OP_SAVE_SIZE: {
2869 + struct ip_set_req_setnames *req_setnames
2870 + = (struct ip_set_req_setnames *) data;
2871 + struct ip_set_name_list *name_list;
2872 + struct ip_set *set;
2873 + ip_set_id_t i;
2874 + int used;
2875 +
2876 + if (*len < sizeof(struct ip_set_req_setnames)) {
2877 + ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
2878 + sizeof(struct ip_set_req_setnames), *len);
2879 + res = -EINVAL;
2880 + goto done;
2881 + }
2882 +
2883 + req_setnames->size = 0;
2884 + used = sizeof(struct ip_set_req_setnames);
2885 + for (i = 0; i < ip_set_max; i++) {
2886 + if (ip_set_list[i] == NULL)
2887 + continue;
2888 + name_list = (struct ip_set_name_list *)
2889 + (data + used);
2890 + used += sizeof(struct ip_set_name_list);
2891 + if (used > copylen) {
2892 + res = -EAGAIN;
2893 + goto done;
2894 + }
2895 + set = ip_set_list[i];
2896 + /* Fill in index, name, etc. */
2897 + name_list->index = i;
2898 + name_list->id = set->id;
2899 + strncpy(name_list->name,
2900 + set->name,
2901 + IP_SET_MAXNAMELEN - 1);
2902 + strncpy(name_list->typename,
2903 + set->type->typename,
2904 + IP_SET_MAXNAMELEN - 1);
2905 + DP("filled %s of type %s, index %u\n",
2906 + name_list->name, name_list->typename,
2907 + name_list->index);
2908 + if (!(req_setnames->index == IP_SET_INVALID_ID
2909 + || req_setnames->index == i))
2910 + continue;
2911 + /* Update size */
2912 + switch (*op) {
2913 + case IP_SET_OP_LIST_SIZE: {
2914 + req_setnames->size += sizeof(struct ip_set_list)
2915 + + set->type->header_size
2916 + + set->type->list_members_size(set);
2917 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2918 + i, &req_setnames->size);
2919 + break;
2920 + }
2921 + case IP_SET_OP_SAVE_SIZE: {
2922 + req_setnames->size += sizeof(struct ip_set_save)
2923 + + set->type->header_size
2924 + + set->type->list_members_size(set);
2925 + FOREACH_HASH_DO(__set_hash_bindings_size_save,
2926 + i, &req_setnames->size);
2927 + break;
2928 + }
2929 + default:
2930 + break;
2931 + }
2932 + }
2933 + if (copylen != used) {
2934 + res = -EAGAIN;
2935 + goto done;
2936 + }
2937 + goto copy;
2938 + }
2939 + case IP_SET_OP_LIST: {
2940 + struct ip_set_req_list *req_list
2941 + = (struct ip_set_req_list *) data;
2942 + ip_set_id_t i;
2943 + int used;
2944 +
2945 + if (*len < sizeof(struct ip_set_req_list)) {
2946 + ip_set_printk("short LIST (want >=%zu, got %d)",
2947 + sizeof(struct ip_set_req_list), *len);
2948 + res = -EINVAL;
2949 + goto done;
2950 + }
2951 + index = req_list->index;
2952 + if (index != IP_SET_INVALID_ID
2953 + && ip_set_find_byindex(index) != index) {
2954 + res = -ENOENT;
2955 + goto done;
2956 + }
2957 + used = 0;
2958 + if (index == IP_SET_INVALID_ID) {
2959 + /* List all sets */
2960 + for (i = 0; i < ip_set_max && res == 0; i++) {
2961 + if (ip_set_list[i] != NULL)
2962 + res = ip_set_list_set(i, data, &used, *len);
2963 + }
2964 + } else {
2965 + /* List an individual set */
2966 + res = ip_set_list_set(index, data, &used, *len);
2967 + }
2968 + if (res != 0)
2969 + goto done;
2970 + else if (copylen != used) {
2971 + res = -EAGAIN;
2972 + goto done;
2973 + }
2974 + goto copy;
2975 + }
2976 + case IP_SET_OP_SAVE: {
2977 + struct ip_set_req_list *req_save
2978 + = (struct ip_set_req_list *) data;
2979 + ip_set_id_t i;
2980 + int used;
2981 +
2982 + if (*len < sizeof(struct ip_set_req_list)) {
2983 + ip_set_printk("short SAVE (want >=%zu, got %d)",
2984 + sizeof(struct ip_set_req_list), *len);
2985 + res = -EINVAL;
2986 + goto done;
2987 + }
2988 + index = req_save->index;
2989 + if (index != IP_SET_INVALID_ID
2990 + && ip_set_find_byindex(index) != index) {
2991 + res = -ENOENT;
2992 + goto done;
2993 + }
2994 + used = 0;
2995 + if (index == IP_SET_INVALID_ID) {
2996 + /* Save all sets */
2997 + for (i = 0; i < ip_set_max && res == 0; i++) {
2998 + if (ip_set_list[i] != NULL)
2999 + res = ip_set_save_set(i, data, &used, *len);
3000 + }
3001 + } else {
3002 + /* Save an individual set */
3003 + res = ip_set_save_set(index, data, &used, *len);
3004 + }
3005 + if (res == 0)
3006 + res = ip_set_save_bindings(index, data, &used, *len);
3007 +
3008 + if (res != 0)
3009 + goto done;
3010 + else if (copylen != used) {
3011 + res = -EAGAIN;
3012 + goto done;
3013 + }
3014 + goto copy;
3015 + }
3016 + case IP_SET_OP_RESTORE: {
3017 + struct ip_set_req_setnames *req_restore
3018 + = (struct ip_set_req_setnames *) data;
3019 + int line;
3020 +
3021 + if (*len < sizeof(struct ip_set_req_setnames)
3022 + || *len != req_restore->size) {
3023 + ip_set_printk("invalid RESTORE (want =%zu, got %d)",
3024 + req_restore->size, *len);
3025 + res = -EINVAL;
3026 + goto done;
3027 + }
3028 + line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3029 + req_restore->size - sizeof(struct ip_set_req_setnames));
3030 + DP("ip_set_restore: %u", line);
3031 + if (line != 0) {
3032 + res = -EAGAIN;
3033 + req_restore->size = line;
3034 + copylen = sizeof(struct ip_set_req_setnames);
3035 + goto copy;
3036 + }
3037 + goto done;
3038 + }
3039 + default:
3040 + res = -EBADMSG;
3041 + goto done;
3042 + } /* end of switch(op) */
3043 +
3044 + copy:
3045 + DP("set %s, copylen %u", index != IP_SET_INVALID_ID
3046 + && ip_set_list[index]
3047 + ? ip_set_list[index]->name
3048 + : ":all:", copylen);
3049 + if (res == 0)
3050 + res = copy_to_user(user, data, copylen);
3051 + else
3052 + copy_to_user(user, data, copylen);
3053 +
3054 + done:
3055 + up(&ip_set_app_mutex);
3056 + vfree(data);
3057 + if (res > 0)
3058 + res = 0;
3059 + DP("final result %d", res);
3060 + return res;
3061 +}
3062 +
3063 +static struct nf_sockopt_ops so_set = {
3064 + .pf = PF_INET,
3065 + .set_optmin = SO_IP_SET,
3066 + .set_optmax = SO_IP_SET + 1,
3067 + .set = &ip_set_sockfn_set,
3068 + .get_optmin = SO_IP_SET,
3069 + .get_optmax = SO_IP_SET + 1,
3070 + .get = &ip_set_sockfn_get,
3071 + .use = 0
3072 +};
3073 +
3074 +static int max_sets, hash_size;
3075 +module_param(max_sets, int, 0600);
3076 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3077 +module_param(hash_size, int, 0600);
3078 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3079 +MODULE_LICENSE("GPL");
3080 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3081 +MODULE_DESCRIPTION("module implementing core IP set support");
3082 +
3083 +static int __init init(void)
3084 +{
3085 + int res;
3086 + ip_set_id_t i;
3087 +
3088 + get_random_bytes(&ip_set_hash_random, 4);
3089 + if (max_sets)
3090 + ip_set_max = max_sets;
3091 + ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3092 + if (!ip_set_list) {
3093 + printk(KERN_ERR "Unable to create ip_set_list\n");
3094 + return -ENOMEM;
3095 + }
3096 + memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3097 + if (hash_size)
3098 + ip_set_bindings_hash_size = hash_size;
3099 + ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3100 + if (!ip_set_hash) {
3101 + printk(KERN_ERR "Unable to create ip_set_hash\n");
3102 + vfree(ip_set_list);
3103 + return -ENOMEM;
3104 + }
3105 + for (i = 0; i < ip_set_bindings_hash_size; i++)
3106 + INIT_LIST_HEAD(&ip_set_hash[i]);
3107 +
3108 + INIT_LIST_HEAD(&set_type_list);
3109 +
3110 + res = nf_register_sockopt(&so_set);
3111 + if (res != 0) {
3112 + ip_set_printk("SO_SET registry failed: %d", res);
3113 + vfree(ip_set_list);
3114 + vfree(ip_set_hash);
3115 + return res;
3116 + }
3117 + return 0;
3118 +}
3119 +
3120 +static void __exit fini(void)
3121 +{
3122 + /* There can't be any existing set or binding */
3123 + nf_unregister_sockopt(&so_set);
3124 + vfree(ip_set_list);
3125 + vfree(ip_set_hash);
3126 + DP("these are the famous last words");
3127 +}
3128 +
3129 +EXPORT_SYMBOL(ip_set_register_set_type);
3130 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3131 +
3132 +EXPORT_SYMBOL(ip_set_get_byname);
3133 +EXPORT_SYMBOL(ip_set_get_byindex);
3134 +EXPORT_SYMBOL(ip_set_put);
3135 +
3136 +EXPORT_SYMBOL(ip_set_addip_kernel);
3137 +EXPORT_SYMBOL(ip_set_delip_kernel);
3138 +EXPORT_SYMBOL(ip_set_testip_kernel);
3139 +
3140 +module_init(init);
3141 +module_exit(fini);
3142 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.19.dev/net/ipv4/netfilter/ip_set_iphash.c
3143 --- linux-2.6.19.old/net/ipv4/netfilter/ip_set_iphash.c 1970-01-01 01:00:00.000000000 +0100
3144 +++ linux-2.6.19.dev/net/ipv4/netfilter/ip_set_iphash.c 2006-12-14 03:13:43.000000000 +0100
3145 @@ -0,0 +1,379 @@
3146 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3147 + *
3148 + * This program is free software; you can redistribute it and/or modify
3149 + * it under the terms of the GNU General Public License version 2 as
3150 + * published by the Free Software Foundation.
3151 + */
3152 +
3153 +/* Kernel module implementing an ip hash set */
3154 +
3155 +#include <linux/module.h>
3156 +#include <linux/ip.h>
3157 +#include <linux/skbuff.h>
3158 +#include <linux/netfilter_ipv4/ip_tables.h>
3159 +#include <linux/netfilter_ipv4/ip_set.h>
3160 +#include <linux/errno.h>
3161 +#include <asm/uaccess.h>
3162 +#include <asm/bitops.h>
3163 +#include <linux/spinlock.h>
3164 +#include <linux/vmalloc.h>
3165 +#include <linux/random.h>
3166 +
3167 +#include <net/ip.h>
3168 +
3169 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3170 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
3171 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
3172 +#include <linux/netfilter_ipv4/ip_set_prime.h>
3173 +
3174 +static inline __u32
3175 +jhash_ip(const struct ip_set_iphash *map, ip_set_ip_t ip)
3176 +{
3177 + return jhash_1word(ip, map->initval);
3178 +}
3179 +
3180 +static inline __u32
3181 +randhash_ip(const struct ip_set_iphash *map, ip_set_ip_t ip)
3182 +{
3183 + return (1 + ip % map->prime);
3184 +}
3185 +
3186 +static inline __u32
3187 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3188 +{
3189 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3190 + __u32 jhash, randhash, id;
3191 + u_int16_t i;
3192 +
3193 + *hash_ip = ip & map->netmask;
3194 + jhash = jhash_ip(map, *hash_ip);
3195 + randhash = randhash_ip(map, *hash_ip);
3196 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
3197 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
3198 +
3199 + for (i = 0; i < map->probes; i++) {
3200 + id = (jhash + i * randhash) % map->hashsize;
3201 + DP("hash key: %u", id);
3202 + if (map->members[id] == *hash_ip)
3203 + return id;
3204 + /* No shortcut when testing - there can be deleted
3205 + * entries. */
3206 + }
3207 + return UINT_MAX;
3208 +}
3209 +
3210 +static inline int
3211 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3212 +{
3213 + return (hash_id(set, ip, hash_ip) != UINT_MAX);
3214 +}
3215 +
3216 +static int
3217 +testip(struct ip_set *set, const void *data, size_t size,
3218 + ip_set_ip_t *hash_ip)
3219 +{
3220 + struct ip_set_req_iphash *req =
3221 + (struct ip_set_req_iphash *) data;
3222 +
3223 + if (size != sizeof(struct ip_set_req_iphash)) {
3224 + ip_set_printk("data length wrong (want %zu, have %zu)",
3225 + sizeof(struct ip_set_req_iphash),
3226 + size);
3227 + return -EINVAL;
3228 + }
3229 + return __testip(set, req->ip, hash_ip);
3230 +}
3231 +
3232 +static int
3233 +testip_kernel(struct ip_set *set, const struct sk_buff *skb,
3234 + u_int32_t flags, ip_set_ip_t *hash_ip)
3235 +{
3236 + return __testip(set,
3237 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
3238 + : skb->nh.iph->daddr),
3239 + hash_ip);
3240 +}
3241 +
3242 +static inline int
3243 +__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3244 +{
3245 + __u32 jhash, randhash, probe;
3246 + u_int16_t i;
3247 +
3248 + *hash_ip = ip & map->netmask;
3249 + jhash = jhash_ip(map, *hash_ip);
3250 + randhash = randhash_ip(map, *hash_ip);
3251 +
3252 + for (i = 0; i < map->probes; i++) {
3253 + probe = (jhash + i * randhash) % map->hashsize;
3254 + if (map->members[probe] == *hash_ip)
3255 + return -EEXIST;
3256 + if (!map->members[probe]) {
3257 + map->members[probe] = *hash_ip;
3258 + return 0;
3259 + }
3260 + }
3261 + /* Trigger rehashing */
3262 + return -EAGAIN;
3263 +}
3264 +
3265 +static int
3266 +addip(struct ip_set *set, const void *data, size_t size,
3267 + ip_set_ip_t *hash_ip)
3268 +{
3269 + struct ip_set_req_iphash *req =
3270 + (struct ip_set_req_iphash *) data;
3271 +
3272 + if (size != sizeof(struct ip_set_req_iphash)) {
3273 + ip_set_printk("data length wrong (want %zu, have %zu)",
3274 + sizeof(struct ip_set_req_iphash),
3275 + size);
3276 + return -EINVAL;
3277 + }
3278 + return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
3279 +}
3280 +
3281 +static int
3282 +addip_kernel(struct ip_set *set, const struct sk_buff *skb,
3283 + u_int32_t flags, ip_set_ip_t *hash_ip)
3284 +{
3285 + return __addip((struct ip_set_iphash *) set->data,
3286 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
3287 + : skb->nh.iph->daddr),
3288 + hash_ip);
3289 +}
3290 +
3291 +static int retry(struct ip_set *set)
3292 +{
3293 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3294 + ip_set_ip_t hash_ip, *members;
3295 + u_int32_t i, hashsize;
3296 + unsigned newbytes;
3297 + int res;
3298 + struct ip_set_iphash tmp = {
3299 + .hashsize = map->hashsize,
3300 + .probes = map->probes,
3301 + .resize = map->resize,
3302 + .netmask = map->netmask,
3303 + };
3304 +
3305 + if (map->resize == 0)
3306 + return -ERANGE;
3307 +
3308 + again:
3309 + res = 0;
3310 +
3311 + /* Calculate new parameters */
3312 + get_random_bytes(&tmp.initval, 4);
3313 + hashsize = tmp.hashsize + (tmp.hashsize * map->resize)/100;
3314 + if (hashsize == tmp.hashsize)
3315 + hashsize++;
3316 + tmp.prime = make_prime(hashsize);
3317 +
3318 + ip_set_printk("rehashing of set %s triggered: "
3319 + "hashsize grows from %u to %u",
3320 + set->name, tmp.hashsize, hashsize);
3321 + tmp.hashsize = hashsize;
3322 +
3323 + newbytes = hashsize * sizeof(ip_set_ip_t);
3324 + tmp.members = ip_set_malloc_atomic(newbytes);
3325 + if (!tmp.members) {
3326 + DP("out of memory for %d bytes", newbytes);
3327 + return -ENOMEM;
3328 + }
3329 + memset(tmp.members, 0, newbytes);
3330 +
3331 + write_lock_bh(&set->lock);
3332 + map = (struct ip_set_iphash *) set->data; /* Play safe */
3333 + for (i = 0; i < map->hashsize && res == 0; i++) {
3334 + if (map->members[i])
3335 + res = __addip(&tmp, map->members[i], &hash_ip);
3336 + }
3337 + if (res) {
3338 + /* Failure, try again */
3339 + write_unlock_bh(&set->lock);
3340 + ip_set_free(tmp.members, newbytes);
3341 + goto again;
3342 + }
3343 +
3344 + /* Success at resizing! */
3345 + members = map->members;
3346 + hashsize = map->hashsize;
3347 +
3348 + map->initval = tmp.initval;
3349 + map->prime = tmp.prime;
3350 + map->hashsize = tmp.hashsize;
3351 + map->members = tmp.members;
3352 + write_unlock_bh(&set->lock);
3353 +
3354 + ip_set_free(members, hashsize * sizeof(ip_set_ip_t));
3355 +
3356 + return 0;
3357 +}
3358 +
3359 +static inline int
3360 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3361 +{
3362 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3363 + ip_set_ip_t id = hash_id(set, ip, hash_ip);
3364 +
3365 + if (id == UINT_MAX)
3366 + return -EEXIST;
3367 +
3368 + map->members[id] = 0;
3369 + return 0;
3370 +}
3371 +
3372 +static int
3373 +delip(struct ip_set *set, const void *data, size_t size,
3374 + ip_set_ip_t *hash_ip)
3375 +{
3376 + struct ip_set_req_iphash *req =
3377 + (struct ip_set_req_iphash *) data;
3378 +
3379 + if (size != sizeof(struct ip_set_req_iphash)) {
3380 + ip_set_printk("data length wrong (want %zu, have %zu)",
3381 + sizeof(struct ip_set_req_iphash),
3382 + size);
3383 + return -EINVAL;
3384 + }
3385 + return __delip(set, req->ip, hash_ip);
3386 +}
3387 +
3388 +static int
3389 +delip_kernel(struct ip_set *set, const struct sk_buff *skb,
3390 + u_int32_t flags, ip_set_ip_t *hash_ip)
3391 +{
3392 + return __delip(set,
3393 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
3394 + : skb->nh.iph->daddr),
3395 + hash_ip);
3396 +}
3397 +
3398 +static int create(struct ip_set *set, const void *data, size_t size)
3399 +{
3400 + unsigned newbytes;
3401 + struct ip_set_req_iphash_create *req =
3402 + (struct ip_set_req_iphash_create *) data;
3403 + struct ip_set_iphash *map;
3404 +
3405 + if (size != sizeof(struct ip_set_req_iphash_create)) {
3406 + ip_set_printk("data length wrong (want %zu, have %zu)",
3407 + sizeof(struct ip_set_req_iphash_create),
3408 + size);
3409 + return -EINVAL;
3410 + }
3411 +
3412 + if (req->hashsize < 1) {
3413 + ip_set_printk("hashsize too small");
3414 + return -ENOEXEC;
3415 + }
3416 +
3417 + map = kmalloc(sizeof(struct ip_set_iphash), GFP_KERNEL);
3418 + if (!map) {
3419 + DP("out of memory for %d bytes",
3420 + sizeof(struct ip_set_iphash));
3421 + return -ENOMEM;
3422 + }
3423 + get_random_bytes(&map->initval, 4);
3424 + map->prime = make_prime(req->hashsize);
3425 + map->hashsize = req->hashsize;
3426 + map->probes = req->probes;
3427 + map->resize = req->resize;
3428 + map->netmask = req->netmask;
3429 + newbytes = map->hashsize * sizeof(ip_set_ip_t);
3430 + map->members = ip_set_malloc(newbytes);
3431 + if (!map->members) {
3432 + DP("out of memory for %d bytes", newbytes);
3433 + kfree(map);
3434 + return -ENOMEM;
3435 + }
3436 + memset(map->members, 0, newbytes);
3437 +
3438 + set->data = map;
3439 + return 0;
3440 +}
3441 +
3442 +static void destroy(struct ip_set *set)
3443 +{
3444 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3445 +
3446 + ip_set_free(map->members, map->hashsize * sizeof(ip_set_ip_t));
3447 + kfree(map);
3448 +
3449 + set->data = NULL;
3450 +}
3451 +
3452 +static void flush(struct ip_set *set)
3453 +{
3454 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3455 + memset(map->members, 0, map->hashsize * sizeof(ip_set_ip_t));
3456 +}
3457 +
3458 +static void list_header(const struct ip_set *set, void *data)
3459 +{
3460 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3461 + struct ip_set_req_iphash_create *header =
3462 + (struct ip_set_req_iphash_create *) data;
3463 +
3464 + header->hashsize = map->hashsize;
3465 + header->probes = map->probes;
3466 + header->resize = map->resize;
3467 + header->netmask = map->netmask;
3468 +}
3469 +
3470 +static int list_members_size(const struct ip_set *set)
3471 +{
3472 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3473 +
3474 + return (map->hashsize * sizeof(ip_set_ip_t));
3475 +}
3476 +
3477 +static void list_members(const struct ip_set *set, void *data)
3478 +{
3479 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3480 + int bytes = map->hashsize * sizeof(ip_set_ip_t);
3481 +
3482 + memcpy(data, map->members, bytes);
3483 +}
3484 +
3485 +static struct ip_set_type ip_set_iphash = {
3486 + .typename = SETTYPE_NAME,
3487 + .typecode = IPSET_TYPE_IP,
3488 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3489 + .create = &create,
3490 + .destroy = &destroy,
3491 + .flush = &flush,
3492 + .reqsize = sizeof(struct ip_set_req_iphash),
3493 + .addip = &addip,
3494 + .addip_kernel = &addip_kernel,
3495 + .retry = &retry,
3496 + .delip = &delip,
3497 + .delip_kernel = &delip_kernel,
3498 + .testip = &testip,
3499 + .testip_kernel = &testip_kernel,
3500 + .header_size = sizeof(struct ip_set_req_iphash_create),
3501 + .list_header = &list_header,
3502 + .list_members_size = &list_members_size,
3503 + .list_members = &list_members,
3504 + .me = THIS_MODULE,
3505 +};
3506 +
3507 +MODULE_LICENSE("GPL");
3508 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3509 +MODULE_DESCRIPTION("iphash type of IP sets");
3510 +
3511 +static int __init init(void)
3512 +{
3513 + init_max_malloc_size();
3514 + return ip_set_register_set_type(&ip_set_iphash);
3515 +}
3516 +
3517 +static void __exit fini(void)
3518 +{
3519 + /* FIXME: possible race with ip_set_create() */
3520 + ip_set_unregister_set_type(&ip_set_iphash);
3521 +}
3522 +
3523 +module_init(init);
3524 +module_exit(fini);
3525 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.19.dev/net/ipv4/netfilter/ip_set_ipmap.c
3526 --- linux-2.6.19.old/net/ipv4/netfilter/ip_set_ipmap.c 1970-01-01 01:00:00.000000000 +0100
3527 +++ linux-2.6.19.dev/net/ipv4/netfilter/ip_set_ipmap.c 2006-12-14 03:13:43.000000000 +0100
3528 @@ -0,0 +1,313 @@
3529 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3530 + * Patrick Schaaf <bof@bof.de>
3531 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3532 + *
3533 + * This program is free software; you can redistribute it and/or modify
3534 + * it under the terms of the GNU General Public License version 2 as
3535 + * published by the Free Software Foundation.
3536 + */
3537 +
3538 +/* Kernel module implementing an IP set type: the single bitmap type */
3539 +
3540 +#include <linux/module.h>
3541 +#include <linux/ip.h>
3542 +#include <linux/skbuff.h>
3543 +#include <linux/netfilter_ipv4/ip_tables.h>
3544 +#include <linux/netfilter_ipv4/ip_set.h>
3545 +#include <linux/errno.h>
3546 +#include <asm/uaccess.h>
3547 +#include <asm/bitops.h>
3548 +#include <linux/spinlock.h>
3549 +
3550 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
3551 +
3552 +static inline ip_set_ip_t
3553 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
3554 +{
3555 + return (ip - map->first_ip)/map->hosts;
3556 +}
3557 +
3558 +static inline int
3559 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3560 +{
3561 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3562 +
3563 + if (ip < map->first_ip || ip > map->last_ip)
3564 + return -ERANGE;
3565 +
3566 + *hash_ip = ip & map->netmask;
3567 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
3568 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
3569 + return !!test_bit(ip_to_id(map, *hash_ip), map->members);
3570 +}
3571 +
3572 +static int
3573 +testip(struct ip_set *set, const void *data, size_t size,
3574 + ip_set_ip_t *hash_ip)
3575 +{
3576 + struct ip_set_req_ipmap *req =
3577 + (struct ip_set_req_ipmap *) data;
3578 +
3579 + if (size != sizeof(struct ip_set_req_ipmap)) {
3580 + ip_set_printk("data length wrong (want %zu, have %zu)",
3581 + sizeof(struct ip_set_req_ipmap),
3582 + size);
3583 + return -EINVAL;
3584 + }
3585 + return __testip(set, req->ip, hash_ip);
3586 +}
3587 +
3588 +static int
3589 +testip_kernel(struct ip_set *set,
3590 + const struct sk_buff *skb,
3591 + u_int32_t flags,
3592 + ip_set_ip_t *hash_ip)
3593 +{
3594 + int res;
3595 +
3596 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
3597 + flags & IPSET_SRC ? "SRC" : "DST",
3598 + NIPQUAD(skb->nh.iph->saddr),
3599 + NIPQUAD(skb->nh.iph->daddr));
3600 +
3601 + res = __testip(set,
3602 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
3603 + : skb->nh.iph->daddr),
3604 + hash_ip);
3605 + return (res < 0 ? 0 : res);
3606 +}
3607 +
3608 +static inline int
3609 +__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3610 +{
3611 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3612 +
3613 + if (ip < map->first_ip || ip > map->last_ip)
3614 + return -ERANGE;
3615 +
3616 + *hash_ip = ip & map->netmask;
3617 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3618 + if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
3619 + return -EEXIST;
3620 +
3621 + return 0;
3622 +}
3623 +
3624 +static int
3625 +addip(struct ip_set *set, const void *data, size_t size,
3626 + ip_set_ip_t *hash_ip)
3627 +{
3628 + struct ip_set_req_ipmap *req =
3629 + (struct ip_set_req_ipmap *) data;
3630 +
3631 + if (size != sizeof(struct ip_set_req_ipmap)) {
3632 + ip_set_printk("data length wrong (want %zu, have %zu)",
3633 + sizeof(struct ip_set_req_ipmap),
3634 + size);
3635 + return -EINVAL;
3636 + }
3637 + DP("%u.%u.%u.%u", HIPQUAD(req->ip));
3638 + return __addip(set, req->ip, hash_ip);
3639 +}
3640 +
3641 +static int
3642 +addip_kernel(struct ip_set *set, const struct sk_buff *skb,
3643 + u_int32_t flags, ip_set_ip_t *hash_ip)
3644 +{
3645 + return __addip(set,
3646 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
3647 + : skb->nh.iph->daddr),
3648 + hash_ip);
3649 +}
3650 +
3651 +static inline int
3652 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3653 +{
3654 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3655 +
3656 + if (ip < map->first_ip || ip > map->last_ip)
3657 + return -ERANGE;
3658 +
3659 + *hash_ip = ip & map->netmask;
3660 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3661 + if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
3662 + return -EEXIST;
3663 +
3664 + return 0;
3665 +}
3666 +
3667 +static int
3668 +delip(struct ip_set *set, const void *data, size_t size,
3669 + ip_set_ip_t *hash_ip)
3670 +{
3671 + struct ip_set_req_ipmap *req =
3672 + (struct ip_set_req_ipmap *) data;
3673 +
3674 + if (size != sizeof(struct ip_set_req_ipmap)) {
3675 + ip_set_printk("data length wrong (want %zu, have %zu)",
3676 + sizeof(struct ip_set_req_ipmap),
3677 + size);
3678 + return -EINVAL;
3679 + }
3680 + return __delip(set, req->ip, hash_ip);
3681 +}
3682 +
3683 +static int
3684 +delip_kernel(struct ip_set *set, const struct sk_buff *skb,
3685 + u_int32_t flags, ip_set_ip_t *hash_ip)
3686 +{
3687 + return __delip(set,
3688 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
3689 + : skb->nh.iph->daddr),
3690 + hash_ip);
3691 +}
3692 +
3693 +static int create(struct ip_set *set, const void *data, size_t size)
3694 +{
3695 + int newbytes;
3696 + struct ip_set_req_ipmap_create *req =
3697 + (struct ip_set_req_ipmap_create *) data;
3698 + struct ip_set_ipmap *map;
3699 +
3700 + if (size != sizeof(struct ip_set_req_ipmap_create)) {
3701 + ip_set_printk("data length wrong (want %zu, have %zu)",
3702 + sizeof(struct ip_set_req_ipmap_create),
3703 + size);
3704 + return -EINVAL;
3705 + }
3706 +
3707 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
3708 + HIPQUAD(req->from), HIPQUAD(req->to));
3709 +
3710 + if (req->from > req->to) {
3711 + DP("bad ip range");
3712 + return -ENOEXEC;
3713 + }
3714 +
3715 + if (req->to - req->from > MAX_RANGE) {
3716 + ip_set_printk("range too big (max %d addresses)",
3717 + MAX_RANGE);
3718 + return -ENOEXEC;
3719 + }
3720 +
3721 + map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
3722 + if (!map) {
3723 + DP("out of memory for %d bytes",
3724 + sizeof(struct ip_set_ipmap));
3725 + return -ENOMEM;
3726 + }
3727 + map->first_ip = req->from;
3728 + map->last_ip = req->to;
3729 + map->netmask = req->netmask;
3730 +
3731 + if (req->netmask == 0xFFFFFFFF) {
3732 + map->hosts = 1;
3733 + map->sizeid = map->last_ip - map->first_ip + 1;
3734 + } else {
3735 + unsigned int mask_bits, netmask_bits;
3736 + ip_set_ip_t mask;
3737 +
3738 +	map->first_ip &= map->netmask;	/* Should we bark instead? */
3739 +
3740 + mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
3741 + netmask_bits = mask_to_bits(map->netmask);
3742 +
3743 + if (!mask || netmask_bits <= mask_bits)
3744 + return -ENOEXEC;
3745 +
3746 + map->hosts = 2 << (32 - netmask_bits - 1);
3747 + map->sizeid = 2 << (netmask_bits - mask_bits - 1);
3748 + }
3749 + newbytes = bitmap_bytes(0, map->sizeid - 1);
3750 + map->members = kmalloc(newbytes, GFP_KERNEL);
3751 + if (!map->members) {
3752 + DP("out of memory for %d bytes", newbytes);
3753 + kfree(map);
3754 + return -ENOMEM;
3755 + }
3756 + memset(map->members, 0, newbytes);
3757 +
3758 + set->data = map;
3759 + return 0;
3760 +}
3761 +
3762 +static void destroy(struct ip_set *set)
3763 +{
3764 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3765 +
3766 + kfree(map->members);
3767 + kfree(map);
3768 +
3769 + set->data = NULL;
3770 +}
3771 +
3772 +static void flush(struct ip_set *set)
3773 +{
3774 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3775 + memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
3776 +}
3777 +
3778 +static void list_header(const struct ip_set *set, void *data)
3779 +{
3780 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3781 + struct ip_set_req_ipmap_create *header =
3782 + (struct ip_set_req_ipmap_create *) data;
3783 +
3784 + header->from = map->first_ip;
3785 + header->to = map->last_ip;
3786 + header->netmask = map->netmask;
3787 +}
3788 +
3789 +static int list_members_size(const struct ip_set *set)
3790 +{
3791 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3792 +
3793 + return bitmap_bytes(0, map->sizeid - 1);
3794 +}
3795 +
3796 +static void list_members(const struct ip_set *set, void *data)
3797 +{
3798 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3799 + int bytes = bitmap_bytes(0, map->sizeid - 1);
3800 +
3801 + memcpy(data, map->members, bytes);
3802 +}
3803 +
3804 +static struct ip_set_type ip_set_ipmap = {
3805 + .typename = SETTYPE_NAME,
3806 + .typecode = IPSET_TYPE_IP,
3807 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3808 + .create = &create,
3809 + .destroy = &destroy,
3810 + .flush = &flush,
3811 + .reqsize = sizeof(struct ip_set_req_ipmap),
3812 + .addip = &addip,
3813 + .addip_kernel = &addip_kernel,
3814 + .delip = &delip,
3815 + .delip_kernel = &delip_kernel,
3816 + .testip = &testip,
3817 + .testip_kernel = &testip_kernel,
3818 + .header_size = sizeof(struct ip_set_req_ipmap_create),
3819 + .list_header = &list_header,
3820 + .list_members_size = &list_members_size,
3821 + .list_members = &list_members,
3822 + .me = THIS_MODULE,
3823 +};
3824 +
3825 +MODULE_LICENSE("GPL");
3826 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3827 +MODULE_DESCRIPTION("ipmap type of IP sets");
3828 +
3829 +static int __init init(void)
3830 +{
3831 + return ip_set_register_set_type(&ip_set_ipmap);
3832 +}
3833 +
3834 +static void __exit fini(void)
3835 +{
3836 + /* FIXME: possible race with ip_set_create() */
3837 + ip_set_unregister_set_type(&ip_set_ipmap);
3838 +}
3839 +
3840 +module_init(init);
3841 +module_exit(fini);
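The ipmap type above keeps one bit per (masked) address between first_ip and last_ip, and create() sizes the bitmap from the netmask: hosts = 2^(32 - netmask_bits) addresses collapse onto a single bit. The standalone user-space sketch below illustrates the indexing this implies; ip_to_id_sketch() is a hypothetical stand-in for the module's ip_to_id() helper, which is assumed to divide the masked offset by map->hosts.

/*
 * Standalone sketch of the bitmap indexing the ipmap type implies.
 * ip_to_id_sketch() is an assumption mirroring what ip_to_id() is
 * expected to do, based on the create() arithmetic above.
 * Host byte order throughout.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ip_to_id_sketch(uint32_t first_ip, uint32_t netmask,
                                uint32_t hosts, uint32_t ip)
{
        return ((ip & netmask) - first_ip) / hosts;
}

int main(void)
{
        uint32_t first_ip = 0xC0A80000;     /* 192.168.0.0 */
        uint32_t netmask  = 0xFFFFFF00;     /* whole /24 networks as members */
        uint32_t hosts    = 256;            /* 2^(32 - 24) addresses per bit */
        uint32_t ip       = 0xC0A80A07;     /* 192.168.10.7 */

        /* prints "bit index 10": the 11th /24 of the range */
        printf("bit index %u\n", ip_to_id_sketch(first_ip, netmask, hosts, ip));
        return 0;
}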
3842 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.19.dev/net/ipv4/netfilter/ip_set_iptree.c
3843 --- linux-2.6.19.old/net/ipv4/netfilter/ip_set_iptree.c 1970-01-01 01:00:00.000000000 +0100
3844 +++ linux-2.6.19.dev/net/ipv4/netfilter/ip_set_iptree.c 2006-12-14 03:13:43.000000000 +0100
3845 @@ -0,0 +1,510 @@
3846 +/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3847 + *
3848 + * This program is free software; you can redistribute it and/or modify
3849 + * it under the terms of the GNU General Public License version 2 as
3850 + * published by the Free Software Foundation.
3851 + */
3852 +
3853 +/* Kernel module implementing an IP set type: the iptree type */
3854 +
3855 +#include <linux/module.h>
3856 +#include <linux/ip.h>
3857 +#include <linux/skbuff.h>
3858 +#include <linux/slab.h>
3859 +#include <linux/delay.h>
3860 +#include <linux/netfilter_ipv4/ip_tables.h>
3861 +#include <linux/netfilter_ipv4/ip_set.h>
3862 +#include <linux/errno.h>
3863 +#include <asm/uaccess.h>
3864 +#include <asm/bitops.h>
3865 +#include <linux/spinlock.h>
3866 +
3867 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
3868 +
3869 +/* Garbage collection interval in seconds: */
3870 +#define IPTREE_GC_TIME 5*60
3871 +/* Sleep this many milliseconds before trying again
3872 + * to delete the gc timer when destroying a set */
3873 +#define IPTREE_DESTROY_SLEEP 100
3874 +
3875 +static kmem_cache_t *branch_cachep;
3876 +static kmem_cache_t *leaf_cachep;
3877 +
3878 +#define ABCD(a,b,c,d,addrp) do { \
3879 + a = ((unsigned char *)addrp)[3]; \
3880 + b = ((unsigned char *)addrp)[2]; \
3881 + c = ((unsigned char *)addrp)[1]; \
3882 + d = ((unsigned char *)addrp)[0]; \
3883 +} while (0)
3884 +
3885 +#define TESTIP_WALK(map, elem, branch) do { \
3886 + if ((map)->tree[elem]) { \
3887 + branch = (map)->tree[elem]; \
3888 + } else \
3889 + return 0; \
3890 +} while (0)
3891 +
3892 +static inline int
3893 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3894 +{
3895 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
3896 + struct ip_set_iptreeb *btree;
3897 + struct ip_set_iptreec *ctree;
3898 + struct ip_set_iptreed *dtree;
3899 + unsigned char a,b,c,d;
3900 +
3901 + *hash_ip = ip;
3902 + ABCD(a, b, c, d, hash_ip);
3903 + DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
3904 + TESTIP_WALK(map, a, btree);
3905 + TESTIP_WALK(btree, b, ctree);
3906 + TESTIP_WALK(ctree, c, dtree);
3907 + DP("%lu %lu", dtree->expires[d], jiffies);
3908 + return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
3909 + : dtree->expires[d]);
3910 +}
3911 +
3912 +static int
3913 +testip(struct ip_set *set, const void *data, size_t size,
3914 + ip_set_ip_t *hash_ip)
3915 +{
3916 + struct ip_set_req_iptree *req =
3917 + (struct ip_set_req_iptree *) data;
3918 +
3919 + if (size != sizeof(struct ip_set_req_iptree)) {
3920 + ip_set_printk("data length wrong (want %zu, have %zu)",
3921 + sizeof(struct ip_set_req_iptree),
3922 + size);
3923 + return -EINVAL;
3924 + }
3925 + return __testip(set, req->ip, hash_ip);
3926 +}
3927 +
3928 +static int
3929 +testip_kernel(struct ip_set *set,
3930 + const struct sk_buff *skb,
3931 + u_int32_t flags,
3932 + ip_set_ip_t *hash_ip)
3933 +{
3934 + int res;
3935 +
3936 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
3937 + flags & IPSET_SRC ? "SRC" : "DST",
3938 + NIPQUAD(skb->nh.iph->saddr),
3939 + NIPQUAD(skb->nh.iph->daddr));
3940 +
3941 + res = __testip(set,
3942 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
3943 + : skb->nh.iph->daddr),
3944 + hash_ip);
3945 + return (res < 0 ? 0 : res);
3946 +}
3947 +
3948 +#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
3949 + if ((map)->tree[elem]) { \
3950 + DP("found %u", elem); \
3951 + branch = (map)->tree[elem]; \
3952 + } else { \
3953 + branch = (type *) \
3954 + kmem_cache_alloc(cachep, GFP_KERNEL); \
3955 + if (branch == NULL) \
3956 + return -ENOMEM; \
3957 + memset(branch, 0, sizeof(*branch)); \
3958 + (map)->tree[elem] = branch; \
3959 + DP("alloc %u", elem); \
3960 + } \
3961 +} while (0)
3962 +
3963 +static inline int
3964 +__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
3965 + ip_set_ip_t *hash_ip)
3966 +{
3967 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
3968 + struct ip_set_iptreeb *btree;
3969 + struct ip_set_iptreec *ctree;
3970 + struct ip_set_iptreed *dtree;
3971 + unsigned char a,b,c,d;
3972 + int ret = 0;
3973 +
3974 + *hash_ip = ip;
3975 + ABCD(a, b, c, d, hash_ip);
3976 + DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
3977 + ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
3978 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
3979 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
3980 + if (dtree->expires[d]
3981 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
3982 + ret = -EEXIST;
3983 + dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
3984 + DP("%u %lu", d, dtree->expires[d]);
3985 + return ret;
3986 +}
3987 +
3988 +static int
3989 +addip(struct ip_set *set, const void *data, size_t size,
3990 + ip_set_ip_t *hash_ip)
3991 +{
3992 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
3993 + struct ip_set_req_iptree *req =
3994 + (struct ip_set_req_iptree *) data;
3995 +
3996 + if (size != sizeof(struct ip_set_req_iptree)) {
3997 + ip_set_printk("data length wrong (want %zu, have %zu)",
3998 + sizeof(struct ip_set_req_iptree),
3999 + size);
4000 + return -EINVAL;
4001 + }
4002 + DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
4003 + return __addip(set, req->ip,
4004 + req->timeout ? req->timeout : map->timeout,
4005 + hash_ip);
4006 +}
4007 +
4008 +static int
4009 +addip_kernel(struct ip_set *set, const struct sk_buff *skb,
4010 + u_int32_t flags, ip_set_ip_t *hash_ip)
4011 +{
4012 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4013 +
4014 + return __addip(set,
4015 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
4016 + : skb->nh.iph->daddr),
4017 + map->timeout,
4018 + hash_ip);
4019 +}
4020 +
4021 +#define DELIP_WALK(map, elem, branch) do { \
4022 + if ((map)->tree[elem]) { \
4023 + branch = (map)->tree[elem]; \
4024 + } else \
4025 + return -EEXIST; \
4026 +} while (0)
4027 +
4028 +static inline int
4029 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4030 +{
4031 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4032 + struct ip_set_iptreeb *btree;
4033 + struct ip_set_iptreec *ctree;
4034 + struct ip_set_iptreed *dtree;
4035 + unsigned char a,b,c,d;
4036 +
4037 + *hash_ip = ip;
4038 + ABCD(a, b, c, d, hash_ip);
4039 + DELIP_WALK(map, a, btree);
4040 + DELIP_WALK(btree, b, ctree);
4041 + DELIP_WALK(ctree, c, dtree);
4042 +
4043 + if (dtree->expires[d]) {
4044 + dtree->expires[d] = 0;
4045 + return 0;
4046 + }
4047 + return -EEXIST;
4048 +}
4049 +
4050 +static int
4051 +delip(struct ip_set *set, const void *data, size_t size,
4052 + ip_set_ip_t *hash_ip)
4053 +{
4054 + struct ip_set_req_iptree *req =
4055 + (struct ip_set_req_iptree *) data;
4056 +
4057 + if (size != sizeof(struct ip_set_req_iptree)) {
4058 + ip_set_printk("data length wrong (want %zu, have %zu)",
4059 + sizeof(struct ip_set_req_iptree),
4060 + size);
4061 + return -EINVAL;
4062 + }
4063 + return __delip(set, req->ip, hash_ip);
4064 +}
4065 +
4066 +static int
4067 +delip_kernel(struct ip_set *set, const struct sk_buff *skb,
4068 + u_int32_t flags, ip_set_ip_t *hash_ip)
4069 +{
4070 + return __delip(set,
4071 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
4072 + : skb->nh.iph->daddr),
4073 + hash_ip);
4074 +}
4075 +
4076 +#define LOOP_WALK_BEGIN(map, i, branch) \
4077 + for (i = 0; i < 255; i++) { \
4078 + if (!(map)->tree[i]) \
4079 + continue; \
4080 + branch = (map)->tree[i]
4081 +
4082 +#define LOOP_WALK_END }
4083 +
4084 +static void ip_tree_gc(unsigned long ul_set)
4085 +{
4086 + struct ip_set *set = (void *) ul_set;
4087 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4088 + struct ip_set_iptreeb *btree;
4089 + struct ip_set_iptreec *ctree;
4090 + struct ip_set_iptreed *dtree;
4091 + unsigned char a,b,c,d;
4092 + unsigned char i,j,k;
4093 +
4094 + i = j = k = 0;
4095 + DP("gc: %s", set->name);
4096 + write_lock_bh(&set->lock);
4097 + LOOP_WALK_BEGIN(map, a, btree);
4098 + LOOP_WALK_BEGIN(btree, b, ctree);
4099 + LOOP_WALK_BEGIN(ctree, c, dtree);
4100 + for (d = 0; d < 255; d++) {
4101 + if (dtree->expires[d]) {
4102 + DP("gc: %u %u %u %u: expires %lu jiffies %lu",
4103 + a, b, c, d,
4104 + dtree->expires[d], jiffies);
4105 + if (map->timeout
4106 + && time_before(dtree->expires[d], jiffies))
4107 + dtree->expires[d] = 0;
4108 + else
4109 + k = 1;
4110 + }
4111 + }
4112 + if (k == 0) {
4113 + DP("gc: %s: leaf %u %u %u empty",
4114 + set->name, a, b, c);
4115 + kmem_cache_free(leaf_cachep, dtree);
4116 + ctree->tree[c] = NULL;
4117 + } else {
4118 + DP("gc: %s: leaf %u %u %u not empty",
4119 + set->name, a, b, c);
4120 + j = 1;
4121 + k = 0;
4122 + }
4123 + LOOP_WALK_END;
4124 + if (j == 0) {
4125 + DP("gc: %s: branch %u %u empty",
4126 + set->name, a, b);
4127 + kmem_cache_free(branch_cachep, ctree);
4128 + btree->tree[b] = NULL;
4129 + } else {
4130 + DP("gc: %s: branch %u %u not empty",
4131 + set->name, a, b);
4132 + i = 1;
4133 + j = k = 0;
4134 + }
4135 + LOOP_WALK_END;
4136 + if (i == 0) {
4137 + DP("gc: %s: branch %u empty",
4138 + set->name, a);
4139 + kmem_cache_free(branch_cachep, btree);
4140 + map->tree[a] = NULL;
4141 + } else {
4142 + DP("gc: %s: branch %u not empty",
4143 + set->name, a);
4144 + i = j = k = 0;
4145 + }
4146 + LOOP_WALK_END;
4147 + write_unlock_bh(&set->lock);
4148 +
4149 + map->gc.expires = jiffies + map->gc_interval * HZ;
4150 + add_timer(&map->gc);
4151 +}
4152 +
4153 +static int create(struct ip_set *set, const void *data, size_t size)
4154 +{
4155 + struct ip_set_req_iptree_create *req =
4156 + (struct ip_set_req_iptree_create *) data;
4157 + struct ip_set_iptree *map;
4158 +
4159 + if (size != sizeof(struct ip_set_req_iptree_create)) {
4160 + ip_set_printk("data length wrong (want %zu, have %zu)",
4161 + sizeof(struct ip_set_req_iptree_create),
4162 + size);
4163 + return -EINVAL;
4164 + }
4165 +
4166 + map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
4167 + if (!map) {
4168 + DP("out of memory for %d bytes",
4169 + sizeof(struct ip_set_iptree));
4170 + return -ENOMEM;
4171 + }
4172 + memset(map, 0, sizeof(*map));
4173 + map->timeout = req->timeout;
4174 + set->data = map;
4175 +
4176 + /* If there is no timeout for the entries,
4177 + * we still have to call gc because delete
4178 + * does not clean up empty branches */
4179 + map->gc_interval = IPTREE_GC_TIME;
4180 + init_timer(&map->gc);
4181 + map->gc.data = (unsigned long) set;
4182 + map->gc.function = ip_tree_gc;
4183 + map->gc.expires = jiffies + map->gc_interval * HZ;
4184 + add_timer(&map->gc);
4185 +
4186 + return 0;
4187 +}
4188 +
4189 +static void __flush(struct ip_set_iptree *map)
4190 +{
4191 + struct ip_set_iptreeb *btree;
4192 + struct ip_set_iptreec *ctree;
4193 + struct ip_set_iptreed *dtree;
4194 + unsigned int a,b,c;
4195 +
4196 + LOOP_WALK_BEGIN(map, a, btree);
4197 + LOOP_WALK_BEGIN(btree, b, ctree);
4198 + LOOP_WALK_BEGIN(ctree, c, dtree);
4199 + kmem_cache_free(leaf_cachep, dtree);
4200 + LOOP_WALK_END;
4201 + kmem_cache_free(branch_cachep, ctree);
4202 + LOOP_WALK_END;
4203 + kmem_cache_free(branch_cachep, btree);
4204 + LOOP_WALK_END;
4205 +}
4206 +
4207 +static void destroy(struct ip_set *set)
4208 +{
4209 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4210 +
4211 + while (!del_timer(&map->gc))
4212 + msleep(IPTREE_DESTROY_SLEEP);
4213 + __flush(map);
4214 + kfree(map);
4215 + set->data = NULL;
4216 +}
4217 +
4218 +static void flush(struct ip_set *set)
4219 +{
4220 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4221 + unsigned int timeout = map->timeout;
4222 +
4223 + __flush(map);
4224 + memset(map, 0, sizeof(*map));
4225 + map->timeout = timeout;
4226 +}
4227 +
4228 +static void list_header(const struct ip_set *set, void *data)
4229 +{
4230 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4231 + struct ip_set_req_iptree_create *header =
4232 + (struct ip_set_req_iptree_create *) data;
4233 +
4234 + header->timeout = map->timeout;
4235 +}
4236 +
4237 +static int list_members_size(const struct ip_set *set)
4238 +{
4239 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4240 + struct ip_set_iptreeb *btree;
4241 + struct ip_set_iptreec *ctree;
4242 + struct ip_set_iptreed *dtree;
4243 + unsigned char a,b,c,d;
4244 + unsigned int count = 0;
4245 +
4246 + LOOP_WALK_BEGIN(map, a, btree);
4247 + LOOP_WALK_BEGIN(btree, b, ctree);
4248 + LOOP_WALK_BEGIN(ctree, c, dtree);
4249 + for (d = 0; d < 255; d++) {
4250 + if (dtree->expires[d]
4251 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
4252 + count++;
4253 + }
4254 + LOOP_WALK_END;
4255 + LOOP_WALK_END;
4256 + LOOP_WALK_END;
4257 +
4258 + DP("members %u", count);
4259 + return (count * sizeof(struct ip_set_req_iptree));
4260 +}
4261 +
4262 +static void list_members(const struct ip_set *set, void *data)
4263 +{
4264 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4265 + struct ip_set_iptreeb *btree;
4266 + struct ip_set_iptreec *ctree;
4267 + struct ip_set_iptreed *dtree;
4268 + unsigned char a,b,c,d;
4269 + size_t offset = 0;
4270 + struct ip_set_req_iptree *entry;
4271 +
4272 + LOOP_WALK_BEGIN(map, a, btree);
4273 + LOOP_WALK_BEGIN(btree, b, ctree);
4274 + LOOP_WALK_BEGIN(ctree, c, dtree);
4275 + for (d = 0; d < 255; d++) {
4276 + if (dtree->expires[d]
4277 + && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
4278 + entry = (struct ip_set_req_iptree *)(data + offset);
4279 + entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
4280 + entry->timeout = !map->timeout ? 0
4281 + : (dtree->expires[d] - jiffies)/HZ;
4282 + offset += sizeof(struct ip_set_req_iptree);
4283 + }
4284 + }
4285 + LOOP_WALK_END;
4286 + LOOP_WALK_END;
4287 + LOOP_WALK_END;
4288 +}
4289 +
4290 +static struct ip_set_type ip_set_iptree = {
4291 + .typename = SETTYPE_NAME,
4292 + .typecode = IPSET_TYPE_IP,
4293 + .protocol_version = IP_SET_PROTOCOL_VERSION,
4294 + .create = &create,
4295 + .destroy = &destroy,
4296 + .flush = &flush,
4297 + .reqsize = sizeof(struct ip_set_req_iptree),
4298 + .addip = &addip,
4299 + .addip_kernel = &addip_kernel,
4300 + .delip = &delip,
4301 + .delip_kernel = &delip_kernel,
4302 + .testip = &testip,
4303 + .testip_kernel = &testip_kernel,
4304 + .header_size = sizeof(struct ip_set_req_iptree_create),
4305 + .list_header = &list_header,
4306 + .list_members_size = &list_members_size,
4307 + .list_members = &list_members,
4308 + .me = THIS_MODULE,
4309 +};
4310 +
4311 +MODULE_LICENSE("GPL");
4312 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4313 +MODULE_DESCRIPTION("iptree type of IP sets");
4314 +
4315 +static int __init init(void)
4316 +{
4317 + int ret;
4318 +
4319 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
4320 + sizeof(struct ip_set_iptreeb),
4321 + 0, 0, NULL, NULL);
4322 + if (!branch_cachep) {
4323 + printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
4324 + ret = -ENOMEM;
4325 + goto out;
4326 + }
4327 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
4328 + sizeof(struct ip_set_iptreed),
4329 + 0, 0, NULL, NULL);
4330 + if (!leaf_cachep) {
4331 + printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
4332 + ret = -ENOMEM;
4333 + goto free_branch;
4334 + }
4335 + ret = ip_set_register_set_type(&ip_set_iptree);
4336 + if (ret == 0)
4337 + goto out;
4338 +
4339 + kmem_cache_destroy(leaf_cachep);
4340 + free_branch:
4341 + kmem_cache_destroy(branch_cachep);
4342 + out:
4343 + return ret;
4344 +}
4345 +
4346 +static void __exit fini(void)
4347 +{
4348 + /* FIXME: possible race with ip_set_create() */
4349 + ip_set_unregister_set_type(&ip_set_iptree);
4350 + kmem_cache_destroy(leaf_cachep);
4351 + kmem_cache_destroy(branch_cachep);
4352 +}
4353 +
4354 +module_init(init);
4355 +module_exit(fini);
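The iptree type above resolves an address through three levels of branch nodes plus a leaf of expiry times, using the ABCD() macro to split the host-order address into four per-level indices. The standalone sketch below reuses the macro verbatim to show that split; note the byte picking is endianness-dependent as written, and the printed path assumes a little-endian host where byte [3] is the most significant.

/*
 * Standalone sketch: how ABCD() turns a host-order IPv4 address into
 * the a -> b -> c -> d path walked by the iptree lookups above.
 */
#include <stdio.h>
#include <stdint.h>

#define ABCD(a, b, c, d, addrp) do {            \
        a = ((unsigned char *)addrp)[3];        \
        b = ((unsigned char *)addrp)[2];        \
        c = ((unsigned char *)addrp)[1];        \
        d = ((unsigned char *)addrp)[0];        \
} while (0)

int main(void)
{
        uint32_t ip = 0x0A010203;       /* 10.1.2.3 in host byte order */
        unsigned char a, b, c, d;

        ABCD(a, b, c, d, &ip);
        /* on a little-endian host this prints "tree path: 10 -> 1 -> 2 -> 3" */
        printf("tree path: %u -> %u -> %u -> %u\n", a, b, c, d);
        return 0;
}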
4356 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.19.dev/net/ipv4/netfilter/ip_set_macipmap.c
4357 --- linux-2.6.19.old/net/ipv4/netfilter/ip_set_macipmap.c 1970-01-01 01:00:00.000000000 +0100
4358 +++ linux-2.6.19.dev/net/ipv4/netfilter/ip_set_macipmap.c 2006-12-14 03:13:43.000000000 +0100
4359 @@ -0,0 +1,338 @@
4360 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
4361 + * Patrick Schaaf <bof@bof.de>
4362 + * Martin Josefsson <gandalf@wlug.westbo.se>
4363 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4364 + *
4365 + * This program is free software; you can redistribute it and/or modify
4366 + * it under the terms of the GNU General Public License version 2 as
4367 + * published by the Free Software Foundation.
4368 + */
4369 +
4370 +/* Kernel module implementing an IP set type: the macipmap type */
4371 +
4372 +#include <linux/module.h>
4373 +#include <linux/ip.h>
4374 +#include <linux/skbuff.h>
4375 +#include <linux/netfilter_ipv4/ip_tables.h>
4376 +#include <linux/netfilter_ipv4/ip_set.h>
4377 +#include <linux/errno.h>
4378 +#include <asm/uaccess.h>
4379 +#include <asm/bitops.h>
4380 +#include <linux/spinlock.h>
4381 +#include <linux/if_ether.h>
4382 +#include <linux/vmalloc.h>
4383 +
4384 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
4385 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
4386 +
4387 +static int
4388 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
4389 +{
4390 + struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
4391 + struct ip_set_macip *table = (struct ip_set_macip *) map->members;
4392 + struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
4393 +
4394 + if (size != sizeof(struct ip_set_req_macipmap)) {
4395 + ip_set_printk("data length wrong (want %zu, have %zu)",
4396 + sizeof(struct ip_set_req_macipmap),
4397 + size);
4398 + return -EINVAL;
4399 + }
4400 +
4401 + if (req->ip < map->first_ip || req->ip > map->last_ip)
4402 + return -ERANGE;
4403 +
4404 + *hash_ip = req->ip;
4405 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
4406 + set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
4407 + if (test_bit(IPSET_MACIP_ISSET,
4408 + (void *) &table[req->ip - map->first_ip].flags)) {
4409 + return (memcmp(req->ethernet,
4410 + &table[req->ip - map->first_ip].ethernet,
4411 + ETH_ALEN) == 0);
4412 + } else {
4413 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
4414 + }
4415 +}
4416 +
4417 +static int
4418 +testip_kernel(struct ip_set *set, const struct sk_buff *skb,
4419 + u_int32_t flags, ip_set_ip_t *hash_ip)
4420 +{
4421 + struct ip_set_macipmap *map =
4422 + (struct ip_set_macipmap *) set->data;
4423 + struct ip_set_macip *table =
4424 + (struct ip_set_macip *) map->members;
4425 + ip_set_ip_t ip;
4426 +
4427 + ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
4428 + : skb->nh.iph->daddr);
4429 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4430 + flags & IPSET_SRC ? "SRC" : "DST",
4431 + NIPQUAD(skb->nh.iph->saddr),
4432 + NIPQUAD(skb->nh.iph->daddr));
4433 +
4434 + if (ip < map->first_ip || ip > map->last_ip)
4435 + return 0;
4436 +
4437 + *hash_ip = ip;
4438 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
4439 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
4440 + if (test_bit(IPSET_MACIP_ISSET,
4441 + (void *) &table[ip - map->first_ip].flags)) {
4442 + /* Is mac pointer valid?
4443 + * If so, compare... */
4444 + return (skb->mac.raw >= skb->head
4445 + && (skb->mac.raw + ETH_HLEN) <= skb->data
4446 + && (memcmp(eth_hdr(skb)->h_source,
4447 + &table[ip - map->first_ip].ethernet,
4448 + ETH_ALEN) == 0));
4449 + } else {
4450 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
4451 + }
4452 +}
4453 +
4454 +/* returns 0 on success */
4455 +static inline int
4456 +__addip(struct ip_set *set,
4457 + ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
4458 +{
4459 + struct ip_set_macipmap *map =
4460 + (struct ip_set_macipmap *) set->data;
4461 + struct ip_set_macip *table =
4462 + (struct ip_set_macip *) map->members;
4463 +
4464 + if (ip < map->first_ip || ip > map->last_ip)
4465 + return -ERANGE;
4466 + if (test_and_set_bit(IPSET_MACIP_ISSET,
4467 + (void *) &table[ip - map->first_ip].flags))
4468 + return -EEXIST;
4469 +
4470 + *hash_ip = ip;
4471 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
4472 + memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
4473 + return 0;
4474 +}
4475 +
4476 +static int
4477 +addip(struct ip_set *set, const void *data, size_t size,
4478 + ip_set_ip_t *hash_ip)
4479 +{
4480 + struct ip_set_req_macipmap *req =
4481 + (struct ip_set_req_macipmap *) data;
4482 +
4483 + if (size != sizeof(struct ip_set_req_macipmap)) {
4484 + ip_set_printk("data length wrong (want %zu, have %zu)",
4485 + sizeof(struct ip_set_req_macipmap),
4486 + size);
4487 + return -EINVAL;
4488 + }
4489 + return __addip(set, req->ip, req->ethernet, hash_ip);
4490 +}
4491 +
4492 +static int
4493 +addip_kernel(struct ip_set *set, const struct sk_buff *skb,
4494 + u_int32_t flags, ip_set_ip_t *hash_ip)
4495 +{
4496 + ip_set_ip_t ip;
4497 +
4498 + ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
4499 + : skb->nh.iph->daddr);
4500 +
4501 + if (!(skb->mac.raw >= skb->head
4502 + && (skb->mac.raw + ETH_HLEN) <= skb->data))
4503 + return -EINVAL;
4504 +
4505 + return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
4506 +}
4507 +
4508 +static inline int
4509 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4510 +{
4511 + struct ip_set_macipmap *map =
4512 + (struct ip_set_macipmap *) set->data;
4513 + struct ip_set_macip *table =
4514 + (struct ip_set_macip *) map->members;
4515 +
4516 + if (ip < map->first_ip || ip > map->last_ip)
4517 + return -ERANGE;
4518 + if (!test_and_clear_bit(IPSET_MACIP_ISSET,
4519 + (void *)&table[ip - map->first_ip].flags))
4520 + return -EEXIST;
4521 +
4522 + *hash_ip = ip;
4523 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
4524 + return 0;
4525 +}
4526 +
4527 +static int
4528 +delip(struct ip_set *set, const void *data, size_t size,
4529 + ip_set_ip_t *hash_ip)
4530 +{
4531 + struct ip_set_req_macipmap *req =
4532 + (struct ip_set_req_macipmap *) data;
4533 +
4534 + if (size != sizeof(struct ip_set_req_macipmap)) {
4535 + ip_set_printk("data length wrong (want %zu, have %zu)",
4536 + sizeof(struct ip_set_req_macipmap),
4537 + size);
4538 + return -EINVAL;
4539 + }
4540 + return __delip(set, req->ip, hash_ip);
4541 +}
4542 +
4543 +static int
4544 +delip_kernel(struct ip_set *set, const struct sk_buff *skb,
4545 + u_int32_t flags, ip_set_ip_t *hash_ip)
4546 +{
4547 + return __delip(set,
4548 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
4549 + : skb->nh.iph->daddr),
4550 + hash_ip);
4551 +}
4552 +
4553 +static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
4554 +{
4555 + return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
4556 +}
4557 +
4558 +static int create(struct ip_set *set, const void *data, size_t size)
4559 +{
4560 + int newbytes;
4561 + struct ip_set_req_macipmap_create *req =
4562 + (struct ip_set_req_macipmap_create *) data;
4563 + struct ip_set_macipmap *map;
4564 +
4565 + if (size != sizeof(struct ip_set_req_macipmap_create)) {
4566 + ip_set_printk("data length wrong (want %zu, have %zu)",
4567 + sizeof(struct ip_set_req_macipmap_create),
4568 + size);
4569 + return -EINVAL;
4570 + }
4571 +
4572 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
4573 + HIPQUAD(req->from), HIPQUAD(req->to));
4574 +
4575 + if (req->from > req->to) {
4576 + DP("bad ip range");
4577 + return -ENOEXEC;
4578 + }
4579 +
4580 + if (req->to - req->from > MAX_RANGE) {
4581 + ip_set_printk("range too big (max %d addresses)",
4582 + MAX_RANGE);
4583 + return -ENOEXEC;
4584 + }
4585 +
4586 + map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
4587 + if (!map) {
4588 + DP("out of memory for %d bytes",
4589 + sizeof(struct ip_set_macipmap));
4590 + return -ENOMEM;
4591 + }
4592 + map->flags = req->flags;
4593 + map->first_ip = req->from;
4594 + map->last_ip = req->to;
4595 + newbytes = members_size(map->first_ip, map->last_ip);
4596 + map->members = ip_set_malloc(newbytes);
4597 + if (!map->members) {
4598 + DP("out of memory for %d bytes", newbytes);
4599 + kfree(map);
4600 + return -ENOMEM;
4601 + }
4602 + memset(map->members, 0, newbytes);
4603 +
4604 + set->data = map;
4605 + return 0;
4606 +}
4607 +
4608 +static void destroy(struct ip_set *set)
4609 +{
4610 + struct ip_set_macipmap *map =
4611 + (struct ip_set_macipmap *) set->data;
4612 +
4613 + ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
4614 + kfree(map);
4615 +
4616 + set->data = NULL;
4617 +}
4618 +
4619 +static void flush(struct ip_set *set)
4620 +{
4621 + struct ip_set_macipmap *map =
4622 + (struct ip_set_macipmap *) set->data;
4623 + memset(map->members, 0, members_size(map->first_ip, map->last_ip));
4624 +}
4625 +
4626 +static void list_header(const struct ip_set *set, void *data)
4627 +{
4628 + struct ip_set_macipmap *map =
4629 + (struct ip_set_macipmap *) set->data;
4630 + struct ip_set_req_macipmap_create *header =
4631 + (struct ip_set_req_macipmap_create *) data;
4632 +
4633 + DP("list_header %x %x %u", map->first_ip, map->last_ip,
4634 + map->flags);
4635 +
4636 + header->from = map->first_ip;
4637 + header->to = map->last_ip;
4638 + header->flags = map->flags;
4639 +}
4640 +
4641 +static int list_members_size(const struct ip_set *set)
4642 +{
4643 + struct ip_set_macipmap *map =
4644 + (struct ip_set_macipmap *) set->data;
4645 +
4646 + return members_size(map->first_ip, map->last_ip);
4647 +}
4648 +
4649 +static void list_members(const struct ip_set *set, void *data)
4650 +{
4651 + struct ip_set_macipmap *map =
4652 + (struct ip_set_macipmap *) set->data;
4653 +
4654 + int bytes = members_size(map->first_ip, map->last_ip);
4655 +
4656 + memcpy(data, map->members, bytes);
4657 +}
4658 +
4659 +static struct ip_set_type ip_set_macipmap = {
4660 + .typename = SETTYPE_NAME,
4661 + .typecode = IPSET_TYPE_IP,
4662 + .protocol_version = IP_SET_PROTOCOL_VERSION,
4663 + .create = &create,
4664 + .destroy = &destroy,
4665 + .flush = &flush,
4666 + .reqsize = sizeof(struct ip_set_req_macipmap),
4667 + .addip = &addip,
4668 + .addip_kernel = &addip_kernel,
4669 + .delip = &delip,
4670 + .delip_kernel = &delip_kernel,
4671 + .testip = &testip,
4672 + .testip_kernel = &testip_kernel,
4673 + .header_size = sizeof(struct ip_set_req_macipmap_create),
4674 + .list_header = &list_header,
4675 + .list_members_size = &list_members_size,
4676 + .list_members = &list_members,
4677 + .me = THIS_MODULE,
4678 +};
4679 +
4680 +MODULE_LICENSE("GPL");
4681 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4682 +MODULE_DESCRIPTION("macipmap type of IP sets");
4683 +
4684 +static int __init init(void)
4685 +{
4686 + init_max_malloc_size();
4687 + return ip_set_register_set_type(&ip_set_macipmap);
4688 +}
4689 +
4690 +static void __exit fini(void)
4691 +{
4692 + /* FIXME: possible race with ip_set_create() */
4693 + ip_set_unregister_set_type(&ip_set_macipmap);
4694 +}
4695 +
4696 +module_init(init);
4697 +module_exit(fini);
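The macipmap type above stores, for every address in the range, a flags word plus the Ethernet address bound to it; a packet matches only when the ISSET bit is set and its source MAC equals the stored one, or, with IPSET_MACIP_MATCHUNSET, when the slot is still unset. A small standalone sketch of that per-slot logic follows; struct macip_slot and the MACIP_ISSET value are illustrative stand-ins, not the layout from ip_set_macipmap.h.

/*
 * Standalone sketch of the per-address slot check used by the macipmap
 * type above.  The struct layout and flag value here are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define MACIP_ISSET 0x01        /* stand-in for IPSET_MACIP_ISSET */

struct macip_slot {
        unsigned short flags;
        unsigned char  ethernet[ETH_ALEN];
};

/* Returns 1 when the packet's source MAC matches the MAC bound to its IP */
static int macip_match(const struct macip_slot *slot,
                       const unsigned char *src_mac, int match_unset)
{
        if (!(slot->flags & MACIP_ISSET))
                return match_unset;     /* IPSET_MACIP_MATCHUNSET behaviour */
        return memcmp(src_mac, slot->ethernet, ETH_ALEN) == 0;
}

int main(void)
{
        struct macip_slot slot = { MACIP_ISSET,
                                   { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
        unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("match: %d\n", macip_match(&slot, mac, 0));     /* prints 1 */
        return 0;
}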
4698 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.19.dev/net/ipv4/netfilter/ip_set_nethash.c
4699 --- linux-2.6.19.old/net/ipv4/netfilter/ip_set_nethash.c 1970-01-01 01:00:00.000000000 +0100
4700 +++ linux-2.6.19.dev/net/ipv4/netfilter/ip_set_nethash.c 2006-12-14 03:13:43.000000000 +0100
4701 @@ -0,0 +1,449 @@
4702 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4703 + *
4704 + * This program is free software; you can redistribute it and/or modify
4705 + * it under the terms of the GNU General Public License version 2 as
4706 + * published by the Free Software Foundation.
4707 + */
4708 +
4709 +/* Kernel module implementing a CIDR nethash set */
4710 +
4711 +#include <linux/module.h>
4712 +#include <linux/ip.h>
4713 +#include <linux/skbuff.h>
4714 +#include <linux/netfilter_ipv4/ip_tables.h>
4715 +#include <linux/netfilter_ipv4/ip_set.h>
4716 +#include <linux/errno.h>
4717 +#include <asm/uaccess.h>
4718 +#include <asm/bitops.h>
4719 +#include <linux/spinlock.h>
4720 +#include <linux/vmalloc.h>
4721 +#include <linux/random.h>
4722 +
4723 +#include <net/ip.h>
4724 +
4725 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
4726 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
4727 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4728 +#include <linux/netfilter_ipv4/ip_set_prime.h>
4729 +
4730 +static inline __u32
4731 +jhash_ip(const struct ip_set_nethash *map, ip_set_ip_t ip)
4732 +{
4733 + return jhash_1word(ip, map->initval);
4734 +}
4735 +
4736 +static inline __u32
4737 +randhash_ip(const struct ip_set_nethash *map, ip_set_ip_t ip)
4738 +{
4739 + return (1 + ip % map->prime);
4740 +}
4741 +
4742 +static inline __u32
4743 +hash_id_cidr(struct ip_set_nethash *map,
4744 + ip_set_ip_t ip,
4745 + unsigned char cidr,
4746 + ip_set_ip_t *hash_ip)
4747 +{
4748 + __u32 jhash, randhash, id;
4749 + u_int16_t i;
4750 +
4751 + *hash_ip = pack(ip, cidr);
4752 + jhash = jhash_ip(map, *hash_ip);
4753 + randhash = randhash_ip(map, *hash_ip);
4754 +
4755 + for (i = 0; i < map->probes; i++) {
4756 + id = (jhash + i * randhash) % map->hashsize;
4757 + DP("hash key: %u", id);
4758 + if (map->members[id] == *hash_ip)
4759 + return id;
4760 + }
4761 + return UINT_MAX;
4762 +}
4763 +
4764 +static inline __u32
4765 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4766 +{
4767 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
4768 + __u32 id = UINT_MAX;
4769 + int i;
4770 +
4771 + for (i = 0; i < 30 && map->cidr[i]; i++) {
4772 + id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
4773 + if (id != UINT_MAX)
4774 + break;
4775 + }
4776 + return id;
4777 +}
4778 +
4779 +static inline int
4780 +__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
4781 + ip_set_ip_t *hash_ip)
4782 +{
4783 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
4784 +
4785 + return (hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
4786 +}
4787 +
4788 +static inline int
4789 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4790 +{
4791 + return (hash_id(set, ip, hash_ip) != UINT_MAX);
4792 +}
4793 +
4794 +static int
4795 +testip(struct ip_set *set, const void *data, size_t size,
4796 + ip_set_ip_t *hash_ip)
4797 +{
4798 + struct ip_set_req_nethash *req =
4799 + (struct ip_set_req_nethash *) data;
4800 +
4801 + if (size != sizeof(struct ip_set_req_nethash)) {
4802 + ip_set_printk("data length wrong (want %zu, have %zu)",
4803 + sizeof(struct ip_set_req_nethash),
4804 + size);
4805 + return -EINVAL;
4806 + }
4807 + return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
4808 + : __testip_cidr(set, req->ip, req->cidr, hash_ip));
4809 +}
4810 +
4811 +static int
4812 +testip_kernel(struct ip_set *set, const struct sk_buff *skb,
4813 + u_int32_t flags, ip_set_ip_t *hash_ip)
4814 +{
4815 + return __testip(set,
4816 + ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
4817 + : skb->nh.iph->daddr),
4818 + hash_ip);
4819 +}
4820 +
4821 +static inline int
4822 +__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
4823 +{
4824 + __u32 jhash, randhash, probe;
4825 + u_int16_t i;
4826 +
4827 + jhash = jhash_ip(map, ip);
4828 + randhash = randhash_ip(map, ip);
4829 +
4830 + for (i = 0; i < map->probes; i++) {
4831 + probe = (jhash + i * randhash) % map->hashsize;
4832 + if (map->members[probe] == ip)
4833 + return -EEXIST;
4834 + if (!map->members[probe]) {
4835 + map->members[probe] = ip;
4836 + return 0;
4837 + }
4838 + }
4839 + /* Trigger rehashing */
4840 + return -EAGAIN;
4841 +}
4842 +
4843 +static inline int
4844 +__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
4845 + ip_set_ip_t *hash_ip)
4846 +{
4847 + *hash_ip = pack(ip, cidr);
4848 + DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
4849 +
4850 + return __addip_base(map, *hash_ip);
4851 +}
4852 +
4853 +static void
4854 +update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
4855 +{
4856 + unsigned char next;
4857 + int i;
4858 +
4859 + for (i = 0; i < 30 && map->cidr[i]; i++) {
4860 + if (map->cidr[i] == cidr) {
4861 + return;
4862 + } else if (map->cidr[i] < cidr) {
4863 + next = map->cidr[i];
4864 + map->cidr[i] = cidr;
4865 + cidr = next;
4866 + }
4867 + }
4868 + if (i < 30)
4869 + map->cidr[i] = cidr;
4870 +}
4871 +
4872 +static int
4873 +addip(struct ip_set *set, const void *data, size_t size,
4874 + ip_set_ip_t *hash_ip)
4875 +{
4876 + struct ip_set_req_nethash *req =
4877 + (struct ip_set_req_nethash *) data;
4878 + int ret;
4879 +
4880 + if (size != sizeof(struct ip_set_req_nethash)) {
4881 + ip_set_printk("data length wrong (want %zu, have %zu)",
4882 + sizeof(struct ip_set_req_nethash),
4883 + size);
4884 + return -EINVAL;
4885 + }
4886 + ret = __addip((struct ip_set_nethash *) set->data,
4887 + req->ip, req->cidr, hash_ip);
4888 +
4889 + if (ret == 0)
4890 + update_cidr_sizes((struct ip_set_nethash *) set->data,
4891 + req->cidr);
4892 +
4893 + return ret;
4894 +}
4895 +
4896 +static int
4897 +addip_kernel(struct ip_set *set, const struct sk_buff *skb,
4898 + u_int32_t flags, ip_set_ip_t *hash_ip)
4899 +{
4900 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
4901 + int ret = -ERANGE;
4902 + ip_set_ip_t ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
4903 + : skb->nh.iph->daddr);
4904 +
4905 + if (map->cidr[0])
4906 + ret = __addip(map, ip, map->cidr[0], hash_ip);
4907 +
4908 + return ret;
4909 +}
4910 +
4911 +static int retry(struct ip_set *set)
4912 +{
4913 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
4914 + ip_set_ip_t *members;
4915 + u_int32_t i, hashsize;
4916 + unsigned newbytes;
4917 + int res;
4918 + struct ip_set_nethash tmp = {
4919 + .hashsize = map->hashsize,
4920 + .probes = map->probes,
4921 + .resize = map->resize
4922 + };
4923 +
4924 + if (map->resize == 0)
4925 + return -ERANGE;
4926 +
4927 + memcpy(tmp.cidr, map->cidr, 30 * sizeof(unsigned char));
4928 + again:
4929 + res = 0;
4930 +
4931 + /* Calculate new parameters */
4932 + get_random_bytes(&tmp.initval, 4);
4933 + hashsize = tmp.hashsize + (tmp.hashsize * map->resize)/100;
4934 + if (hashsize == tmp.hashsize)
4935 + hashsize++;
4936 + tmp.prime = make_prime(hashsize);
4937 +
4938 + ip_set_printk("rehashing of set %s triggered: "
4939 + "hashsize grows from %u to %u",
4940 + set->name, tmp.hashsize, hashsize);
4941 + tmp.hashsize = hashsize;
4942 +
4943 + newbytes = hashsize * sizeof(ip_set_ip_t);
4944 + tmp.members = ip_set_malloc_atomic(newbytes);
4945 + if (!tmp.members) {
4946 + DP("out of memory for %d bytes", newbytes);
4947 + return -ENOMEM;
4948 + }
4949 + memset(tmp.members, 0, newbytes);
4950 +
4951 + write_lock_bh(&set->lock);
4952 + map = (struct ip_set_nethash *) set->data; /* Play safe */
4953 + for (i = 0; i < map->hashsize && res == 0; i++) {
4954 + if (map->members[i])
4955 + res = __addip_base(&tmp, map->members[i]);
4956 + }
4957 + if (res) {
4958 + /* Failure, try again */
4959 + write_unlock_bh(&set->lock);
4960 + ip_set_free(tmp.members, newbytes);
4961 + goto again;
4962 + }
4963 +
4964 + /* Success at resizing! */
4965 + members = map->members;
4966 + hashsize = map->hashsize;
4967 +
4968 + map->initval = tmp.initval;
4969 + map->prime = tmp.prime;
4970 + map->hashsize = tmp.hashsize;
4971 + map->members = tmp.members;
4972 + write_unlock_bh(&set->lock);
4973 +
4974 + ip_set_free(members, hashsize * sizeof(ip_set_ip_t));
4975 +
4976 + return 0;
4977 +}
4978 +
4979 +static inline int
4980 +__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
4981 + ip_set_ip_t *hash_ip)
4982 +{
4983 + ip_set_ip_t id = hash_id_cidr(map, ip, cidr, hash_ip);
4984 +
4985 + if (id == UINT_MAX)
4986 + return -EEXIST;
4987 +
4988 + map->members[id] = 0;
4989 + return 0;
4990 +}
4991 +
4992 +static int
4993 +delip(struct ip_set *set, const void *data, size_t size,
4994 + ip_set_ip_t *hash_ip)
4995 +{
4996 + struct ip_set_req_nethash *req =
4997 + (struct ip_set_req_nethash *) data;
4998 +
4999 + if (size != sizeof(struct ip_set_req_nethash)) {
5000 + ip_set_printk("data length wrong (want %zu, have %zu)",
5001 + sizeof(struct ip_set_req_nethash),
5002 + size);
5003 + return -EINVAL;
5004 + }
5005 + /* TODO: no garbage collection in map->cidr */
5006 + return __delip((struct ip_set_nethash *) set->data,
5007 + req->ip, req->cidr, hash_ip);
5008 +}
5009 +
5010 +static int
5011 +delip_kernel(struct ip_set *set, const struct sk_buff *skb,
5012 + u_int32_t flags, ip_set_ip_t *hash_ip)
5013 +{
5014 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5015 + int ret = -ERANGE;
5016 + ip_set_ip_t ip = ntohl(flags & IPSET_SRC ? skb->nh.iph->saddr
5017 + : skb->nh.iph->daddr);
5018 +
5019 + if (map->cidr[0])
5020 + ret = __delip(map, ip, map->cidr[0], hash_ip);
5021 +
5022 + return ret;
5023 +}
5024 +
5025 +static int create(struct ip_set *set, const void *data, size_t size)
5026 +{
5027 + unsigned newbytes;
5028 + struct ip_set_req_nethash_create *req =
5029 + (struct ip_set_req_nethash_create *) data;
5030 + struct ip_set_nethash *map;
5031 +
5032 + if (size != sizeof(struct ip_set_req_nethash_create)) {
5033 + ip_set_printk("data length wrong (want %zu, have %zu)",
5034 + sizeof(struct ip_set_req_nethash_create),
5035 + size);
5036 + return -EINVAL;
5037 + }
5038 +
5039 + if (req->hashsize < 1) {
5040 + ip_set_printk("hashsize too small");
5041 + return -ENOEXEC;
5042 + }
5043 +
5044 + map = kmalloc(sizeof(struct ip_set_nethash), GFP_KERNEL);
5045 + if (!map) {
5046 + DP("out of memory for %d bytes",
5047 + sizeof(struct ip_set_nethash));
5048 + return -ENOMEM;
5049 + }
5050 + get_random_bytes(&map->initval, 4);
5051 + map->prime = make_prime(req->hashsize);
5052 + map->hashsize = req->hashsize;
5053 + map->probes = req->probes;
5054 + map->resize = req->resize;
5055 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
5056 + newbytes = map->hashsize * sizeof(ip_set_ip_t);
5057 + map->members = ip_set_malloc(newbytes);
5058 + if (!map->members) {
5059 + DP("out of memory for %d bytes", newbytes);
5060 + kfree(map);
5061 + return -ENOMEM;
5062 + }
5063 + memset(map->members, 0, newbytes);
5064 +
5065 + set->data = map;
5066 + return 0;
5067 +}
5068 +
5069 +static void destroy(struct ip_set *set)
5070 +{
5071 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5072 +
5073 + ip_set_free(map->members, map->hashsize * sizeof(ip_set_ip_t));
5074 + kfree(map);
5075 +
5076 + set->data = NULL;
5077 +}
5078 +
5079 +static void flush(struct ip_set *set)
5080 +{
5081 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5082 + memset(map->members, 0, map->hashsize * sizeof(ip_set_ip_t));
5083 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
5084 +}
5085 +
5086 +static void list_header(const struct ip_set *set, void *data)
5087 +{
5088 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5089 + struct ip_set_req_nethash_create *header =
5090 + (struct ip_set_req_nethash_create *) data;
5091 +
5092 + header->hashsize = map->hashsize;
5093 + header->probes = map->probes;
5094 + header->resize = map->resize;
5095 +}
5096 +
5097 +static int list_members_size(const struct ip_set *set)
5098 +{
5099 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5100 +
5101 + return (map->hashsize * sizeof(ip_set_ip_t));
5102 +}
5103 +
5104 +static void list_members(const struct ip_set *set, void *data)
5105 +{
5106 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5107 + int bytes = map->hashsize * sizeof(ip_set_ip_t);
5108 +
5109 + memcpy(data, map->members, bytes);
5110 +}
5111 +
5112 +static struct ip_set_type ip_set_nethash = {
5113 + .typename = SETTYPE_NAME,
5114 + .typecode = IPSET_TYPE_IP,
5115 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5116 + .create = &create,
5117 + .destroy = &destroy,
5118 + .flush = &flush,
5119 + .reqsize = sizeof(struct ip_set_req_nethash),
5120 + .addip = &addip,
5121 + .addip_kernel = &addip_kernel,
5122 + .retry = &retry,
5123 + .delip = &delip,
5124 + .delip_kernel = &delip_kernel,
5125 + .testip = &testip,
5126 + .testip_kernel = &testip_kernel,
5127 + .header_size = sizeof(struct ip_set_req_nethash_create),
5128 + .list_header = &list_header,
5129 + .list_members_size = &list_members_size,
5130 + .list_members = &list_members,
5131 + .me = THIS_MODULE,
5132 +};
5133 +
5134 +MODULE_LICENSE("GPL");
5135 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5136 +MODULE_DESCRIPTION("nethash type of IP sets");
5137 +
5138 +static int __init init(void)
5139 +{
5140 + return ip_set_register_set_type(&ip_set_nethash);
5141 +}
5142 +
5143 +static void __exit fini(void)
5144 +{
5145 + /* FIXME: possible race with ip_set_create() */
5146 + ip_set_unregister_set_type(&ip_set_nethash);
5147 +}
5148 +
5149 +module_init(init);
5150 +module_exit(fini);
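The nethash type above is an open-addressing hash: an element is stored in the first free slot among map->probes probes of the form (jhash + i * randhash) % hashsize, and __addip_base() returns -EAGAIN to trigger retry()/rehashing when every probe collides. The standalone sketch below reproduces only that probing pattern; h1() and h2() are simple stand-ins for jhash_ip() and randhash_ip(), and the table sizes are arbitrary.

/*
 * Standalone sketch of the double-hash probing used by the nethash
 * type above.  The hash functions are stand-ins; only the probe
 * sequence (h1 + i * h2) % hashsize is the point.
 */
#include <stdio.h>
#include <stdint.h>

#define HASHSIZE 64
#define PROBES   4
#define PRIME    61

static uint32_t members[HASHSIZE];      /* zero means "slot free" */

static uint32_t h1(uint32_t ip) { return ip * 2654435761u; }  /* stand-in for jhash_ip() */
static uint32_t h2(uint32_t ip) { return 1 + ip % PRIME; }    /* mirrors randhash_ip() */

/* 0 on success, -1 if already present, -2 if all probes collide */
static int addip_sketch(uint32_t ip)
{
        uint32_t i, probe;

        for (i = 0; i < PROBES; i++) {
                probe = (h1(ip) + i * h2(ip)) % HASHSIZE;
                if (members[probe] == ip)
                        return -1;
                if (!members[probe]) {
                        members[probe] = ip;
                        return 0;
                }
        }
        return -2;      /* would trigger retry()/rehashing in the module */
}

int main(void)
{
        int first  = addip_sketch(0x0A000001);
        int second = addip_sketch(0x0A000001);

        printf("%d %d\n", first, second);       /* prints "0 -1" */
        return 0;
}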
5151 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.19.dev/net/ipv4/netfilter/ip_set_portmap.c
5152 --- linux-2.6.19.old/net/ipv4/netfilter/ip_set_portmap.c 1970-01-01 01:00:00.000000000 +0100
5153 +++ linux-2.6.19.dev/net/ipv4/netfilter/ip_set_portmap.c 2006-12-14 03:13:43.000000000 +0100
5154 @@ -0,0 +1,325 @@
5155 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5156 + *
5157 + * This program is free software; you can redistribute it and/or modify
5158 + * it under the terms of the GNU General Public License version 2 as
5159 + * published by the Free Software Foundation.
5160 + */
5161 +
5162 +/* Kernel module implementing a port set type as a bitmap */
5163 +
5164 +#include <linux/module.h>
5165 +#include <linux/ip.h>
5166 +#include <linux/tcp.h>
5167 +#include <linux/udp.h>
5168 +#include <linux/skbuff.h>
5169 +#include <linux/netfilter_ipv4/ip_tables.h>
5170 +#include <linux/netfilter_ipv4/ip_set.h>
5171 +#include <linux/errno.h>
5172 +#include <asm/uaccess.h>
5173 +#include <asm/bitops.h>
5174 +#include <linux/spinlock.h>
5175 +
5176 +#include <net/ip.h>
5177 +
5178 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
5179 +
5180 +/* We must handle non-linear skbs */
5181 +static inline ip_set_ip_t
5182 +get_port(const struct sk_buff *skb, u_int32_t flags)
5183 +{
5184 + struct iphdr *iph = skb->nh.iph;
5185 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
5186 +
5187 + switch (iph->protocol) {
5188 + case IPPROTO_TCP: {
5189 + struct tcphdr tcph;
5190 +
5191 + /* See comments at tcp_match in ip_tables.c */
5192 + if (offset)
5193 + return INVALID_PORT;
5194 +
5195 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
5196 + /* No choice either */
5197 + return INVALID_PORT;
5198 +
5199 + return ntohs(flags & IPSET_SRC ?
5200 + tcph.source : tcph.dest);
5201 + }
5202 + case IPPROTO_UDP: {
5203 + struct udphdr udph;
5204 +
5205 + if (offset)
5206 + return INVALID_PORT;
5207 +
5208 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
5209 + /* No choice either */
5210 + return INVALID_PORT;
5211 +
5212 + return ntohs(flags & IPSET_SRC ?
5213 + udph.source : udph.dest);
5214 + }
5215 + default:
5216 + return INVALID_PORT;
5217 + }
5218 +}
5219 +
5220 +static inline int
5221 +__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
5222 +{
5223 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5224 +
5225 + if (port < map->first_port || port > map->last_port)
5226 + return -ERANGE;
5227 +
5228 + *hash_port = port;
5229 + DP("set: %s, port:%u, %u", set->name, port, *hash_port);
5230 + return !!test_bit(port - map->first_port, map->members);
5231 +}
5232 +
5233 +static int
5234 +testport(struct ip_set *set, const void *data, size_t size,
5235 + ip_set_ip_t *hash_port)
5236 +{
5237 + struct ip_set_req_portmap *req =
5238 + (struct ip_set_req_portmap *) data;
5239 +
5240 + if (size != sizeof(struct ip_set_req_portmap)) {
5241 + ip_set_printk("data length wrong (want %zu, have %zu)",
5242 + sizeof(struct ip_set_req_portmap),
5243 + size);
5244 + return -EINVAL;
5245 + }
5246 + return __testport(set, req->port, hash_port);
5247 +}
5248 +
5249 +static int
5250 +testport_kernel(struct ip_set *set, const struct sk_buff *skb,
5251 + u_int32_t flags, ip_set_ip_t *hash_port)
5252 +{
5253 + int res;
5254 + ip_set_ip_t port = get_port(skb, flags);
5255 +
5256 + DP("flag %s port %u", flags & IPSET_SRC ? "SRC" : "DST", port);
5257 + if (port == INVALID_PORT)
5258 + return 0;
5259 +
5260 + res = __testport(set, port, hash_port);
5261 +
5262 + return (res < 0 ? 0 : res);
5263 +}
5264 +
5265 +static inline int
5266 +__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
5267 +{
5268 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5269 +
5270 + if (port < map->first_port || port > map->last_port)
5271 + return -ERANGE;
5272 + if (test_and_set_bit(port - map->first_port, map->members))
5273 + return -EEXIST;
5274 +
5275 + *hash_port = port;
5276 + DP("port %u", port);
5277 + return 0;
5278 +}
5279 +
5280 +static int
5281 +addport(struct ip_set *set, const void *data, size_t size,
5282 + ip_set_ip_t *hash_port)
5283 +{
5284 + struct ip_set_req_portmap *req =
5285 + (struct ip_set_req_portmap *) data;
5286 +
5287 + if (size != sizeof(struct ip_set_req_portmap)) {
5288 + ip_set_printk("data length wrong (want %zu, have %zu)",
5289 + sizeof(struct ip_set_req_portmap),
5290 + size);
5291 + return -EINVAL;
5292 + }
5293 + return __addport(set, req->port, hash_port);
5294 +}
5295 +
5296 +static int
5297 +addport_kernel(struct ip_set *set, const struct sk_buff *skb,
5298 + u_int32_t flags, ip_set_ip_t *hash_port)
5299 +{
5300 + ip_set_ip_t port = get_port(skb, flags);
5301 +
5302 + if (port == INVALID_PORT)
5303 + return -EINVAL;
5304 +
5305 + return __addport(set, port, hash_port);
5306 +}
5307 +
5308 +static inline int
5309 +__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
5310 +{
5311 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5312 +
5313 + if (port < map->first_port || port > map->last_port)
5314 + return -ERANGE;
5315 + if (!test_and_clear_bit(port - map->first_port, map->members))
5316 + return -EEXIST;
5317 +
5318 + *hash_port = port;
5319 + DP("port %u", port);
5320 + return 0;
5321 +}
5322 +
5323 +static int
5324 +delport(struct ip_set *set, const void *data, size_t size,
5325 + ip_set_ip_t *hash_port)
5326 +{
5327 + struct ip_set_req_portmap *req =
5328 + (struct ip_set_req_portmap *) data;
5329 +
5330 + if (size != sizeof(struct ip_set_req_portmap)) {
5331 + ip_set_printk("data length wrong (want %zu, have %zu)",
5332 + sizeof(struct ip_set_req_portmap),
5333 + size);
5334 + return -EINVAL;
5335 + }
5336 + return __delport(set, req->port, hash_port);
5337 +}
5338 +
5339 +static int
5340 +delport_kernel(struct ip_set *set, const struct sk_buff *skb,
5341 + u_int32_t flags, ip_set_ip_t *hash_port)
5342 +{
5343 + ip_set_ip_t port = get_port(skb, flags);
5344 +
5345 + if (port == INVALID_PORT)
5346 + return -EINVAL;
5347 +
5348 + return __delport(set, port, hash_port);
5349 +}
5350 +
5351 +static int create(struct ip_set *set, const void *data, size_t size)
5352 +{
5353 + int newbytes;
5354 + struct ip_set_req_portmap_create *req =
5355 + (struct ip_set_req_portmap_create *) data;
5356 + struct ip_set_portmap *map;
5357 +
5358 + if (size != sizeof(struct ip_set_req_portmap_create)) {
5359 + ip_set_printk("data length wrong (want %zu, have %zu)",
5360 + sizeof(struct ip_set_req_portmap_create),
5361 + size);
5362 + return -EINVAL;
5363 + }
5364 +
5365 + DP("from %u to %u", req->from, req->to);
5366 +
5367 + if (req->from > req->to) {
5368 + DP("bad port range");
5369 + return -ENOEXEC;
5370 + }
5371 +
5372 + if (req->to - req->from > MAX_RANGE) {
5373 + ip_set_printk("range too big (max %d ports)",
5374 + MAX_RANGE);
5375 + return -ENOEXEC;
5376 + }
5377 +
5378 + map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
5379 + if (!map) {
5380 + DP("out of memory for %d bytes",
5381 + sizeof(struct ip_set_portmap));
5382 + return -ENOMEM;
5383 + }
5384 + map->first_port = req->from;
5385 + map->last_port = req->to;
5386 + newbytes = bitmap_bytes(req->from, req->to);
5387 + map->members = kmalloc(newbytes, GFP_KERNEL);
5388 + if (!map->members) {
5389 + DP("out of memory for %d bytes", newbytes);
5390 + kfree(map);
5391 + return -ENOMEM;
5392 + }
5393 + memset(map->members, 0, newbytes);
5394 +
5395 + set->data = map;
5396 + return 0;
5397 +}
5398 +
5399 +static void destroy(struct ip_set *set)
5400 +{
5401 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5402 +
5403 + kfree(map->members);
5404 + kfree(map);
5405 +
5406 + set->data = NULL;
5407 +}
5408 +
5409 +static void flush(struct ip_set *set)
5410 +{
5411 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5412 + memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
5413 +}
5414 +
5415 +static void list_header(const struct ip_set *set, void *data)
5416 +{
5417 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5418 + struct ip_set_req_portmap_create *header =
5419 + (struct ip_set_req_portmap_create *) data;
5420 +
5421 + DP("list_header %u %u", map->first_port, map->last_port);
5422 +
5423 + header->from = map->first_port;
5424 + header->to = map->last_port;
5425 +}
5426 +
5427 +static int list_members_size(const struct ip_set *set)
5428 +{
5429 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5430 +
5431 + return bitmap_bytes(map->first_port, map->last_port);
5432 +}
5433 +
5434 +static void list_members(const struct ip_set *set, void *data)
5435 +{
5436 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5437 + int bytes = bitmap_bytes(map->first_port, map->last_port);
5438 +
5439 + memcpy(data, map->members, bytes);
5440 +}
5441 +
5442 +static struct ip_set_type ip_set_portmap = {
5443 + .typename = SETTYPE_NAME,
5444 + .typecode = IPSET_TYPE_PORT,
5445 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5446 + .create = &create,
5447 + .destroy = &destroy,
5448 + .flush = &flush,
5449 + .reqsize = sizeof(struct ip_set_req_portmap),
5450 + .addip = &addport,
5451 + .addip_kernel = &addport_kernel,
5452 + .delip = &delport,
5453 + .delip_kernel = &delport_kernel,
5454 + .testip = &testport,
5455 + .testip_kernel = &testport_kernel,
5456 + .header_size = sizeof(struct ip_set_req_portmap_create),
5457 + .list_header = &list_header,
5458 + .list_members_size = &list_members_size,
5459 + .list_members = &list_members,
5460 + .me = THIS_MODULE,
5461 +};
5462 +
5463 +MODULE_LICENSE("GPL");
5464 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5465 +MODULE_DESCRIPTION("portmap type of IP sets");
5466 +
5467 +static int __init init(void)
5468 +{
5469 + return ip_set_register_set_type(&ip_set_portmap);
5470 +}
5471 +
5472 +static void __exit fini(void)
5473 +{
5474 + /* FIXME: possible race with ip_set_create() */
5475 + ip_set_unregister_set_type(&ip_set_portmap);
5476 +}
5477 +
5478 +module_init(init);
5479 +module_exit(fini);
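The portmap type above is a plain bitmap over the configured port range, with bit (port - first_port) recording membership; bitmap_bytes() is assumed to round the bit count up to whole machine words, as a kernel bitmap would. A standalone sketch of the same arithmetic:

/*
 * Standalone sketch of the bitmap membership arithmetic used by the
 * portmap type above.  The word-rounding mirrors what bitmap_bytes()
 * is assumed to do.
 */
#include <stdio.h>

#define FIRST_PORT 1024
#define LAST_PORT  2047
#define NBITS      (LAST_PORT - FIRST_PORT + 1)
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long members[(NBITS + 8 * sizeof(unsigned long) - 1)
                             / (8 * sizeof(unsigned long))];

static void set_port(unsigned port)
{
        unsigned bit = port - FIRST_PORT;

        members[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
}

static int test_port(unsigned port)
{
        unsigned bit = port - FIRST_PORT;

        return !!(members[bit / BITS_PER_WORD] & (1UL << (bit % BITS_PER_WORD)));
}

int main(void)
{
        set_port(1080);
        /* prints "1080: 1, 1081: 0" */
        printf("1080: %d, 1081: %d\n", test_port(1080), test_port(1081));
        return 0;
}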
5480 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ipt_set.c linux-2.6.19.dev/net/ipv4/netfilter/ipt_set.c
5481 --- linux-2.6.19.old/net/ipv4/netfilter/ipt_set.c 1970-01-01 01:00:00.000000000 +0100
5482 +++ linux-2.6.19.dev/net/ipv4/netfilter/ipt_set.c 2006-12-14 03:13:43.000000000 +0100
5483 @@ -0,0 +1,105 @@
5484 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
5485 + * Patrick Schaaf <bof@bof.de>
5486 + * Martin Josefsson <gandalf@wlug.westbo.se>
5487 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5488 + *
5489 + * This program is free software; you can redistribute it and/or modify
5490 + * it under the terms of the GNU General Public License version 2 as
5491 + * published by the Free Software Foundation.
5492 + */
5493 +
5494 +/* Kernel module to match an IP set. */
5495 +
5496 +#include <linux/module.h>
5497 +#include <linux/ip.h>
5498 +#include <linux/skbuff.h>
5499 +
5500 +#include <linux/netfilter_ipv4/ip_tables.h>
5501 +#include <linux/netfilter_ipv4/ip_set.h>
5502 +#include <linux/netfilter_ipv4/ipt_set.h>
5503 +
5504 +static inline int
5505 +match_set(const struct ipt_set_info *info,
5506 + const struct sk_buff *skb,
5507 + int inv)
5508 +{
5509 + if (ip_set_testip_kernel(info->index, skb, info->flags))
5510 + inv = !inv;
5511 + return inv;
5512 +}
5513 +
5514 +static int
5515 +match(const struct sk_buff *skb,
5516 + const struct net_device *in,
5517 + const struct net_device *out,
5518 + const struct xt_match *match,
5519 + const void *matchinfo,
5520 + int offset,
5521 + unsigned int protoff,
5522 + int *hotdrop)
5523 +{
5524 + const struct ipt_set_info_match *info = matchinfo;
5525 +
5526 + return match_set(&info->match_set,
5527 + skb,
5528 + info->match_set.flags[0] & IPSET_MATCH_INV);
5529 +}
5530 +
5531 +static int
5532 +checkentry(const char *tablename,
5533 + const void *ip,
5534 + const struct xt_match *match,
5535 + void *matchinfo,
5536 + unsigned int hook_mask)
5537 +{
5538 + struct ipt_set_info_match *info =
5539 + (struct ipt_set_info_match *) matchinfo;
5540 + ip_set_id_t index;
5541 +
5542 + index = ip_set_get_byindex(info->match_set.index);
5543 +
5544 + if (index == IP_SET_INVALID_ID) {
5545 + ip_set_printk("Cannot find set indentified by id %u to match",
5546 + info->match_set.index);
5547 + return 0; /* error */
5548 + }
5549 + if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
5550 + ip_set_printk("That's nasty!");
5551 + return 0; /* error */
5552 + }
5553 +
5554 + return 1;
5555 +}
5556 +
5557 +static void destroy(const struct xt_match *match, void *matchinfo)
5558 +{
5559 + struct ipt_set_info_match *info = matchinfo;
5560 +
5561 + ip_set_put(info->match_set.index);
5562 +}
5563 +
5564 +static struct ipt_match set_match = {
5565 + .name = "set",
5566 + .match = &match,
5567 + .matchsize = sizeof(struct ipt_set_info_match),
5568 + .checkentry = &checkentry,
5569 + .destroy = &destroy,
5570 + .me = THIS_MODULE
5571 +};
5572 +
5573 +MODULE_LICENSE("GPL");
5574 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5575 +MODULE_DESCRIPTION("iptables IP set match module");
5576 +
5577 +static int __init init(void)
5578 +{
5579 + return ipt_register_match(&set_match);
5580 +}
5581 +
5582 +static void __exit fini(void)
5583 +{
5584 + ipt_unregister_match(&set_match);
5585 +}
5586 +
5587 +module_init(init);
5588 +module_exit(fini);
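The match above reduces to an XOR of the set-membership test and the inversion flag. The following stand-alone sketch spells that truth table out; set membership is passed in as a plain int and the sketch_* name is hypothetical.

    #include <stdio.h>

    /* mirrors match_set(): result = (packet in set) XOR (inverse flag) */
    static int sketch_match_set(int in_set, int inv)
    {
            if (in_set)
                    inv = !inv;
            return inv;
    }

    int main(void)
    {
            int in_set, inv;

            for (in_set = 0; in_set <= 1; in_set++)
                    for (inv = 0; inv <= 1; inv++)
                            printf("in_set=%d inv=%d -> match=%d\n",
                                   in_set, inv, sketch_match_set(in_set, inv));
            return 0;
    }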
5589 diff -urN linux-2.6.19.old/net/ipv4/netfilter/ipt_SET.c linux-2.6.19.dev/net/ipv4/netfilter/ipt_SET.c
5590 --- linux-2.6.19.old/net/ipv4/netfilter/ipt_SET.c 1970-01-01 01:00:00.000000000 +0100
5591 +++ linux-2.6.19.dev/net/ipv4/netfilter/ipt_SET.c 2006-12-14 03:13:43.000000000 +0100
5592 @@ -0,0 +1,120 @@
5593 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
5594 + * Patrick Schaaf <bof@bof.de>
5595 + * Martin Josefsson <gandalf@wlug.westbo.se>
5596 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5597 + *
5598 + * This program is free software; you can redistribute it and/or modify
5599 + * it under the terms of the GNU General Public License version 2 as
5600 + * published by the Free Software Foundation.
5601 + */
5602 +
5603 +/* ipt_SET.c - netfilter target to manipulate IP sets */
5604 +
5605 +#include <linux/types.h>
5606 +#include <linux/ip.h>
5607 +#include <linux/timer.h>
5608 +#include <linux/module.h>
5609 +#include <linux/netfilter.h>
5610 +#include <linux/netdevice.h>
5611 +#include <linux/if.h>
5612 +#include <linux/inetdevice.h>
5613 +#include <net/protocol.h>
5614 +#include <net/checksum.h>
5615 +#include <linux/netfilter_ipv4.h>
5616 +#include <linux/netfilter_ipv4/ip_nat_rule.h>
5617 +#include <linux/netfilter_ipv4/ipt_set.h>
5618 +
5619 +static unsigned int
5620 +target(struct sk_buff **pskb,
5621 + const struct net_device *in,
5622 + const struct net_device *out,
5623 + unsigned int hooknum,
5624 + const struct xt_target *target,
5625 + const void *targinfo)
5626 +{
5627 + const struct ipt_set_info_target *info = targinfo;
5628 +
5629 + if (info->add_set.index != IP_SET_INVALID_ID)
5630 + ip_set_addip_kernel(info->add_set.index,
5631 + *pskb,
5632 + info->add_set.flags);
5633 + if (info->del_set.index != IP_SET_INVALID_ID)
5634 + ip_set_delip_kernel(info->del_set.index,
5635 + *pskb,
5636 + info->del_set.flags);
5637 +
5638 + return IPT_CONTINUE;
5639 +}
5640 +
5641 +static int
5642 +checkentry(const char *tablename,
5643 + const void *e,
5644 + const struct xt_target *target,
5645 + void *targinfo,
5646 + unsigned int hook_mask)
5647 +{
5648 + struct ipt_set_info_target *info =
5649 + (struct ipt_set_info_target *) targinfo;
5650 + ip_set_id_t index;
5651 +
5652 + if (info->add_set.index != IP_SET_INVALID_ID) {
5653 + index = ip_set_get_byindex(info->add_set.index);
5654 + if (index == IP_SET_INVALID_ID) {
5655 + ip_set_printk("cannot find add_set index %u as target",
5656 + info->add_set.index);
5657 + return 0; /* error */
5658 + }
5659 + }
5660 +
5661 + if (info->del_set.index != IP_SET_INVALID_ID) {
5662 + index = ip_set_get_byindex(info->del_set.index);
5663 + if (index == IP_SET_INVALID_ID) {
5664 + ip_set_printk("cannot find del_set index %u as target",
5665 + info->del_set.index);
5666 + return 0; /* error */
5667 + }
5668 + }
5669 + if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
5670 + || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
5671 + ip_set_printk("That's nasty!");
5672 + return 0; /* error */
5673 + }
5674 +
5675 + return 1;
5676 +}
5677 +
5678 +static void destroy(const struct xt_target *target, void *targetinfo)
5679 +{
5680 + struct ipt_set_info_target *info = targetinfo;
5681 +
5682 + if (info->add_set.index != IP_SET_INVALID_ID)
5683 + ip_set_put(info->add_set.index);
5684 + if (info->del_set.index != IP_SET_INVALID_ID)
5685 + ip_set_put(info->del_set.index);
5686 +}
5687 +
5688 +static struct ipt_target SET_target = {
5689 + .name = "SET",
5690 + .target = target,
5691 + .targetsize = sizeof(struct ipt_set_info_target),
5692 + .checkentry = checkentry,
5693 + .destroy = destroy,
5694 + .me = THIS_MODULE
5695 +};
5696 +
5697 +MODULE_LICENSE("GPL");
5698 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5699 +MODULE_DESCRIPTION("iptables IP set target module");
5700 +
5701 +static int __init init(void)
5702 +{
5703 + return ipt_register_target(&SET_target);
5704 +}
5705 +
5706 +static void __exit fini(void)
5707 +{
5708 + ipt_unregister_target(&SET_target);
5709 +}
5710 +
5711 +module_init(init);
5712 +module_exit(fini);
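The control flow of target() above: both sets are optional (IP_SET_INVALID_ID marks an unused slot), the add is attempted before the delete, and the verdict never terminates packet processing. A stand-alone sketch with hypothetical sketch_* names and function-pointer stand-ins for the kernel add/del calls:

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_INVALID_ID 65535u

    static void sketch_target(uint16_t add_index, uint16_t del_index,
                              void (*add_fn)(uint16_t), void (*del_fn)(uint16_t))
    {
            if (add_index != SKETCH_INVALID_ID)
                    add_fn(add_index);
            if (del_index != SKETCH_INVALID_ID)
                    del_fn(del_index);
            /* like IPT_CONTINUE: rule traversal goes on, nothing is dropped */
    }

    static void log_add(uint16_t i) { printf("add entry to set %u\n", i); }
    static void log_del(uint16_t i) { printf("del entry from set %u\n", i); }

    int main(void)
    {
            /* a rule with only an add set configured */
            sketch_target(3, SKETCH_INVALID_ID, log_add, log_del);
            return 0;
    }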
5713 diff -urN linux-2.6.19.old/net/ipv4/netfilter/Kconfig linux-2.6.19.dev/net/ipv4/netfilter/Kconfig
5714 --- linux-2.6.19.old/net/ipv4/netfilter/Kconfig 2006-12-14 03:13:41.000000000 +0100
5715 +++ linux-2.6.19.dev/net/ipv4/netfilter/Kconfig 2006-12-14 03:13:43.000000000 +0100
5716 @@ -647,5 +647,106 @@
5717 Allows altering the ARP packet payload: source and destination
5718 hardware and network addresses.
5719
5720 +config IP_NF_SET
5721 + tristate "IP set support"
5722 + depends on INET && NETFILTER
5723 + help
5724 + This option adds IP set support to the kernel.
5725 + In order to define and use sets, you need the userspace utility
5726 + ipset(8).
5727 +
5728 + To compile it as a module, choose M here. If unsure, say N.
5729 +
5730 +config IP_NF_SET_MAX
5731 + int "Maximum number of IP sets"
5732 + default 256
5733 + range 2 65534
5734 + depends on IP_NF_SET
5735 + help
5736 + Here you can define the default value of the maximum number
5737 + of IP sets for the kernel.
5738 +
5739 + The value can be overridden by the 'max_sets' module
5740 + parameter of the 'ip_set' module.
5741 +
5742 +config IP_NF_SET_HASHSIZE
5743 + int "Hash size for bindings of IP sets"
5744 + default 1024
5745 + depends on IP_NF_SET
5746 + help
5747 + Here you can define the default value of the hash size for
5748 + bindings of IP sets.
5749 +
5750 + The value can be overridden by the 'hash_size' module
5751 + parameter of the 'ip_set' module.
5752 +
5753 +config IP_NF_SET_IPMAP
5754 + tristate "ipmap set support"
5755 + depends on IP_NF_SET
5756 + help
5757 + This option adds the ipmap set type support.
5758 +
5759 + To compile it as a module, choose M here. If unsure, say N.
5760 +
5761 +config IP_NF_SET_MACIPMAP
5762 + tristate "macipmap set support"
5763 + depends on IP_NF_SET
5764 + help
5765 + This option adds the macipmap set type support.
5766 +
5767 + To compile it as a module, choose M here. If unsure, say N.
5768 +
5769 +config IP_NF_SET_PORTMAP
5770 + tristate "portmap set support"
5771 + depends on IP_NF_SET
5772 + help
5773 + This option adds the portmap set type support.
5774 +
5775 + To compile it as a module, choose M here. If unsure, say N.
5776 +
5777 +config IP_NF_SET_IPHASH
5778 + tristate "iphash set support"
5779 + depends on IP_NF_SET
5780 + help
5781 + This option adds the iphash set type support.
5782 +
5783 + To compile it as a module, choose M here. If unsure, say N.
5784 +
5785 +config IP_NF_SET_NETHASH
5786 + tristate "nethash set support"
5787 + depends on IP_NF_SET
5788 + help
5789 + This option adds the nethash set type support.
5790 +
5791 + To compile it as a module, choose M here. If unsure, say N.
5792 +
5793 +config IP_NF_SET_IPTREE
5794 + tristate "iptree set support"
5795 + depends on IP_NF_SET
5796 + help
5797 + This option adds the iptree set type support.
5798 +
5799 + To compile it as a module, choose M here. If unsure, say N.
5800 +
5801 +config IP_NF_MATCH_SET
5802 + tristate "set match support"
5803 + depends on IP_NF_SET
5804 + help
5805 + Set matching matches packets against given IP sets.
5806 + You need the ipset utility to create and set up the sets.
5807 +
5808 + To compile it as a module, choose M here. If unsure, say N.
5809 +
5810 +config IP_NF_TARGET_SET
5811 + tristate "SET target support"
5812 + depends on IP_NF_SET
5813 + help
5814 + The SET target makes it possible to add/delete entries
5815 + in IP sets.
5816 + You need the ipset utility to create and set up the sets.
5817 +
5818 + To compile it as a module, choose M here. If unsure, say N.
5819 +
5820 +
5821 endmenu
5822
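The two numeric options above only set defaults; their help texts point at the 'max_sets' and 'hash_size' module parameters of ip_set. A hedged sketch of how such Kconfig defaults are typically wired to module parameters (the exact declarations in ip_set.c earlier in this patch may differ):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* defaults come from the Kconfig symbols defined above */
    static int max_sets = CONFIG_IP_NF_SET_MAX;
    module_param(max_sets, int, 0600);
    MODULE_PARM_DESC(max_sets, "maximal number of sets (default CONFIG_IP_NF_SET_MAX)");

    static int hash_size = CONFIG_IP_NF_SET_HASHSIZE;
    module_param(hash_size, int, 0600);
    MODULE_PARM_DESC(hash_size, "hash size for bindings (default CONFIG_IP_NF_SET_HASHSIZE)");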
5823 diff -urN linux-2.6.19.old/net/ipv4/netfilter/Makefile linux-2.6.19.dev/net/ipv4/netfilter/Makefile
5824 --- linux-2.6.19.old/net/ipv4/netfilter/Makefile 2006-12-14 03:13:41.000000000 +0100
5825 +++ linux-2.6.19.dev/net/ipv4/netfilter/Makefile 2006-12-14 03:13:43.000000000 +0100
5826 @@ -54,6 +54,7 @@
5827
5828 # matches
5829 obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
5830 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
5831 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
5832 obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
5833 obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
5834 @@ -77,6 +78,17 @@
5835 obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
5836 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
5837 obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
5838 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
5839 +
5840 +# sets
5841 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
5842 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
5843 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
5844 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
5845 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
5846 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
5847 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
5848 +
5849 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
5850 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
5851