1 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h
2 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h 1969-12-31 18:00:00.000000000 -0600
3 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h 2007-06-08 16:29:31.825808000 -0500
8 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
9 + * Patrick Schaaf <bof@bof.de>
10 + * Martin Josefsson <gandalf@wlug.westbo.se>
11 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
13 + * This program is free software; you can redistribute it and/or modify
14 + * it under the terms of the GNU General Public License version 2 as
15 + * published by the Free Software Foundation.
23 + * A sockopt of such quality has hardly ever been seen before on the open
24 + * market! This little beauty, hardly ever used: above 64, so it's
25 + * traditionally used for firewalling, not touched (even once!) by the
26 + * 2.0, 2.2 and 2.4 kernels!
28 + * Comes with its own certificate of authenticity, valid anywhere in the
36 + * Heavily modified by Joakim Axelsson 08.03.2002
37 + * - Made it more module-based
39 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
41 + * - in order to "deal with" backward compatibility, renamed to ipset
45 + * Used so that the kernel module and ipset-binary can match their versions
47 +#define IP_SET_PROTOCOL_VERSION 2
49 +#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
51 +/* Let's work with our own typedef for representing an IP address.
52 + * We hope to make the code more portable, possibly to IPv6...
54 + * The representation works in HOST byte order, because most set types
55 + * will perform arithmetic operations and compare operations.
57 + * For now the type is an uint32_t.
59 + * Make sure to ONLY use the functions when translating and parsing
60 + * in order to keep the host byte order and make it more portable:
65 + * (Joakim: where are they???)
68 +typedef uint32_t ip_set_ip_t;
70 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
71 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
73 +typedef uint16_t ip_set_id_t;
75 +#define IP_SET_INVALID_ID 65535
77 +/* How deep we follow bindings */
78 +#define IP_SET_MAX_BINDINGS 6
81 + * Option flags for kernel operations (ipt_set_info)
83 +#define IPSET_SRC 0x01 /* Source match/add */
84 +#define IPSET_DST 0x02 /* Destination match/add */
85 +#define IPSET_MATCH_INV 0x04 /* Inverse matching */
90 +#define IPSET_TYPE_IP 0x01 /* IP address type of set */
91 +#define IPSET_TYPE_PORT 0x02 /* Port type of set */
92 +#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
93 +#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
95 +/* Reserved keywords */
96 +#define IPSET_TOKEN_DEFAULT ":default:"
97 +#define IPSET_TOKEN_ALL ":all:"
99 +/* SO_IP_SET operation constants, and their request struct types.
102 + * 0-99: commands with version checking
103 + * 100-199: add/del/test/bind/unbind
104 + * 200-299: list, save, restore
107 +/* Single shot operations:
108 + * version, create, destroy, flush, rename and swap
110 + * Sets are identified by name.
113 +#define IP_SET_REQ_STD \
115 + unsigned version; \
116 + char name[IP_SET_MAXNAMELEN]
118 +#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
119 +struct ip_set_req_create {
121 + char typename[IP_SET_MAXNAMELEN];
124 +#define IP_SET_OP_DESTROY 0x00000002 /* Remove an (empty) set */
125 +struct ip_set_req_std {
129 +#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
130 +/* Uses ip_set_req_std */
132 +#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
133 +/* Uses ip_set_req_create */
135 +#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
136 +/* Uses ip_set_req_create */
138 +union ip_set_name_index {
139 + char name[IP_SET_MAXNAMELEN];
143 +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
144 +struct ip_set_req_get_set {
147 + union ip_set_name_index set;
150 +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
151 +/* Uses ip_set_req_get_set */
153 +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
154 +struct ip_set_req_version {
159 +/* Double shot operations:
160 + * add, del, test, bind and unbind.
162 + * First we query the kernel to get the index and type of the target set,
163 + * then issue the command. Validity of IP is checked in kernel in order
164 + * to minimize sockopt operations.
167 +/* Get minimal set data for add/del/test/bind/unbind IP */
168 +#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
169 +struct ip_set_req_adt_get {
172 + union ip_set_name_index set;
173 + char typename[IP_SET_MAXNAMELEN];
176 +#define IP_SET_REQ_BYINDEX \
180 +struct ip_set_req_adt {
181 + IP_SET_REQ_BYINDEX;
184 +#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
185 +/* Uses ip_set_req_adt, with type specific addage */
187 +#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
188 +/* Uses ip_set_req_adt, with type specific addage */
190 +#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
191 +/* Uses ip_set_req_adt, with type specific addage */
193 +#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
194 +/* Uses ip_set_req_bind, with type specific addage */
195 +struct ip_set_req_bind {
196 + IP_SET_REQ_BYINDEX;
197 + char binding[IP_SET_MAXNAMELEN];
200 +#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
201 +/* Uses ip_set_req_bind, with type specific addage
202 + * index = 0 means unbinding for all sets */
204 +#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
205 +/* Uses ip_set_req_bind, with type specific addage */
207 +/* Multiple shot operations: list, save, restore.
209 + * - check kernel version and query the max number of sets
210 + * - get the basic information on all sets
211 + * and size required for the next step
212 + * - get actual set data: header, data, bindings
215 +/* Get max_sets and the index of a queried set
217 +#define IP_SET_OP_MAX_SETS 0x00000020
218 +struct ip_set_req_max_sets {
221 + ip_set_id_t max_sets; /* max_sets */
222 + ip_set_id_t sets; /* real number of sets */
223 + union ip_set_name_index set; /* index of set if name used */
226 +/* Get the id and name of the sets plus size for next step */
227 +#define IP_SET_OP_LIST_SIZE 0x00000201
228 +#define IP_SET_OP_SAVE_SIZE 0x00000202
229 +struct ip_set_req_setnames {
231 + ip_set_id_t index; /* set to list/save */
232 + size_t size; /* size to get setdata/bindings */
233 + /* followed by sets number of struct ip_set_name_list */
236 +struct ip_set_name_list {
237 + char name[IP_SET_MAXNAMELEN];
238 + char typename[IP_SET_MAXNAMELEN];
243 +/* The actual list operation */
244 +#define IP_SET_OP_LIST 0x00000203
245 +struct ip_set_req_list {
246 + IP_SET_REQ_BYINDEX;
247 + /* sets number of struct ip_set_list in reply */
250 +struct ip_set_list {
252 + ip_set_id_t binding;
254 + size_t header_size; /* Set header data of header_size */
255 + size_t members_size; /* Set members data of members_size */
256 + size_t bindings_size; /* Set bindings data of bindings_size */
259 +struct ip_set_hash_list {
261 + ip_set_id_t binding;
264 +/* The save operation */
265 +#define IP_SET_OP_SAVE 0x00000204
266 +/* Uses ip_set_req_list, in the reply replaced by
267 + * sets number of struct ip_set_save plus a marker
268 + * ip_set_save followed by ip_set_hash_save structures.
270 +struct ip_set_save {
272 + ip_set_id_t binding;
273 + size_t header_size; /* Set header data of header_size */
274 + size_t members_size; /* Set members data of members_size */
277 +/* At restoring, ip == 0 means default binding for the given set: */
278 +struct ip_set_hash_save {
281 + ip_set_id_t binding;
284 +/* The restore operation */
285 +#define IP_SET_OP_RESTORE 0x00000205
286 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
287 + * plus a marker ip_set_restore, followed by ip_set_hash_save
290 +struct ip_set_restore {
291 + char name[IP_SET_MAXNAMELEN];
292 + char typename[IP_SET_MAXNAMELEN];
294 + size_t header_size; /* Create data of header_size */
295 + size_t members_size; /* Set members data of members_size */
298 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
300 + return 4 * ((((b - a + 8) / 8) + 3) / 4);
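For reference, the arithmetic above computes ceil((b - a + 1) / 8) bytes, padded up to a multiple of 4: bitmap_bytes(0, 9) = 4 * ((17 / 8 + 3) / 4) = 4 (10 bits fit in 2 bytes, padded to 4), and bitmap_bytes(0, 255) = 4 * ((263 / 8 + 3) / 4) = 32 (256 bits in exactly 32 bytes).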
305 +#define ip_set_printk(format, args...) \
307 + printk("%s: %s: ", __FILE__, __FUNCTION__); \
308 + printk(format "\n" , ## args); \
311 +#if defined(IP_SET_DEBUG)
312 +#define DP(format, args...) \
314 + printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
315 + printk(format "\n" , ## args); \
317 +#define IP_SET_ASSERT(x) \
320 + printk("IP_SET_ASSERT: %s:%i(%s)\n", \
321 + __FILE__, __LINE__, __FUNCTION__); \
324 +#define DP(format, args...)
325 +#define IP_SET_ASSERT(x)
331 + * The ip_set_type definition - one per set type, e.g. "ipmap".
333 + * Each individual set has a pointer, set->type, going to one
334 + * of these structures. Function pointers inside the structure implement
335 + * the real behaviour of the sets.
337 + * Unless mentioned otherwise, the implementation behind the function
338 + * pointers of a set_type is expected to return 0 if ok, and a negative
339 + * errno (e.g. -EINVAL) on error.
341 +struct ip_set_type {
342 + struct list_head list; /* next in list of set types */
344 + /* test for IP in set (kernel: iptables -m set src|dst)
345 + * return 0 if not in set, 1 if in set.
347 + int (*testip_kernel) (struct ip_set *set,
348 + const struct sk_buff * skb,
350 + const u_int32_t *flags,
351 + unsigned char index);
353 + /* test for IP in set (userspace: ipset -T set IP)
354 + * return 0 if not in set, 1 if in set.
356 + int (*testip) (struct ip_set *set,
357 + const void *data, size_t size,
361 + * Size of the data structure passed in when
362 + * adding/deleting/testing an entry.
366 + /* Add IP into set (userspace: ipset -A set IP)
367 + * Return -EEXIST if the address is already in the set,
368 + * and -ERANGE if the address lies outside the set bounds.
369 + * If the address was not already in the set, 0 is returned.
371 + int (*addip) (struct ip_set *set,
372 + const void *data, size_t size,
375 + /* Add IP into set (kernel: iptables ... -j SET set src|dst)
376 + * Return -EEXIST if the address is already in the set,
377 + * and -ERANGE if the address lies outside the set bounds.
378 + * If the address was not already in the set, 0 is returned.
380 + int (*addip_kernel) (struct ip_set *set,
381 + const struct sk_buff * skb,
383 + const u_int32_t *flags,
384 + unsigned char index);
386 + /* remove IP from set (userspace: ipset -D set --entry x)
387 + * Return -EEXIST if the address is NOT in the set,
388 + * and -ERANGE if the address lies outside the set bounds.
389 + * If the address really was in the set, 0 is returned.
391 + int (*delip) (struct ip_set *set,
392 + const void *data, size_t size,
395 + /* remove IP from set (kernel: iptables ... -j SET --entry x)
396 + * Return -EEXIST if the address is NOT in the set,
397 + * and -ERANGE if the address lies outside the set bounds.
398 + * If the address really was in the set, 0 is returned.
400 + int (*delip_kernel) (struct ip_set *set,
401 + const struct sk_buff * skb,
403 + const u_int32_t *flags,
404 + unsigned char index);
406 + /* new set creation - allocate type specific items
408 + int (*create) (struct ip_set *set,
409 + const void *data, size_t size);
411 + /* retry the operation after successfully tweaking the set
413 + int (*retry) (struct ip_set *set);
415 + /* set destruction - free type specific items
416 + * There is no return value.
417 + * Can be called only when child sets are destroyed.
419 + void (*destroy) (struct ip_set *set);
421 + /* set flushing - reset all bits in the set, or something similar.
422 + * There is no return value.
424 + void (*flush) (struct ip_set *set);
426 + /* Listing: size needed for header
428 + size_t header_size;
430 + /* Listing: Get the header
432 + * Fill in the information in "data".
433 + * This function is always run after list_header_size() under a
434 + * writelock on the set. Therefore the length of "data" is always
437 + void (*list_header) (const struct ip_set *set,
440 + /* Listing: Get the size for the set members
442 + int (*list_members_size) (const struct ip_set *set);
444 + /* Listing: Get the set members
446 + * Fill in the information in "data".
447 + * This function is always run after list_members_size() under a
448 + * writelock on the set. Therefore the length of "data" is always
451 + void (*list_members) (const struct ip_set *set,
454 + char typename[IP_SET_MAXNAMELEN];
455 + unsigned char features;
456 + int protocol_version;
458 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
462 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
463 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
465 +/* A generic ipset */
467 + char name[IP_SET_MAXNAMELEN]; /* the name of the set */
468 + rwlock_t lock; /* lock for concurrency control */
469 + ip_set_id_t id; /* set id for swapping */
470 + ip_set_id_t binding; /* default binding for the set */
471 + atomic_t ref; /* in kernel and in hash references */
472 + struct ip_set_type *type; /* the set types */
473 + void *data; /* set type specific data */
476 +/* Structure to bind set elements to sets */
477 +struct ip_set_hash {
478 + struct list_head list; /* list of clashing entries in hash */
479 + ip_set_ip_t ip; /* ip from set */
480 + ip_set_id_t id; /* set id */
481 + ip_set_id_t binding; /* set we bind the element to */
484 +/* register and unregister set references */
485 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
486 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
487 +extern void ip_set_put(ip_set_id_t id);
489 +/* API for iptables set match, and SET target */
490 +extern void ip_set_addip_kernel(ip_set_id_t id,
491 + const struct sk_buff *skb,
492 + const u_int32_t *flags);
493 +extern void ip_set_delip_kernel(ip_set_id_t id,
494 + const struct sk_buff *skb,
495 + const u_int32_t *flags);
496 +extern int ip_set_testip_kernel(ip_set_id_t id,
497 + const struct sk_buff *skb,
498 + const u_int32_t *flags);
500 +#endif /* __KERNEL__ */
502 +#endif /*_IP_SET_H*/
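For orientation, here is a minimal sketch of how a kernel-side caller is expected to drive the API declared above: take a reference by name, test a packet, drop the reference. The function name example_match and the set name "badguys" are made up for illustration, and error handling is trimmed.

#include <linux/skbuff.h>
#include <linux/netfilter_ipv4/ip_set.h>

static int example_match(const struct sk_buff *skb)
{
	/* Test the source address; remaining flag entries left zero. */
	u_int32_t flags[IP_SET_MAX_BINDINGS + 1] = { IPSET_SRC, 0 };
	ip_set_id_t index = ip_set_get_byname("badguys");
	int res;

	if (index == IP_SET_INVALID_ID)
		return 0;		/* no such set */

	res = ip_set_testip_kernel(index, skb, flags);

	ip_set_put(index);		/* drop the reference from ip_set_get_byname() */
	return res;
}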
503 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h
504 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h 1969-12-31 18:00:00.000000000 -0600
505 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-06-08 16:29:31.829808250 -0500
507 +#ifndef __IP_SET_IPHASH_H
508 +#define __IP_SET_IPHASH_H
510 +#include <linux/netfilter_ipv4/ip_set.h>
512 +#define SETTYPE_NAME "iphash"
513 +#define MAX_RANGE 0x0000FFFF
515 +struct ip_set_iphash {
516 + ip_set_ip_t *members; /* the iphash proper */
517 + uint32_t elements; /* number of elements */
518 + uint32_t hashsize; /* hash size */
519 + uint16_t probes; /* max number of probes */
520 + uint16_t resize; /* resize factor in percent */
521 + ip_set_ip_t netmask; /* netmask */
522 + void *initval[0]; /* initvals for jhash_1word */
525 +struct ip_set_req_iphash_create {
529 + ip_set_ip_t netmask;
532 +struct ip_set_req_iphash {
536 +#endif /* __IP_SET_IPHASH_H */
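The add/test logic itself lives in ip_set_iphash.c (not part of this excerpt); the toy sketch below only illustrates what the probes and initval[] fields are for: an address may sit in any of `probes` slots, each slot chosen by a Jenkins hash seeded with a different initval. The toy_iphash structure and function are invented for the illustration and deliberately ignore the harray storage the real type uses; it assumes the patched headers are on the include path.

#include <stdint.h>
#include <linux/types.h>
#include <linux/netfilter_ipv4/ip_set_jhash.h>	/* userspace-includable jhash copy, further below */

struct toy_iphash {
	uint32_t hashsize;	/* number of slots */
	uint16_t probes;	/* how many slots may be tried */
	uint32_t initval[8];	/* one hash seed per probe */
	uint32_t members[];	/* flat slot array, 0 = empty */
};

static int toy_iphash_test(const struct toy_iphash *h, uint32_t ip)
{
	uint16_t i;

	for (i = 0; i < h->probes; i++) {
		uint32_t slot = jhash_1word(ip, h->initval[i]) % h->hashsize;

		if (h->members[slot] == ip)
			return 1;	/* found under one of the seeds */
	}
	return 0;
}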
537 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h
538 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h 1969-12-31 18:00:00.000000000 -0600
539 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-06-08 16:29:31.829808250 -0500
541 +#ifndef __IP_SET_IPMAP_H
542 +#define __IP_SET_IPMAP_H
544 +#include <linux/netfilter_ipv4/ip_set.h>
546 +#define SETTYPE_NAME "ipmap"
547 +#define MAX_RANGE 0x0000FFFF
549 +struct ip_set_ipmap {
550 + void *members; /* the ipmap proper */
551 + ip_set_ip_t first_ip; /* host byte order, included in range */
552 + ip_set_ip_t last_ip; /* host byte order, included in range */
553 + ip_set_ip_t netmask; /* subnet netmask */
554 + ip_set_ip_t sizeid; /* size of set in IPs */
555 + ip_set_ip_t hosts; /* number of hosts in a subnet */
558 +struct ip_set_req_ipmap_create {
561 + ip_set_ip_t netmask;
564 +struct ip_set_req_ipmap {
569 +mask_to_bits(ip_set_ip_t mask)
571 + int bits = 32; /* signed so that the "--bits >= 0" test below can become false */
572 + ip_set_ip_t maskaddr;
574 + if (mask == 0xFFFFFFFF)
577 + maskaddr = 0xFFFFFFFE;
578 + while (--bits >= 0 && maskaddr != mask)
585 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
587 + ip_set_ip_t mask = 0xFFFFFFFE;
590 + while (--(*bits) >= 0 && mask && (to & mask) != from)
596 +#endif /* __IP_SET_IPMAP_H */
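Assuming the elided loop bodies shift the candidate mask left one bit per pass (which the 0xFFFFFFFE starting value suggests), mask_to_bits(0xFFFFFF00) returns 24, and range_to_mask() finds the widest prefix covering from..to. For the set itself, the first_ip/hosts fields suggest the usual bitmap indexing; the helper below is a sketch of that calculation only, not the patch's own code (which lives in ip_set_ipmap.c).

#include <linux/netfilter_ipv4/ip_set_ipmap.h>

/* Sketch: map an address inside [first_ip, last_ip] to its bit index.
 * With a /32 netmask hosts == 1 and this reduces to ip - first_ip;
 * with a coarser netmask one bit stands for a whole subnet. */
static inline ip_set_ip_t ipmap_ip_to_index(const struct ip_set_ipmap *map,
					    ip_set_ip_t ip)
{
	return (ip - map->first_ip) / map->hosts;
}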
597 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h
598 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h 1969-12-31 18:00:00.000000000 -0600
599 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-06-08 16:29:31.829808250 -0500
601 +#ifndef __IP_SET_IPPORTHASH_H
602 +#define __IP_SET_IPPORTHASH_H
604 +#include <linux/netfilter_ipv4/ip_set.h>
606 +#define SETTYPE_NAME "ipporthash"
607 +#define MAX_RANGE 0x0000FFFF
608 +#define INVALID_PORT (MAX_RANGE + 1)
610 +struct ip_set_ipporthash {
611 + ip_set_ip_t *members; /* the ipporthash proper */
612 + uint32_t elements; /* number of elements */
613 + uint32_t hashsize; /* hash size */
614 + uint16_t probes; /* max number of probes */
615 + uint16_t resize; /* resize factor in percent */
616 + ip_set_ip_t first_ip; /* host byte order, included in range */
617 + ip_set_ip_t last_ip; /* host byte order, included in range */
618 + void *initval[0]; /* initvals for jhash_1word */
621 +struct ip_set_req_ipporthash_create {
629 +struct ip_set_req_ipporthash {
634 +#endif /* __IP_SET_IPPORTHASH_H */
635 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h
636 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h 1969-12-31 18:00:00.000000000 -0600
637 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-06-08 16:29:31.829808250 -0500
639 +#ifndef __IP_SET_IPTREE_H
640 +#define __IP_SET_IPTREE_H
642 +#include <linux/netfilter_ipv4/ip_set.h>
644 +#define SETTYPE_NAME "iptree"
645 +#define MAX_RANGE 0x0000FFFF
647 +struct ip_set_iptreed {
648 + unsigned long expires[256]; /* x.x.x.ADDR */
651 +struct ip_set_iptreec {
652 + struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
655 +struct ip_set_iptreeb {
656 + struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
659 +struct ip_set_iptree {
660 + unsigned int timeout;
661 + unsigned int gc_interval;
663 + uint32_t elements; /* number of elements */
664 + struct timer_list gc;
665 + struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
669 +struct ip_set_req_iptree_create {
670 + unsigned int timeout;
673 +struct ip_set_req_iptree {
675 + unsigned int timeout;
678 +#endif /* __IP_SET_IPTREE_H */
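The three nested 256-way levels are indexed by the successive octets of the host-byte-order address, as the ADDR comments indicate. Below is a minimal lookup sketch along those lines; the patch's real walker in ip_set_iptree.c (not shown) additionally handles timeouts, branch allocation and garbage collection. iptree_lookup_sketch is an invented name.

#include <linux/netfilter_ipv4/ip_set_iptree.h>

static int iptree_lookup_sketch(const struct ip_set_iptree *map, ip_set_ip_t ip)
{
	unsigned char a = ip >> 24, b = ip >> 16, c = ip >> 8, d = ip;
	const struct ip_set_iptreeb *btree = map->tree[a];	/* ADDR.*.*.* */
	const struct ip_set_iptreec *ctree;
	const struct ip_set_iptreed *dtree;

	if (btree == NULL || (ctree = btree->tree[b]) == NULL)	/* x.ADDR.*.* */
		return 0;
	if ((dtree = ctree->tree[c]) == NULL)			/* x.x.ADDR.* */
		return 0;
	return dtree->expires[d] != 0;				/* x.x.x.ADDR: slot in use */
}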
679 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h
680 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h 1969-12-31 18:00:00.000000000 -0600
681 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h 2007-06-08 16:29:31.829808250 -0500
683 +#ifndef _LINUX_IPSET_JHASH_H
684 +#define _LINUX_IPSET_JHASH_H
686 +/* This is a copy of linux/jhash.h but the types u32/u8 are changed
687 + * to __u32/__u8 so that the header file can be included into
688 + * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
691 +/* jhash.h: Jenkins hash support.
693 + * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
695 + * http://burtleburtle.net/bob/hash/
697 + * These are the credits from Bob's sources:
699 + * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
700 + * hash(), hash2(), hash3, and mix() are externally useful functions.
701 + * Routines to test the hash are included if SELF_TEST is defined.
702 + * You can use this free for any purpose. It has no warranty.
704 + * Copyright (C) 2003 David S. Miller (davem@redhat.com)
706 + * I've modified Bob's hash to be useful in the Linux kernel, and
707 + * any bugs present are surely my fault. -DaveM
710 +/* NOTE: Arguments are modified. */
711 +#define __jhash_mix(a, b, c) \
713 + a -= b; a -= c; a ^= (c>>13); \
714 + b -= c; b -= a; b ^= (a<<8); \
715 + c -= a; c -= b; c ^= (b>>13); \
716 + a -= b; a -= c; a ^= (c>>12); \
717 + b -= c; b -= a; b ^= (a<<16); \
718 + c -= a; c -= b; c ^= (b>>5); \
719 + a -= b; a -= c; a ^= (c>>3); \
720 + b -= c; b -= a; b ^= (a<<10); \
721 + c -= a; c -= b; c ^= (b>>15); \
724 +/* The golden ratio: an arbitrary value */
725 +#define JHASH_GOLDEN_RATIO 0x9e3779b9
727 +/* The most generic version, hashes an arbitrary sequence
728 + * of bytes. No alignment or length assumptions are made about
731 +static inline __u32 jhash(void *key, __u32 length, __u32 initval)
733 + __u32 a, b, c, len;
737 + a = b = JHASH_GOLDEN_RATIO;
740 + while (len >= 12) {
741 + a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
742 + b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
743 + c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
745 + __jhash_mix(a,b,c);
753 + case 11: c += ((__u32)k[10]<<24);
754 + case 10: c += ((__u32)k[9]<<16);
755 + case 9 : c += ((__u32)k[8]<<8);
756 + case 8 : b += ((__u32)k[7]<<24);
757 + case 7 : b += ((__u32)k[6]<<16);
758 + case 6 : b += ((__u32)k[5]<<8);
759 + case 5 : b += k[4];
760 + case 4 : a += ((__u32)k[3]<<24);
761 + case 3 : a += ((__u32)k[2]<<16);
762 + case 2 : a += ((__u32)k[1]<<8);
763 + case 1 : a += k[0];
766 + __jhash_mix(a,b,c);
771 +/* A special optimized version that handles 1 or more __u32s.
772 + * The length parameter here is the number of __u32s in the key.
774 +static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
776 + __u32 a, b, c, len;
778 + a = b = JHASH_GOLDEN_RATIO;
786 + __jhash_mix(a, b, c);
793 + case 2 : b += k[1];
794 + case 1 : a += k[0];
797 + __jhash_mix(a,b,c);
803 +/* A special ultra-optimized version that knows it is hashing exactly
804 + * 3, 2 or 1 word(s).
806 + * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
807 + * done at the end is not done here.
809 +static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
811 + a += JHASH_GOLDEN_RATIO;
812 + b += JHASH_GOLDEN_RATIO;
815 + __jhash_mix(a, b, c);
820 +static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
822 + return jhash_3words(a, b, 0, initval);
825 +static inline __u32 jhash_1word(__u32 a, __u32 initval)
827 + return jhash_3words(a, 0, 0, initval);
830 +#endif /* _LINUX_IPSET_JHASH_H */
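This header is deliberately includable from userspace (see the note at its top), so the wrappers can be exercised outside the kernel too, assuming the patched headers are on the include path. The snippet below mirrors the bucketing expression ip_set.c uses further down for its bindings hash; the seed and table size are arbitrary stand-ins.

#include <stdio.h>
#include <linux/types.h>
#include <linux/netfilter_ipv4/ip_set_jhash.h>

int main(void)
{
	__u32 seed = 0x12345678;	/* stands in for ip_set_hash_random */
	__u32 hash_size = 1024;		/* stands in for ip_set_bindings_hash_size */
	__u32 id = 3;			/* a set id */
	__u32 ip = 0xc0a80001;		/* 192.168.0.1 in host byte order */

	/* Same form as: jhash_2words(id, ip, ip_set_hash_random) % ip_set_bindings_hash_size */
	printf("bucket %u\n", jhash_2words(id, ip, seed) % hash_size);
	return 0;
}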
831 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h
832 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h 1969-12-31 18:00:00.000000000 -0600
833 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-06-08 16:29:31.829808250 -0500
835 +#ifndef __IP_SET_MACIPMAP_H
836 +#define __IP_SET_MACIPMAP_H
838 +#include <linux/netfilter_ipv4/ip_set.h>
840 +#define SETTYPE_NAME "macipmap"
841 +#define MAX_RANGE 0x0000FFFF
844 +#define IPSET_MACIP_MATCHUNSET 1
847 +#define IPSET_MACIP_ISSET 1
849 +struct ip_set_macipmap {
850 + void *members; /* the macipmap proper */
851 + ip_set_ip_t first_ip; /* host byte order, included in range */
852 + ip_set_ip_t last_ip; /* host byte order, included in range */
856 +struct ip_set_req_macipmap_create {
862 +struct ip_set_req_macipmap {
864 + unsigned char ethernet[ETH_ALEN];
867 +struct ip_set_macip {
868 + unsigned short flags;
869 + unsigned char ethernet[ETH_ALEN];
872 +#endif /* __IP_SET_MACIPMAP_H */
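A reasonable reading of the structures above (the authoritative logic is in ip_set_macipmap.c, which this excerpt does not include) is that members holds one struct ip_set_macip per address in [first_ip, last_ip], with IPSET_MACIP_ISSET marking slots that carry a MAC. The sketch below is written under that assumption only; the function name is invented.

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4/ip_set_macipmap.h>

/* Sketch, assumed layout: one ip_set_macip slot per address in range. */
static int macipmap_test_sketch(const struct ip_set_macipmap *map,
				ip_set_ip_t ip, const unsigned char *mac)
{
	const struct ip_set_macip *table = map->members;

	if (ip < map->first_ip || ip > map->last_ip)
		return 0;
	if (!(table[ip - map->first_ip].flags & IPSET_MACIP_ISSET))
		return 0;			/* no MAC stored for this address */
	return memcmp(table[ip - map->first_ip].ethernet, mac, ETH_ALEN) == 0;
}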
873 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h
874 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h 1969-12-31 18:00:00.000000000 -0600
875 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-06-08 16:29:31.829808250 -0500
877 +#ifndef _IP_SET_MALLOC_H
878 +#define _IP_SET_MALLOC_H
882 +/* Memory allocation and deallocation */
883 +static size_t max_malloc_size = 0;
885 +static inline void init_max_malloc_size(void)
887 +#define CACHE(x) max_malloc_size = x;
888 +#include <linux/kmalloc_sizes.h>
892 +static inline void * ip_set_malloc(size_t bytes)
894 + if (bytes > max_malloc_size)
895 + return vmalloc(bytes);
897 + return kmalloc(bytes, GFP_KERNEL);
900 +static inline void ip_set_free(void * data, size_t bytes)
902 + if (bytes > max_malloc_size)
909 + size_t max_elements;
913 +static inline void *
914 +harray_malloc(size_t hashsize, size_t typesize, int flags)
916 + struct harray *harray;
917 + size_t max_elements, size, i, j;
919 + if (!max_malloc_size)
920 + init_max_malloc_size();
922 + if (typesize > max_malloc_size)
925 + max_elements = max_malloc_size/typesize;
926 + size = hashsize/max_elements;
927 + if (hashsize % max_elements)
930 + /* Last pointer signals end of arrays */
931 + harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
937 + for (i = 0; i < size - 1; i++) {
938 + harray->arrays[i] = kmalloc(max_elements * typesize, flags);
939 + if (!harray->arrays[i])
941 + memset(harray->arrays[i], 0, max_elements * typesize);
943 + harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
945 + if (!harray->arrays[i])
947 + memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
949 + harray->max_elements = max_elements;
950 + harray->arrays[size] = NULL;
952 + return (void *)harray;
955 + for (j = 0; j < i; j++) {
956 + kfree(harray->arrays[j]);
962 +static inline void harray_free(void *h)
964 + struct harray *harray = (struct harray *) h;
967 + for (i = 0; harray->arrays[i] != NULL; i++)
968 + kfree(harray->arrays[i]);
972 +static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
974 + struct harray *harray = (struct harray *) h;
977 + for (i = 0; harray->arrays[i+1] != NULL; i++)
978 + memset(harray->arrays[i], 0, harray->max_elements * typesize);
979 + memset(harray->arrays[i], 0,
980 + (hashsize - i * harray->max_elements) * typesize);
983 +#define HARRAY_ELEM(h, type, which) \
985 + struct harray *__h = (struct harray *)(h); \
986 + ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
987 + + (which)%(__h)->max_elements); \
990 +#endif /* __KERNEL__ */
992 +#endif /*_IP_SET_MALLOC_H*/
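HARRAY_ELEM addresses element `which` as chunk which / max_elements, offset which % max_elements, so a harray behaves like one flat array even though it is assembled from kmalloc-sized pieces (falling back to vmalloc only for the bookkeeping). A small kernel-side sketch of the allocate/index/free cycle the hash set types go through; harray_example is an invented name.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ip_set_malloc.h>

static int harray_example(size_t hashsize)
{
	void *members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
	ip_set_ip_t *slot;

	if (!members)
		return -ENOMEM;

	/* Last slot: arrays[(hashsize - 1) / max_elements], offset (hashsize - 1) % max_elements */
	slot = HARRAY_ELEM(members, ip_set_ip_t *, hashsize - 1);
	*slot = 0;

	harray_free(members);
	return 0;
}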
993 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h
994 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h 1969-12-31 18:00:00.000000000 -0600
995 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-06-08 16:29:31.829808250 -0500
997 +#ifndef __IP_SET_NETHASH_H
998 +#define __IP_SET_NETHASH_H
1000 +#include <linux/netfilter_ipv4/ip_set.h>
1002 +#define SETTYPE_NAME "nethash"
1003 +#define MAX_RANGE 0x0000FFFF
1005 +struct ip_set_nethash {
1006 + ip_set_ip_t *members; /* the nethash proper */
1007 + uint32_t elements; /* number of elements */
1008 + uint32_t hashsize; /* hash size */
1009 + uint16_t probes; /* max number of probes */
1010 + uint16_t resize; /* resize factor in percent */
1011 + unsigned char cidr[30]; /* CIDR sizes */
1012 + void *initval[0]; /* initvals for jhash_1word */
1015 +struct ip_set_req_nethash_create {
1016 + uint32_t hashsize;
1021 +struct ip_set_req_nethash {
1023 + unsigned char cidr;
1026 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
1028 +static inline ip_set_ip_t
1029 +pack(ip_set_ip_t ip, unsigned char cidr)
1031 + ip_set_ip_t addr, *paddr = &addr;
1032 + unsigned char n, t, *a;
1034 + addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
1036 + DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
1040 + a = &((unsigned char *)paddr)[n];
1041 + *a = *a /(1 << (8 - t)) + shifts[t];
1043 + DP("n: %u, t: %u, a: %u", n, t, *a);
1044 + DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
1045 + HIPQUAD(ip), cidr, NIPQUAD(addr));
1048 + return ntohl(addr);
1051 +#endif /* __IP_SET_NETHASH_H */
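pack() above masks the address down to its cidr bits and appears to fold the prefix length into the partly-used byte via shifts[]. Since a packet carries no prefix length, a lookup presumably has to try every prefix length recorded in cidr[]; the sketch below illustrates that idea only, with probe() standing in for the hash probing done by ip_set_nethash.c (not shown here).

#include <linux/netfilter_ipv4/ip_set_nethash.h>

static int nethash_test_sketch(struct ip_set_nethash *map, ip_set_ip_t ip,
			       int (*probe)(struct ip_set_nethash *map,
					    ip_set_ip_t packed))
{
	int i;

	for (i = 0; i < 30 && map->cidr[i]; i++) {
		/* Mask ip down to cidr[i] bits and tag it with the prefix length. */
		ip_set_ip_t packed = pack(ip, map->cidr[i]);

		if (probe(map, packed))
			return 1;
	}
	return 0;
}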
1052 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h
1053 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h 1969-12-31 18:00:00.000000000 -0600
1054 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-06-08 16:29:31.829808250 -0500
1056 +#ifndef __IP_SET_PORTMAP_H
1057 +#define __IP_SET_PORTMAP_H
1059 +#include <linux/netfilter_ipv4/ip_set.h>
1061 +#define SETTYPE_NAME "portmap"
1062 +#define MAX_RANGE 0x0000FFFF
1063 +#define INVALID_PORT (MAX_RANGE + 1)
1065 +struct ip_set_portmap {
1066 + void *members; /* the portmap proper */
1067 + ip_set_ip_t first_port; /* host byte order, included in range */
1068 + ip_set_ip_t last_port; /* host byte order, included in range */
1071 +struct ip_set_req_portmap_create {
1076 +struct ip_set_req_portmap {
1080 +#endif /* __IP_SET_PORTMAP_H */
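With only first_port/last_port and an opaque members blob, the natural reading is a bitmap with one bit per port in the range; the sketch below assumes exactly that (the patch's own routines are in ip_set_portmap.c, not shown). Ports are kept in host byte order like everything else here.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netfilter_ipv4/ip_set_portmap.h>

/* Sketch, assumed layout: one bit per port in [first_port, last_port]. */
static int portmap_test_sketch(const struct ip_set_portmap *map, ip_set_ip_t port)
{
	if (port < map->first_port || port > map->last_port)
		return -ERANGE;
	return test_bit(port - map->first_port, map->members) ? 1 : 0;
}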
1081 diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h
1082 --- linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h 1969-12-31 18:00:00.000000000 -0600
1083 +++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h 2007-06-08 16:29:31.829808250 -0500
1088 +#include <linux/netfilter_ipv4/ip_set.h>
1090 +struct ipt_set_info {
1091 + ip_set_id_t index;
1092 + u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1096 +struct ipt_set_info_match {
1097 + struct ipt_set_info match_set;
1100 +struct ipt_set_info_target {
1101 + struct ipt_set_info add_set;
1102 + struct ipt_set_info del_set;
1105 +#endif /*_IPT_SET_H*/
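For completeness, a userspace-style sketch of how the flags array in ipt_set_info is meant to be filled, e.g. for a rule that checks the source address first and then, one binding level deeper, the destination. The actual option parsing lives in the ipset/iptables userspace extensions, not in this patch, so treat the helper name as illustrative.

#include <string.h>
#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ipt_set.h>

/* Sketch: one flags entry per binding level, unused entries left zero. */
static void fill_match_src_dst(struct ipt_set_info *info, ip_set_id_t index)
{
	memset(info, 0, sizeof(*info));
	info->index = index;		/* looked up beforehand, e.g. via IP_SET_OP_GET_BYNAME */
	info->flags[0] = IPSET_SRC;	/* level 0: source address against the set itself */
	info->flags[1] = IPSET_DST;	/* level 1: destination against the bound set */
}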
1106 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c
1107 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set.c 1969-12-31 18:00:00.000000000 -0600
1108 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c 2007-06-08 16:29:31.829808250 -0500
1110 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1111 + * Patrick Schaaf <bof@bof.de>
1112 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1114 + * This program is free software; you can redistribute it and/or modify
1115 + * it under the terms of the GNU General Public License version 2 as
1116 + * published by the Free Software Foundation.
1119 +/* Kernel module for IP set management */
1121 +#include <linux/version.h>
1122 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1123 +#include <linux/config.h>
1125 +#include <linux/module.h>
1126 +#include <linux/moduleparam.h>
1127 +#include <linux/kmod.h>
1128 +#include <linux/ip.h>
1129 +#include <linux/skbuff.h>
1130 +#include <linux/random.h>
1131 +#include <linux/jhash.h>
1132 +#include <linux/netfilter_ipv4/ip_tables.h>
1133 +#include <linux/errno.h>
1134 +#include <asm/uaccess.h>
1135 +#include <asm/bitops.h>
1136 +#include <asm/semaphore.h>
1137 +#include <linux/spinlock.h>
1138 +#include <linux/vmalloc.h>
1140 +#define ASSERT_READ_LOCK(x)
1141 +#define ASSERT_WRITE_LOCK(x)
1142 +#include <linux/netfilter_ipv4/ip_set.h>
1144 +static struct list_head set_type_list; /* all registered sets */
1145 +static struct ip_set **ip_set_list; /* all individual sets */
1146 +static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
1147 +static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
1148 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1149 +static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
1150 +static struct list_head *ip_set_hash; /* hash of bindings */
1151 +static unsigned int ip_set_hash_random; /* random seed */
1154 + * Sets are identified either by the index in ip_set_list or by id.
1155 + * The id never changes and is used to find a key in the hash.
1156 + * The index may change by swapping and is used at all other places
1157 + * (set/SET netfilter modules, binding value, etc.)
1159 + * Userspace requests are serialized by ip_set_app_mutex and sets can
1160 + * be deleted only from userspace. Therefore ip_set_list locking
1161 + * must obey the following rules:
1163 + * - kernel requests: read and write locking mandatory
1164 + * - user requests: read locking optional, write locking mandatory
1168 +__ip_set_get(ip_set_id_t index)
1170 + atomic_inc(&ip_set_list[index]->ref);
1174 +__ip_set_put(ip_set_id_t index)
1176 + atomic_dec(&ip_set_list[index]->ref);
1180 + * Binding routines
1183 +static inline struct ip_set_hash *
1184 +__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
1186 + struct ip_set_hash *set_hash;
1188 + list_for_each_entry(set_hash, &ip_set_hash[key], list)
1189 + if (set_hash->id == id && set_hash->ip == ip)
1196 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
1198 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1199 + % ip_set_bindings_hash_size;
1200 + struct ip_set_hash *set_hash;
1202 + ASSERT_READ_LOCK(&ip_set_lock);
1203 + IP_SET_ASSERT(ip_set_list[id]);
1204 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1206 + set_hash = __ip_set_find(key, id, ip);
1208 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1210 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1212 + return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
1216 +__set_hash_del(struct ip_set_hash *set_hash)
1218 + ASSERT_WRITE_LOCK(&ip_set_lock);
1219 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1221 + __ip_set_put(set_hash->binding);
1222 + list_del(&set_hash->list);
1227 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
1229 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1230 + % ip_set_bindings_hash_size;
1231 + struct ip_set_hash *set_hash;
1233 + IP_SET_ASSERT(ip_set_list[id]);
1234 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1235 + write_lock_bh(&ip_set_lock);
1236 + set_hash = __ip_set_find(key, id, ip);
1237 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1239 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1241 + if (set_hash != NULL)
1242 + __set_hash_del(set_hash);
1243 + write_unlock_bh(&ip_set_lock);
1248 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
1250 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1251 + % ip_set_bindings_hash_size;
1252 + struct ip_set_hash *set_hash;
1255 + IP_SET_ASSERT(ip_set_list[id]);
1256 + IP_SET_ASSERT(ip_set_list[binding]);
1257 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1258 + HIPQUAD(ip), ip_set_list[binding]->name);
1259 + write_lock_bh(&ip_set_lock);
1260 + set_hash = __ip_set_find(key, id, ip);
1262 + set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
1267 + INIT_LIST_HEAD(&set_hash->list);
1268 + set_hash->id = id;
1269 + set_hash->ip = ip;
1270 + list_add(&set_hash->list, &ip_set_hash[key]);
1272 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1273 + DP("overwrite binding: %s",
1274 + ip_set_list[set_hash->binding]->name);
1275 + __ip_set_put(set_hash->binding);
1277 + set_hash->binding = binding;
1278 + __ip_set_get(set_hash->binding);
1279 + DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
1280 + key, id, ip_set_list[id]->name,
1281 + HIPQUAD(ip), binding, ip_set_list[binding]->name);
1283 + write_unlock_bh(&ip_set_lock);
1287 +#define FOREACH_HASH_DO(fn, args...) \
1289 + ip_set_id_t __key; \
1290 + struct ip_set_hash *__set_hash; \
1292 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1293 + list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
1294 + fn(__set_hash , ## args); \
1298 +#define FOREACH_HASH_RW_DO(fn, args...) \
1300 + ip_set_id_t __key; \
1301 + struct ip_set_hash *__set_hash, *__n; \
1303 + ASSERT_WRITE_LOCK(&ip_set_lock); \
1304 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1305 + list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
1306 + fn(__set_hash , ## args); \
1310 +/* Add, del and test set entries from kernel */
1312 +#define follow_bindings(index, set, ip) \
1313 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
1314 + || (index = (set)->binding) != IP_SET_INVALID_ID)
1317 +ip_set_testip_kernel(ip_set_id_t index,
1318 + const struct sk_buff *skb,
1319 + const u_int32_t *flags)
1321 + struct ip_set *set;
1324 + unsigned char i = 0;
1326 + IP_SET_ASSERT(flags[i]);
1327 + read_lock_bh(&ip_set_lock);
1329 + set = ip_set_list[index];
1330 + IP_SET_ASSERT(set);
1331 + DP("set %s, index %u", set->name, index);
1332 + read_lock_bh(&set->lock);
1333 + res = set->type->testip_kernel(set, skb, &ip, flags, i++);
1334 + read_unlock_bh(&set->lock);
1335 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1338 + && follow_bindings(index, set, ip));
1339 + read_unlock_bh(&ip_set_lock);
1345 +ip_set_addip_kernel(ip_set_id_t index,
1346 + const struct sk_buff *skb,
1347 + const u_int32_t *flags)
1349 + struct ip_set *set;
1352 + unsigned char i = 0;
1354 + IP_SET_ASSERT(flags[i]);
1356 + read_lock_bh(&ip_set_lock);
1358 + set = ip_set_list[index];
1359 + IP_SET_ASSERT(set);
1360 + DP("set %s, index %u", set->name, index);
1361 + write_lock_bh(&set->lock);
1362 + res = set->type->addip_kernel(set, skb, &ip, flags, i++);
1363 + write_unlock_bh(&set->lock);
1364 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1365 + } while ((res == 0 || res == -EEXIST)
1367 + && follow_bindings(index, set, ip));
1368 + read_unlock_bh(&ip_set_lock);
1370 + if (res == -EAGAIN
1371 + && set->type->retry
1372 + && (res = set->type->retry(set)) == 0)
1377 +ip_set_delip_kernel(ip_set_id_t index,
1378 + const struct sk_buff *skb,
1379 + const u_int32_t *flags)
1381 + struct ip_set *set;
1384 + unsigned char i = 0;
1386 + IP_SET_ASSERT(flags[i]);
1387 + read_lock_bh(&ip_set_lock);
1389 + set = ip_set_list[index];
1390 + IP_SET_ASSERT(set);
1391 + DP("set %s, index %u", set->name, index);
1392 + write_lock_bh(&set->lock);
1393 + res = set->type->delip_kernel(set, skb, &ip, flags, i++);
1394 + write_unlock_bh(&set->lock);
1395 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1396 + } while ((res == 0 || res == -EEXIST)
1398 + && follow_bindings(index, set, ip));
1399 + read_unlock_bh(&ip_set_lock);
1402 +/* Register and deregister settype */
1404 +static inline struct ip_set_type *
1405 +find_set_type(const char *name)
1407 + struct ip_set_type *set_type;
1409 + list_for_each_entry(set_type, &set_type_list, list)
1410 + if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
1416 +ip_set_register_set_type(struct ip_set_type *set_type)
1420 + if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
1421 + ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
1422 + set_type->typename,
1423 + set_type->protocol_version,
1424 + IP_SET_PROTOCOL_VERSION);
1428 + write_lock_bh(&ip_set_lock);
1429 + if (find_set_type(set_type->typename)) {
1431 + ip_set_printk("'%s' already registered!",
1432 + set_type->typename);
1436 + if (!try_module_get(THIS_MODULE)) {
1440 + list_add(&set_type->list, &set_type_list);
1441 + DP("'%s' registered.", set_type->typename);
1443 + write_unlock_bh(&ip_set_lock);
1448 +ip_set_unregister_set_type(struct ip_set_type *set_type)
1450 + write_lock_bh(&ip_set_lock);
1451 + if (!find_set_type(set_type->typename)) {
1452 + ip_set_printk("'%s' not registered?",
1453 + set_type->typename);
1456 + list_del(&set_type->list);
1457 + module_put(THIS_MODULE);
1458 + DP("'%s' unregistered.", set_type->typename);
1460 + write_unlock_bh(&ip_set_lock);
1465 + * Userspace routines
1469 + * Find set by name, reference it once. The reference makes sure the
1470 + * thing pointed to does not go away under our feet. Drop the reference
1471 + * later, using ip_set_put().
1474 +ip_set_get_byname(const char *name)
1476 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1478 + down(&ip_set_app_mutex);
1479 + for (i = 0; i < ip_set_max; i++) {
1480 + if (ip_set_list[i] != NULL
1481 + && strcmp(ip_set_list[i]->name, name) == 0) {
1487 + up(&ip_set_app_mutex);
1492 + * Find set by index, reference it once. The reference makes sure the
1493 + * thing pointed to does not go away under our feet. Drop the reference
1494 + * later, using ip_set_put().
1497 +ip_set_get_byindex(ip_set_id_t index)
1499 + down(&ip_set_app_mutex);
1501 + if (index >= ip_set_max)
1502 + return IP_SET_INVALID_ID;
1504 + if (ip_set_list[index])
1505 + __ip_set_get(index);
1507 + index = IP_SET_INVALID_ID;
1509 + up(&ip_set_app_mutex);
1514 + * If the given set pointer points to a valid set, decrement
1515 + * reference count by 1. The caller shall not assume the index
1516 + * to be valid after calling this function.
1518 +void ip_set_put(ip_set_id_t index)
1520 + down(&ip_set_app_mutex);
1521 + if (ip_set_list[index])
1522 + __ip_set_put(index);
1523 + up(&ip_set_app_mutex);
1526 +/* Find a set by name or index */
1528 +ip_set_find_byname(const char *name)
1530 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1532 + for (i = 0; i < ip_set_max; i++) {
1533 + if (ip_set_list[i] != NULL
1534 + && strcmp(ip_set_list[i]->name, name) == 0) {
1543 +ip_set_find_byindex(ip_set_id_t index)
1545 + if (index >= ip_set_max || ip_set_list[index] == NULL)
1546 + index = IP_SET_INVALID_ID;
1552 + * Add, del, test, bind and unbind
1556 +__ip_set_testip(struct ip_set *set,
1563 + read_lock_bh(&set->lock);
1564 + res = set->type->testip(set, data, size, ip);
1565 + read_unlock_bh(&set->lock);
1571 +__ip_set_addip(ip_set_id_t index,
1575 + struct ip_set *set = ip_set_list[index];
1579 + IP_SET_ASSERT(set);
1581 + write_lock_bh(&set->lock);
1582 + res = set->type->addip(set, data, size, &ip);
1583 + write_unlock_bh(&set->lock);
1584 + } while (res == -EAGAIN
1585 + && set->type->retry
1586 + && (res = set->type->retry(set)) == 0);
1592 +ip_set_addip(ip_set_id_t index,
1597 + return __ip_set_addip(index,
1598 + data + sizeof(struct ip_set_req_adt),
1599 + size - sizeof(struct ip_set_req_adt));
1603 +ip_set_delip(ip_set_id_t index,
1607 + struct ip_set *set = ip_set_list[index];
1611 + IP_SET_ASSERT(set);
1612 + write_lock_bh(&set->lock);
1613 + res = set->type->delip(set,
1614 + data + sizeof(struct ip_set_req_adt),
1615 + size - sizeof(struct ip_set_req_adt),
1617 + write_unlock_bh(&set->lock);
1623 +ip_set_testip(ip_set_id_t index,
1627 + struct ip_set *set = ip_set_list[index];
1631 + IP_SET_ASSERT(set);
1632 + res = __ip_set_testip(set,
1633 + data + sizeof(struct ip_set_req_adt),
1634 + size - sizeof(struct ip_set_req_adt),
1637 + return (res > 0 ? -EEXIST : res);
1641 +ip_set_bindip(ip_set_id_t index,
1645 + struct ip_set *set = ip_set_list[index];
1646 + struct ip_set_req_bind *req_bind;
1647 + ip_set_id_t binding;
1651 + IP_SET_ASSERT(set);
1652 + if (size < sizeof(struct ip_set_req_bind))
1655 + req_bind = (struct ip_set_req_bind *) data;
1656 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1658 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1659 + /* Default binding of a set */
1660 + char *binding_name;
1662 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1665 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1666 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1668 + binding = ip_set_find_byname(binding_name);
1669 + if (binding == IP_SET_INVALID_ID)
1672 + write_lock_bh(&ip_set_lock);
1673 + /* Sets as binding values are referenced */
1674 + if (set->binding != IP_SET_INVALID_ID)
1675 + __ip_set_put(set->binding);
1676 + set->binding = binding;
1677 + __ip_set_get(set->binding);
1678 + write_unlock_bh(&ip_set_lock);
1682 + binding = ip_set_find_byname(req_bind->binding);
1683 + if (binding == IP_SET_INVALID_ID)
1686 + res = __ip_set_testip(set,
1687 + data + sizeof(struct ip_set_req_bind),
1688 + size - sizeof(struct ip_set_req_bind),
1690 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1691 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1694 + res = ip_set_hash_add(set->id, ip, binding);
1699 +#define FOREACH_SET_DO(fn, args...) \
1701 + ip_set_id_t __i; \
1702 + struct ip_set *__set; \
1704 + for (__i = 0; __i < ip_set_max; __i++) { \
1705 + __set = ip_set_list[__i]; \
1706 + if (__set != NULL) \
1707 + fn(__set , ##args); \
1712 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
1714 + if (set_hash->id == id)
1715 + __set_hash_del(set_hash);
1719 +__unbind_default(struct ip_set *set)
1721 + if (set->binding != IP_SET_INVALID_ID) {
1722 + /* Sets as binding values are referenced */
1723 + __ip_set_put(set->binding);
1724 + set->binding = IP_SET_INVALID_ID;
1729 +ip_set_unbindip(ip_set_id_t index,
1733 + struct ip_set *set;
1734 + struct ip_set_req_bind *req_bind;
1739 + if (size < sizeof(struct ip_set_req_bind))
1742 + req_bind = (struct ip_set_req_bind *) data;
1743 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1745 + DP("%u %s", index, req_bind->binding);
1746 + if (index == IP_SET_INVALID_ID) {
1747 + /* unbind :all: */
1748 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1749 + /* Default binding of sets */
1750 + write_lock_bh(&ip_set_lock);
1751 + FOREACH_SET_DO(__unbind_default);
1752 + write_unlock_bh(&ip_set_lock);
1754 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1755 + /* Flush all bindings of all sets*/
1756 + write_lock_bh(&ip_set_lock);
1757 + FOREACH_HASH_RW_DO(__set_hash_del);
1758 + write_unlock_bh(&ip_set_lock);
1761 + DP("unreachable reached!");
1765 + set = ip_set_list[index];
1766 + IP_SET_ASSERT(set);
1767 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1768 + /* Default binding of set */
1769 + ip_set_id_t binding = ip_set_find_byindex(set->binding);
1771 + if (binding == IP_SET_INVALID_ID)
1774 + write_lock_bh(&ip_set_lock);
1775 + /* Sets in hash values are referenced */
1776 + __ip_set_put(set->binding);
1777 + set->binding = IP_SET_INVALID_ID;
1778 + write_unlock_bh(&ip_set_lock);
1781 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1782 + /* Flush all bindings */
1784 + write_lock_bh(&ip_set_lock);
1785 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
1786 + write_unlock_bh(&ip_set_lock);
1790 + res = __ip_set_testip(set,
1791 + data + sizeof(struct ip_set_req_bind),
1792 + size - sizeof(struct ip_set_req_bind),
1795 + DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
1797 + res = ip_set_hash_del(set->id, ip);
1803 +ip_set_testbind(ip_set_id_t index,
1807 + struct ip_set *set = ip_set_list[index];
1808 + struct ip_set_req_bind *req_bind;
1809 + ip_set_id_t binding;
1813 + IP_SET_ASSERT(set);
1814 + if (size < sizeof(struct ip_set_req_bind))
1817 + req_bind = (struct ip_set_req_bind *) data;
1818 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1820 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1821 + /* Default binding of set */
1822 + char *binding_name;
1824 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1827 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1828 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1830 + binding = ip_set_find_byname(binding_name);
1831 + if (binding == IP_SET_INVALID_ID)
1834 + res = (set->binding == binding) ? -EEXIST : 0;
1838 + binding = ip_set_find_byname(req_bind->binding);
1839 + if (binding == IP_SET_INVALID_ID)
1843 + res = __ip_set_testip(set,
1844 + data + sizeof(struct ip_set_req_bind),
1845 + size - sizeof(struct ip_set_req_bind),
1847 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1848 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1851 + res = (ip_set_find_in_hash(set->id, ip) == binding)
1857 +static struct ip_set_type *
1858 +find_set_type_rlock(const char *typename)
1860 + struct ip_set_type *type;
1862 + read_lock_bh(&ip_set_lock);
1863 + type = find_set_type(typename);
1865 + read_unlock_bh(&ip_set_lock);
1871 +find_free_id(const char *name,
1872 + ip_set_id_t *index,
1877 + *id = IP_SET_INVALID_ID;
1878 + for (i = 0; i < ip_set_max; i++) {
1879 + if (ip_set_list[i] == NULL) {
1880 + if (*id == IP_SET_INVALID_ID)
1882 + } else if (strcmp(name, ip_set_list[i]->name) == 0)
1886 + if (*id == IP_SET_INVALID_ID)
1887 + /* No free slot remained */
1889 + /* Check that index is usable as id (swapping) */
1891 + for (i = 0; i < ip_set_max; i++) {
1892 + if (ip_set_list[i] != NULL
1893 + && ip_set_list[i]->id == *id) {
1905 +ip_set_create(const char *name,
1906 + const char *typename,
1907 + ip_set_id_t restore,
1911 + struct ip_set *set;
1912 + ip_set_id_t index = 0, id;
1915 + DP("setname: %s, typename: %s, id: %u", name, typename, restore);
1917 + * First, and without any locks, allocate and initialize
1918 + * a normal base set structure.
1920 + set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
1923 + set->lock = RW_LOCK_UNLOCKED;
1924 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
1925 + set->binding = IP_SET_INVALID_ID;
1926 + atomic_set(&set->ref, 0);
1929 + * Next, take the &ip_set_lock, check that we know the type,
1930 + * and take a reference on the type, to make sure it
1931 + * stays available while constructing our new set.
1933 + * After referencing the type, we drop the &ip_set_lock,
1934 + * and let the new set construction run without locks.
1936 + set->type = find_set_type_rlock(typename);
1937 + if (set->type == NULL) {
1938 + /* Try loading the module */
1939 + char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
1940 + strcpy(modulename, "ip_set_");
1941 + strcat(modulename, typename);
1942 + DP("try to load %s", modulename);
1943 + request_module(modulename);
1944 + set->type = find_set_type_rlock(typename);
1946 + if (set->type == NULL) {
1947 + ip_set_printk("no set type '%s', set '%s' not created",
1952 + if (!try_module_get(set->type->me)) {
1953 + read_unlock_bh(&ip_set_lock);
1957 + read_unlock_bh(&ip_set_lock);
1960 + * Without holding any locks, create private part.
1962 + res = set->type->create(set, data, size);
1966 + /* BTW, res==0 here. */
1969 + * Here, we have a valid, constructed set. &ip_set_lock again,
1970 + * find free id/index and check that it is not already in
1973 + write_lock_bh(&ip_set_lock);
1974 + if ((res = find_free_id(set->name, &index, &id)) != 0) {
1975 + DP("no free id!");
1979 + /* Make sure restore gets the same index */
1980 + if (restore != IP_SET_INVALID_ID && index != restore) {
1981 + DP("Can't restore, sets are screwed up");
1987 + * Finally! Add our shiny new set to the list, and be done.
1989 + DP("create: '%s' created with index %u, id %u!", set->name, index, id);
1991 + ip_set_list[index] = set;
1992 + write_unlock_bh(&ip_set_lock);
1996 + write_unlock_bh(&ip_set_lock);
1997 + set->type->destroy(set);
1999 + module_put(set->type->me);
2006 + * Destroy a given existing set
2009 +ip_set_destroy_set(ip_set_id_t index)
2011 + struct ip_set *set = ip_set_list[index];
2013 + IP_SET_ASSERT(set);
2014 + DP("set: %s", set->name);
2015 + write_lock_bh(&ip_set_lock);
2016 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2017 + if (set->binding != IP_SET_INVALID_ID)
2018 + __ip_set_put(set->binding);
2019 + ip_set_list[index] = NULL;
2020 + write_unlock_bh(&ip_set_lock);
2022 + /* Must call it without holding any lock */
2023 + set->type->destroy(set);
2024 + module_put(set->type->me);
2029 + * Destroy a set - or all sets
2030 + * Sets must not be referenced/used.
2033 +ip_set_destroy(ip_set_id_t index)
2037 + /* ref modification always protected by the mutex */
2038 + if (index != IP_SET_INVALID_ID) {
2039 + if (atomic_read(&ip_set_list[index]->ref))
2041 + ip_set_destroy_set(index);
2043 + for (i = 0; i < ip_set_max; i++) {
2044 + if (ip_set_list[i] != NULL
2045 + && (atomic_read(&ip_set_list[i]->ref)))
2049 + for (i = 0; i < ip_set_max; i++) {
2050 + if (ip_set_list[i] != NULL)
2051 + ip_set_destroy_set(i);
2058 +ip_set_flush_set(struct ip_set *set)
2060 + DP("set: %s %u", set->name, set->id);
2062 + write_lock_bh(&set->lock);
2063 + set->type->flush(set);
2064 + write_unlock_bh(&set->lock);
2068 + * Flush data in a set - or in all sets
2071 +ip_set_flush(ip_set_id_t index)
2073 + if (index != IP_SET_INVALID_ID) {
2074 + IP_SET_ASSERT(ip_set_list[index]);
2075 + ip_set_flush_set(ip_set_list[index]);
2077 + FOREACH_SET_DO(ip_set_flush_set);
2084 +ip_set_rename(ip_set_id_t index, const char *name)
2086 + struct ip_set *set = ip_set_list[index];
2090 + DP("set: %s to %s", set->name, name);
2091 + write_lock_bh(&ip_set_lock);
2092 + for (i = 0; i < ip_set_max; i++) {
2093 + if (ip_set_list[i] != NULL
2094 + && strncmp(ip_set_list[i]->name,
2096 + IP_SET_MAXNAMELEN - 1) == 0) {
2101 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
2103 + write_unlock_bh(&ip_set_lock);
2108 + * Swap two sets so that name/index points to the other.
2109 + * References are also swapped.
2112 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
2114 + struct ip_set *from = ip_set_list[from_index];
2115 + struct ip_set *to = ip_set_list[to_index];
2116 + char from_name[IP_SET_MAXNAMELEN];
2117 + u_int32_t from_ref;
2119 + DP("set: %s to %s", from->name, to->name);
2120 + /* Features must not change. Artificial restriction. */
2121 + if (from->type->features != to->type->features)
2124 + /* No magic here: ref munging protected by the mutex */
2125 + write_lock_bh(&ip_set_lock);
2126 + strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
2127 + from_ref = atomic_read(&from->ref);
2129 + strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
2130 + atomic_set(&from->ref, atomic_read(&to->ref));
2131 + strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
2132 + atomic_set(&to->ref, from_ref);
2134 + ip_set_list[from_index] = to;
2135 + ip_set_list[to_index] = from;
2137 + write_unlock_bh(&ip_set_lock);
2146 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
2147 + ip_set_id_t id, size_t *size)
2149 + if (set_hash->id == id)
2150 + *size += sizeof(struct ip_set_hash_list);
2154 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
2155 + ip_set_id_t id, size_t *size)
2157 + if (set_hash->id == id)
2158 + *size += sizeof(struct ip_set_hash_save);
2162 +__set_hash_bindings(struct ip_set_hash *set_hash,
2163 + ip_set_id_t id, void *data, int *used)
2165 + if (set_hash->id == id) {
2166 + struct ip_set_hash_list *hash_list =
2167 + (struct ip_set_hash_list *)(data + *used);
2169 + hash_list->ip = set_hash->ip;
2170 + hash_list->binding = set_hash->binding;
2171 + *used += sizeof(struct ip_set_hash_list);
2175 +static int ip_set_list_set(ip_set_id_t index,
2180 + struct ip_set *set = ip_set_list[index];
2181 + struct ip_set_list *set_list;
2183 + /* Pointer to our header */
2184 + set_list = (struct ip_set_list *) (data + *used);
2186 + DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
2188 + /* Get and ensure header size */
2189 + if (*used + sizeof(struct ip_set_list) > len)
2190 + goto not_enough_mem;
2191 + *used += sizeof(struct ip_set_list);
2193 + read_lock_bh(&set->lock);
2194 + /* Get and ensure set specific header size */
2195 + set_list->header_size = set->type->header_size;
2196 + if (*used + set_list->header_size > len)
2199 + /* Fill in the header */
2200 + set_list->index = index;
2201 + set_list->binding = set->binding;
2202 + set_list->ref = atomic_read(&set->ref);
2204 + /* Fill in set specific header data */
2205 + set->type->list_header(set, data + *used);
2206 + *used += set_list->header_size;
2208 + /* Get and ensure set specific members size */
2209 + set_list->members_size = set->type->list_members_size(set);
2210 + if (*used + set_list->members_size > len)
2213 + /* Fill in set specific members data */
2214 + set->type->list_members(set, data + *used);
2215 + *used += set_list->members_size;
2216 + read_unlock_bh(&set->lock);
2220 + /* Get and ensure set specific bindings size */
2221 + set_list->bindings_size = 0;
2222 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2223 + set->id, &set_list->bindings_size);
2224 + if (*used + set_list->bindings_size > len)
2225 + goto not_enough_mem;
2227 + /* Fill in set specific bindings data */
2228 + FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
2233 + read_unlock_bh(&set->lock);
2235 + DP("not enough mem, try again");
2242 +static int ip_set_save_set(ip_set_id_t index,
2247 + struct ip_set *set;
2248 + struct ip_set_save *set_save;
2250 + /* Pointer to our header */
2251 + set_save = (struct ip_set_save *) (data + *used);
2253 + /* Get and ensure header size */
2254 + if (*used + sizeof(struct ip_set_save) > len)
2255 + goto not_enough_mem;
2256 + *used += sizeof(struct ip_set_save);
2258 + set = ip_set_list[index];
2259 + DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
2260 + data, data + *used);
2262 + read_lock_bh(&set->lock);
2263 + /* Get and ensure set specific header size */
2264 + set_save->header_size = set->type->header_size;
2265 + if (*used + set_save->header_size > len)
2268 + /* Fill in the header */
2269 + set_save->index = index;
2270 + set_save->binding = set->binding;
2272 +	/* Fill in set specific header data */
2273 + set->type->list_header(set, data + *used);
2274 + *used += set_save->header_size;
2276 + DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
2277 + set_save->header_size, data, data + *used);
2278 + /* Get and ensure set specific members size */
2279 + set_save->members_size = set->type->list_members_size(set);
2280 + if (*used + set_save->members_size > len)
2283 +	/* Fill in set specific members data */
2284 + set->type->list_members(set, data + *used);
2285 + *used += set_save->members_size;
2286 + read_unlock_bh(&set->lock);
2287 + DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
2288 + set_save->members_size, data, data + *used);
2292 + read_unlock_bh(&set->lock);
2294 + DP("not enough mem, try again");
2299 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
2307 + && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
2308 + struct ip_set_hash_save *hash_save =
2309 + (struct ip_set_hash_save *)(data + *used);
2310 + /* Ensure bindings size */
2311 + if (*used + sizeof(struct ip_set_hash_save) > len) {
2315 + hash_save->id = set_hash->id;
2316 + hash_save->ip = set_hash->ip;
2317 + hash_save->binding = set_hash->binding;
2318 + *used += sizeof(struct ip_set_hash_save);
2322 +static int ip_set_save_bindings(ip_set_id_t index,
2328 + struct ip_set_save *set_save;
2330 + DP("used %u, len %u", *used, len);
2331 + /* Get and ensure header size */
2332 + if (*used + sizeof(struct ip_set_save) > len)
2336 + set_save = (struct ip_set_save *) (data + *used);
2337 + set_save->index = IP_SET_INVALID_ID;
2338 + set_save->header_size = 0;
2339 + set_save->members_size = 0;
2340 + *used += sizeof(struct ip_set_save);
2342 + DP("marker added used %u, len %u", *used, len);
2343 + /* Fill in bindings data */
2344 + if (index != IP_SET_INVALID_ID)
2345 + /* Sets are identified by id in hash */
2346 + index = ip_set_list[index]->id;
2347 + FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
2355 +static int ip_set_restore(void *data,
2359 + int line = 0, used = 0, members_size;
2360 + struct ip_set *set;
2361 + struct ip_set_hash_save *hash_save;
2362 + struct ip_set_restore *set_restore;
2363 + ip_set_id_t index;
2365 + /* Loop to restore sets */
2369 + DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
2370 + /* Get and ensure header size */
2371 + if (used + sizeof(struct ip_set_restore) > len)
2373 + set_restore = (struct ip_set_restore *) (data + used);
2374 + used += sizeof(struct ip_set_restore);
2376 + /* Ensure data size */
2378 + + set_restore->header_size
2379 + + set_restore->members_size > len)
2382 + /* Check marker */
2383 + if (set_restore->index == IP_SET_INVALID_ID) {
2388 + /* Try to create the set */
2389 + DP("restore %s %s", set_restore->name, set_restore->typename);
2390 + res = ip_set_create(set_restore->name,
2391 + set_restore->typename,
2392 + set_restore->index,
2394 + set_restore->header_size);
2398 + used += set_restore->header_size;
2400 + index = ip_set_find_byindex(set_restore->index);
2401 + DP("index %u, restore_index %u", index, set_restore->index);
2402 + if (index != set_restore->index)
2404 + /* Try to restore members data */
2405 + set = ip_set_list[index];
2407 + DP("members_size %u reqsize %u",
2408 + set_restore->members_size, set->type->reqsize);
2409 + while (members_size + set->type->reqsize <=
2410 + set_restore->members_size) {
2412 + DP("members: %u, line %u", members_size, line);
2413 + res = __ip_set_addip(index,
2414 + data + used + members_size,
2415 + set->type->reqsize);
2416 + if (!(res == 0 || res == -EEXIST))
2418 + members_size += set->type->reqsize;
2421 + DP("members_size %u %u",
2422 + set_restore->members_size, members_size);
2423 + if (members_size != set_restore->members_size)
2425 + used += set_restore->members_size;
2429 + /* Loop to restore bindings */
2430 + while (used < len) {
2433 + DP("restore binding, line %u", line);
2434 + /* Get and ensure size */
2435 + if (used + sizeof(struct ip_set_hash_save) > len)
2437 + hash_save = (struct ip_set_hash_save *) (data + used);
2438 + used += sizeof(struct ip_set_hash_save);
2440 + /* hash_save->id is used to store the index */
2441 + index = ip_set_find_byindex(hash_save->id);
2442 + DP("restore binding index %u, id %u, %u -> %u",
2443 + index, hash_save->id, hash_save->ip, hash_save->binding);
2444 + if (index != hash_save->id)
2446 + if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
2447 + DP("corrupt binding set index %u", hash_save->binding);
2450 + set = ip_set_list[hash_save->id];
2451 + /* Null valued IP means default binding */
2452 + if (hash_save->ip)
2453 + res = ip_set_hash_add(set->id,
2455 + hash_save->binding);
2457 + IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
2458 + write_lock_bh(&ip_set_lock);
2459 + set->binding = hash_save->binding;
2460 + __ip_set_get(set->binding);
2461 + write_unlock_bh(&ip_set_lock);
2462 + DP("default binding: %u", set->binding);
2474 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
2477 + int res = 0; /* Assume OK */
2479 + struct ip_set_req_adt *req_adt;
2480 + ip_set_id_t index = IP_SET_INVALID_ID;
2481 + int (*adtfn)(ip_set_id_t index,
2482 + const void *data, size_t size);
2484 + int (*fn)(ip_set_id_t index,
2485 + const void *data, size_t size);
2487 + { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
2488 + { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
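+	/* The rows above are in opcode order: the handler is selected
+	 * further down by indexing with (*op - IP_SET_OP_ADD_IP). */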
2491 + DP("optval=%d, user=%p, len=%d", optval, user, len);
2492 + if (!capable(CAP_NET_ADMIN))
2494 + if (optval != SO_IP_SET)
2496 + if (len <= sizeof(unsigned)) {
2497 + ip_set_printk("short userdata (want >%zu, got %u)",
2498 + sizeof(unsigned), len);
2501 + data = vmalloc(len);
2503 + DP("out of mem for %u bytes", len);
2506 + if (copy_from_user(data, user, len) != 0) {
2510 + if (down_interruptible(&ip_set_app_mutex)) {
2515 + op = (unsigned *)data;
2518 + if (*op < IP_SET_OP_VERSION) {
2519 + /* Check the version at the beginning of operations */
2520 + struct ip_set_req_version *req_version =
2521 + (struct ip_set_req_version *) data;
2522 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2529 + case IP_SET_OP_CREATE:{
2530 + struct ip_set_req_create *req_create
2531 + = (struct ip_set_req_create *) data;
2533 + if (len < sizeof(struct ip_set_req_create)) {
2534 + ip_set_printk("short CREATE data (want >=%zu, got %u)",
2535 + sizeof(struct ip_set_req_create), len);
2539 + req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
2540 + req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2541 + res = ip_set_create(req_create->name,
2542 + req_create->typename,
2543 + IP_SET_INVALID_ID,
2544 + data + sizeof(struct ip_set_req_create),
2545 + len - sizeof(struct ip_set_req_create));
2548 + case IP_SET_OP_DESTROY:{
2549 + struct ip_set_req_std *req_destroy
2550 + = (struct ip_set_req_std *) data;
2552 + if (len != sizeof(struct ip_set_req_std)) {
2553 + ip_set_printk("invalid DESTROY data (want %zu, got %u)",
2554 + sizeof(struct ip_set_req_std), len);
2558 + if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
2559 + /* Destroy all sets */
2560 + index = IP_SET_INVALID_ID;
2562 + req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
2563 + index = ip_set_find_byname(req_destroy->name);
2565 + if (index == IP_SET_INVALID_ID) {
2571 + res = ip_set_destroy(index);
2574 + case IP_SET_OP_FLUSH:{
2575 + struct ip_set_req_std *req_flush =
2576 + (struct ip_set_req_std *) data;
2578 + if (len != sizeof(struct ip_set_req_std)) {
2579 + ip_set_printk("invalid FLUSH data (want %zu, got %u)",
2580 + sizeof(struct ip_set_req_std), len);
2584 + if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
2585 + /* Flush all sets */
2586 + index = IP_SET_INVALID_ID;
2588 + req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
2589 + index = ip_set_find_byname(req_flush->name);
2591 + if (index == IP_SET_INVALID_ID) {
2596 + res = ip_set_flush(index);
2599 + case IP_SET_OP_RENAME:{
2600 + struct ip_set_req_create *req_rename
2601 + = (struct ip_set_req_create *) data;
2603 + if (len != sizeof(struct ip_set_req_create)) {
2604 + ip_set_printk("invalid RENAME data (want %zu, got %u)",
2605 + sizeof(struct ip_set_req_create), len);
2610 + req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
2611 + req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2613 + index = ip_set_find_byname(req_rename->name);
2614 + if (index == IP_SET_INVALID_ID) {
2618 + res = ip_set_rename(index, req_rename->typename);
2621 + case IP_SET_OP_SWAP:{
2622 + struct ip_set_req_create *req_swap
2623 + = (struct ip_set_req_create *) data;
2624 + ip_set_id_t to_index;
2626 + if (len != sizeof(struct ip_set_req_create)) {
2627 + ip_set_printk("invalid SWAP data (want %zu, got %u)",
2628 + sizeof(struct ip_set_req_create), len);
2633 + req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
2634 + req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2636 + index = ip_set_find_byname(req_swap->name);
2637 + if (index == IP_SET_INVALID_ID) {
2641 + to_index = ip_set_find_byname(req_swap->typename);
2642 + if (to_index == IP_SET_INVALID_ID) {
2646 + res = ip_set_swap(index, to_index);
2650 + break; /* Set identified by id */
2653 +	/* From here on we handle add/del/test/bind/unbind/test_bind operations */
2654 + if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
2658 + adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
2660 + if (len < sizeof(struct ip_set_req_adt)) {
2661 + ip_set_printk("short data in adt request (want >=%zu, got %u)",
2662 + sizeof(struct ip_set_req_adt), len);
2666 + req_adt = (struct ip_set_req_adt *) data;
2668 + /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
2669 + if (!(*op == IP_SET_OP_UNBIND_SET
2670 + && req_adt->index == IP_SET_INVALID_ID)) {
2671 + index = ip_set_find_byindex(req_adt->index);
2672 + if (index == IP_SET_INVALID_ID) {
2677 + res = adtfn(index, data, len);
2680 + up(&ip_set_app_mutex);
2684 + DP("final result %d", res);
2689 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
2693 + ip_set_id_t index = IP_SET_INVALID_ID;
2695 + int copylen = *len;
2697 + DP("optval=%d, user=%p, len=%d", optval, user, *len);
2698 + if (!capable(CAP_NET_ADMIN))
2700 + if (optval != SO_IP_SET)
2702 + if (*len < sizeof(unsigned)) {
2703 + ip_set_printk("short userdata (want >=%zu, got %d)",
2704 + sizeof(unsigned), *len);
2707 + data = vmalloc(*len);
2709 + DP("out of mem for %d bytes", *len);
2712 + if (copy_from_user(data, user, *len) != 0) {
2716 + if (down_interruptible(&ip_set_app_mutex)) {
2721 + op = (unsigned *) data;
2724 + if (*op < IP_SET_OP_VERSION) {
2725 + /* Check the version at the beginning of operations */
2726 + struct ip_set_req_version *req_version =
2727 + (struct ip_set_req_version *) data;
2728 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2735 + case IP_SET_OP_VERSION: {
2736 + struct ip_set_req_version *req_version =
2737 + (struct ip_set_req_version *) data;
2739 + if (*len != sizeof(struct ip_set_req_version)) {
2740 + ip_set_printk("invalid VERSION (want %zu, got %d)",
2741 + sizeof(struct ip_set_req_version),
2747 + req_version->version = IP_SET_PROTOCOL_VERSION;
2748 + res = copy_to_user(user, req_version,
2749 + sizeof(struct ip_set_req_version));
2752 + case IP_SET_OP_GET_BYNAME: {
2753 + struct ip_set_req_get_set *req_get
2754 + = (struct ip_set_req_get_set *) data;
2756 + if (*len != sizeof(struct ip_set_req_get_set)) {
2757 + ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
2758 + sizeof(struct ip_set_req_get_set), *len);
2762 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2763 + index = ip_set_find_byname(req_get->set.name);
2764 + req_get->set.index = index;
2767 + case IP_SET_OP_GET_BYINDEX: {
2768 + struct ip_set_req_get_set *req_get
2769 + = (struct ip_set_req_get_set *) data;
2771 + if (*len != sizeof(struct ip_set_req_get_set)) {
2772 + ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
2773 + sizeof(struct ip_set_req_get_set), *len);
2777 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2778 + index = ip_set_find_byindex(req_get->set.index);
2779 + strncpy(req_get->set.name,
2780 + index == IP_SET_INVALID_ID ? ""
2781 + : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
2784 + case IP_SET_OP_ADT_GET: {
2785 + struct ip_set_req_adt_get *req_get
2786 + = (struct ip_set_req_adt_get *) data;
2788 + if (*len != sizeof(struct ip_set_req_adt_get)) {
2789 + ip_set_printk("invalid ADT_GET (want %zu, got %d)",
2790 + sizeof(struct ip_set_req_adt_get), *len);
2794 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2795 + index = ip_set_find_byname(req_get->set.name);
2796 + if (index != IP_SET_INVALID_ID) {
2797 + req_get->set.index = index;
2798 + strncpy(req_get->typename,
2799 + ip_set_list[index]->type->typename,
2800 + IP_SET_MAXNAMELEN - 1);
2807 + case IP_SET_OP_MAX_SETS: {
2808 + struct ip_set_req_max_sets *req_max_sets
2809 + = (struct ip_set_req_max_sets *) data;
2812 + if (*len != sizeof(struct ip_set_req_max_sets)) {
2813 + ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
2814 + sizeof(struct ip_set_req_max_sets), *len);
2819 + if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
2820 + req_max_sets->set.index = IP_SET_INVALID_ID;
2822 + req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2823 + req_max_sets->set.index =
2824 + ip_set_find_byname(req_max_sets->set.name);
2825 + if (req_max_sets->set.index == IP_SET_INVALID_ID) {
2830 + req_max_sets->max_sets = ip_set_max;
2831 + req_max_sets->sets = 0;
2832 + for (i = 0; i < ip_set_max; i++) {
2833 + if (ip_set_list[i] != NULL)
2834 + req_max_sets->sets++;
2838 + case IP_SET_OP_LIST_SIZE:
2839 + case IP_SET_OP_SAVE_SIZE: {
2840 + struct ip_set_req_setnames *req_setnames
2841 + = (struct ip_set_req_setnames *) data;
2842 + struct ip_set_name_list *name_list;
2843 + struct ip_set *set;
2847 + if (*len < sizeof(struct ip_set_req_setnames)) {
2848 + ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
2849 + sizeof(struct ip_set_req_setnames), *len);
2854 + req_setnames->size = 0;
2855 + used = sizeof(struct ip_set_req_setnames);
2856 + for (i = 0; i < ip_set_max; i++) {
2857 + if (ip_set_list[i] == NULL)
2859 + name_list = (struct ip_set_name_list *)
2861 + used += sizeof(struct ip_set_name_list);
2862 + if (used > copylen) {
2866 + set = ip_set_list[i];
2867 + /* Fill in index, name, etc. */
2868 + name_list->index = i;
2869 + name_list->id = set->id;
2870 + strncpy(name_list->name,
2872 + IP_SET_MAXNAMELEN - 1);
2873 + strncpy(name_list->typename,
2874 + set->type->typename,
2875 + IP_SET_MAXNAMELEN - 1);
2876 + DP("filled %s of type %s, index %u\n",
2877 + name_list->name, name_list->typename,
2878 + name_list->index);
2879 + if (!(req_setnames->index == IP_SET_INVALID_ID
2880 + || req_setnames->index == i))
2884 + case IP_SET_OP_LIST_SIZE: {
2885 + req_setnames->size += sizeof(struct ip_set_list)
2886 + + set->type->header_size
2887 + + set->type->list_members_size(set);
2888 + /* Sets are identified by id in the hash */
2889 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2890 + set->id, &req_setnames->size);
2893 + case IP_SET_OP_SAVE_SIZE: {
2894 + req_setnames->size += sizeof(struct ip_set_save)
2895 + + set->type->header_size
2896 + + set->type->list_members_size(set);
2897 + FOREACH_HASH_DO(__set_hash_bindings_size_save,
2898 + set->id, &req_setnames->size);
2905 + if (copylen != used) {
2911 + case IP_SET_OP_LIST: {
2912 + struct ip_set_req_list *req_list
2913 + = (struct ip_set_req_list *) data;
2917 + if (*len < sizeof(struct ip_set_req_list)) {
2918 + ip_set_printk("short LIST (want >=%zu, got %d)",
2919 + sizeof(struct ip_set_req_list), *len);
2923 + index = req_list->index;
2924 + if (index != IP_SET_INVALID_ID
2925 + && ip_set_find_byindex(index) != index) {
2930 + if (index == IP_SET_INVALID_ID) {
2931 + /* List all sets */
2932 + for (i = 0; i < ip_set_max && res == 0; i++) {
2933 + if (ip_set_list[i] != NULL)
2934 + res = ip_set_list_set(i, data, &used, *len);
2937 + /* List an individual set */
2938 + res = ip_set_list_set(index, data, &used, *len);
2942 + else if (copylen != used) {
2948 + case IP_SET_OP_SAVE: {
2949 + struct ip_set_req_list *req_save
2950 + = (struct ip_set_req_list *) data;
2954 + if (*len < sizeof(struct ip_set_req_list)) {
2955 + ip_set_printk("short SAVE (want >=%zu, got %d)",
2956 + sizeof(struct ip_set_req_list), *len);
2960 + index = req_save->index;
2961 + if (index != IP_SET_INVALID_ID
2962 + && ip_set_find_byindex(index) != index) {
2967 + if (index == IP_SET_INVALID_ID) {
2968 + /* Save all sets */
2969 + for (i = 0; i < ip_set_max && res == 0; i++) {
2970 + if (ip_set_list[i] != NULL)
2971 + res = ip_set_save_set(i, data, &used, *len);
2974 + /* Save an individual set */
2975 + res = ip_set_save_set(index, data, &used, *len);
2978 + res = ip_set_save_bindings(index, data, &used, *len);
2982 + else if (copylen != used) {
2988 + case IP_SET_OP_RESTORE: {
2989 + struct ip_set_req_setnames *req_restore
2990 + = (struct ip_set_req_setnames *) data;
2993 + if (*len < sizeof(struct ip_set_req_setnames)
2994 + || *len != req_restore->size) {
2995 + ip_set_printk("invalid RESTORE (want =%zu, got %d)",
2996 + req_restore->size, *len);
3000 + line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3001 + req_restore->size - sizeof(struct ip_set_req_setnames));
3002 + DP("ip_set_restore: %u", line);
3005 + req_restore->size = line;
3006 + copylen = sizeof(struct ip_set_req_setnames);
3014 + } /* end of switch(op) */
3017 + DP("set %s, copylen %u", index != IP_SET_INVALID_ID
3018 + && ip_set_list[index]
3019 + ? ip_set_list[index]->name
3020 + : ":all:", copylen);
3021 + res = copy_to_user(user, data, copylen);
3024 + up(&ip_set_app_mutex);
3028 + DP("final result %d", res);
3032 +static struct nf_sockopt_ops so_set = {
3034 + .set_optmin = SO_IP_SET,
3035 + .set_optmax = SO_IP_SET + 1,
3036 + .set = &ip_set_sockfn_set,
3037 + .get_optmin = SO_IP_SET,
3038 + .get_optmax = SO_IP_SET + 1,
3039 + .get = &ip_set_sockfn_get,
3043 +static int max_sets, hash_size;
3044 +module_param(max_sets, int, 0600);
3045 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3046 +module_param(hash_size, int, 0600);
3047 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3048 +MODULE_LICENSE("GPL");
3049 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3050 +MODULE_DESCRIPTION("module implementing core IP set support");
3052 +static int __init init(void)
3057 + get_random_bytes(&ip_set_hash_random, 4);
3059 + ip_set_max = max_sets;
3060 + ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3061 + if (!ip_set_list) {
3062 + printk(KERN_ERR "Unable to create ip_set_list\n");
3065 + memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3067 + ip_set_bindings_hash_size = hash_size;
3068 + ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3069 + if (!ip_set_hash) {
3070 + printk(KERN_ERR "Unable to create ip_set_hash\n");
3071 + vfree(ip_set_list);
3074 + for (i = 0; i < ip_set_bindings_hash_size; i++)
3075 + INIT_LIST_HEAD(&ip_set_hash[i]);
3077 + INIT_LIST_HEAD(&set_type_list);
3079 + res = nf_register_sockopt(&so_set);
3081 + ip_set_printk("SO_SET registry failed: %d", res);
3082 + vfree(ip_set_list);
3083 + vfree(ip_set_hash);
3089 +static void __exit fini(void)
3091 + /* There can't be any existing set or binding */
3092 + nf_unregister_sockopt(&so_set);
3093 + vfree(ip_set_list);
3094 + vfree(ip_set_hash);
3095 + DP("these are the famous last words");
3098 +EXPORT_SYMBOL(ip_set_register_set_type);
3099 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3101 +EXPORT_SYMBOL(ip_set_get_byname);
3102 +EXPORT_SYMBOL(ip_set_get_byindex);
3103 +EXPORT_SYMBOL(ip_set_put);
3105 +EXPORT_SYMBOL(ip_set_addip_kernel);
3106 +EXPORT_SYMBOL(ip_set_delip_kernel);
3107 +EXPORT_SYMBOL(ip_set_testip_kernel);
3111 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c
3112 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c 1969-12-31 18:00:00.000000000 -0600
3113 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c 2007-06-08 16:29:31.829808250 -0500
3115 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3117 + * This program is free software; you can redistribute it and/or modify
3118 + * it under the terms of the GNU General Public License version 2 as
3119 + * published by the Free Software Foundation.
3122 +/* Kernel module implementing an ip hash set */
3124 +#include <linux/module.h>
3125 +#include <linux/ip.h>
3126 +#include <linux/skbuff.h>
3127 +#include <linux/netfilter_ipv4/ip_tables.h>
3128 +#include <linux/netfilter_ipv4/ip_set.h>
3129 +#include <linux/errno.h>
3130 +#include <asm/uaccess.h>
3131 +#include <asm/bitops.h>
3132 +#include <linux/spinlock.h>
3133 +#include <linux/vmalloc.h>
3134 +#include <linux/random.h>
3136 +#include <net/ip.h>
3138 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3139 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
3140 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
3142 +static int limit = MAX_RANGE;
3144 +static inline __u32
3145 +jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
3147 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
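+/* Each probe i hashes with its own random seed map->initval[i], so an
+ * element may be stored in any of its map->probes candidate slots. */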
3150 +static inline __u32
3151 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3153 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3156 + ip_set_ip_t *elem;
3158 + *hash_ip = ip & map->netmask;
3159 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
3160 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
3162 + for (i = 0; i < map->probes; i++) {
3163 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
3164 + DP("hash key: %u", id);
3165 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3166 + if (*elem == *hash_ip)
3168 + /* No shortcut at testing - there can be deleted
3175 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3177 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
3181 +testip(struct ip_set *set, const void *data, size_t size,
3182 + ip_set_ip_t *hash_ip)
3184 + struct ip_set_req_iphash *req =
3185 + (struct ip_set_req_iphash *) data;
3187 + if (size != sizeof(struct ip_set_req_iphash)) {
3188 + ip_set_printk("data length wrong (want %zu, have %zu)",
3189 + sizeof(struct ip_set_req_iphash),
3193 + return __testip(set, req->ip, hash_ip);
3197 +testip_kernel(struct ip_set *set,
3198 + const struct sk_buff *skb,
3199 + ip_set_ip_t *hash_ip,
3200 + const u_int32_t *flags,
3201 + unsigned char index)
3203 + return __testip(set,
3204 + ntohl(flags[index] & IPSET_SRC
3205 + ? skb->nh.iph->saddr
3206 + : skb->nh.iph->daddr),
3211 +__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3215 + ip_set_ip_t *elem;
3217 + if (!ip || map->elements > limit)
3220 + *hash_ip = ip & map->netmask;
3222 + for (i = 0; i < map->probes; i++) {
3223 + probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
3224 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
3225 + if (*elem == *hash_ip)
3233 + /* Trigger rehashing */
3238 +addip(struct ip_set *set, const void *data, size_t size,
3239 + ip_set_ip_t *hash_ip)
3241 + struct ip_set_req_iphash *req =
3242 + (struct ip_set_req_iphash *) data;
3244 + if (size != sizeof(struct ip_set_req_iphash)) {
3245 + ip_set_printk("data length wrong (want %zu, have %zu)",
3246 + sizeof(struct ip_set_req_iphash),
3250 + return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
3254 +addip_kernel(struct ip_set *set,
3255 + const struct sk_buff *skb,
3256 + ip_set_ip_t *hash_ip,
3257 + const u_int32_t *flags,
3258 + unsigned char index)
3260 + return __addip((struct ip_set_iphash *) set->data,
3261 + ntohl(flags[index] & IPSET_SRC
3262 + ? skb->nh.iph->saddr
3263 + : skb->nh.iph->daddr),
3267 +static int retry(struct ip_set *set)
3269 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3270 + ip_set_ip_t hash_ip, *elem;
3272 + u_int32_t i, hashsize = map->hashsize;
3274 + struct ip_set_iphash *tmp;
3276 + if (map->resize == 0)
3282 + /* Calculate new hash size */
3283 + hashsize += (hashsize * map->resize)/100;
3284 + if (hashsize == map->hashsize)
3287 + ip_set_printk("rehashing of set %s triggered: "
3288 + "hashsize grows from %u to %u",
3289 + set->name, map->hashsize, hashsize);
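+	/* Build a larger table, re-insert every stored element into it and
+	 * swap it in under the set lock; if re-insertion still collides,
+	 * the size is grown again and the whole attempt retried. */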
3291 + tmp = kmalloc(sizeof(struct ip_set_iphash)
3292 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
3294 + DP("out of memory for %d bytes",
3295 + sizeof(struct ip_set_iphash)
3296 + + map->probes * sizeof(uint32_t));
3299 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
3300 + if (!tmp->members) {
3301 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
3305 + tmp->hashsize = hashsize;
3306 + tmp->elements = 0;
3307 + tmp->probes = map->probes;
3308 + tmp->resize = map->resize;
3309 + tmp->netmask = map->netmask;
3310 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
3312 + write_lock_bh(&set->lock);
3313 + map = (struct ip_set_iphash *) set->data; /* Play safe */
3314 + for (i = 0; i < map->hashsize && res == 0; i++) {
3315 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3317 + res = __addip(tmp, *elem, &hash_ip);
3320 + /* Failure, try again */
3321 + write_unlock_bh(&set->lock);
3322 + harray_free(tmp->members);
3327 + /* Success at resizing! */
3328 + members = map->members;
3330 + map->hashsize = tmp->hashsize;
3331 + map->members = tmp->members;
3332 + write_unlock_bh(&set->lock);
3334 + harray_free(members);
3341 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3343 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3344 + ip_set_ip_t id, *elem;
3349 + id = hash_id(set, ip, hash_ip);
3350 + if (id == UINT_MAX)
3353 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3361 +delip(struct ip_set *set, const void *data, size_t size,
3362 + ip_set_ip_t *hash_ip)
3364 + struct ip_set_req_iphash *req =
3365 + (struct ip_set_req_iphash *) data;
3367 + if (size != sizeof(struct ip_set_req_iphash)) {
3368 + ip_set_printk("data length wrong (want %zu, have %zu)",
3369 + sizeof(struct ip_set_req_iphash),
3373 + return __delip(set, req->ip, hash_ip);
3377 +delip_kernel(struct ip_set *set,
3378 + const struct sk_buff *skb,
3379 + ip_set_ip_t *hash_ip,
3380 + const u_int32_t *flags,
3381 + unsigned char index)
3383 + return __delip(set,
3384 + ntohl(flags[index] & IPSET_SRC
3385 + ? skb->nh.iph->saddr
3386 + : skb->nh.iph->daddr),
3390 +static int create(struct ip_set *set, const void *data, size_t size)
3392 + struct ip_set_req_iphash_create *req =
3393 + (struct ip_set_req_iphash_create *) data;
3394 + struct ip_set_iphash *map;
3397 + if (size != sizeof(struct ip_set_req_iphash_create)) {
3398 + ip_set_printk("data length wrong (want %zu, have %zu)",
3399 + sizeof(struct ip_set_req_iphash_create),
3404 + if (req->hashsize < 1) {
3405 + ip_set_printk("hashsize too small");
3409 + if (req->probes < 1) {
3410 + ip_set_printk("probes too small");
3414 + map = kmalloc(sizeof(struct ip_set_iphash)
3415 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
3417 + DP("out of memory for %d bytes",
3418 + sizeof(struct ip_set_iphash)
3419 + + req->probes * sizeof(uint32_t));
3422 + for (i = 0; i < req->probes; i++)
3423 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
3424 + map->elements = 0;
3425 + map->hashsize = req->hashsize;
3426 + map->probes = req->probes;
3427 + map->resize = req->resize;
3428 + map->netmask = req->netmask;
3429 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
3430 + if (!map->members) {
3431 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
3440 +static void destroy(struct ip_set *set)
3442 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3444 + harray_free(map->members);
3450 +static void flush(struct ip_set *set)
3452 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3453 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
3454 + map->elements = 0;
3457 +static void list_header(const struct ip_set *set, void *data)
3459 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3460 + struct ip_set_req_iphash_create *header =
3461 + (struct ip_set_req_iphash_create *) data;
3463 + header->hashsize = map->hashsize;
3464 + header->probes = map->probes;
3465 + header->resize = map->resize;
3466 + header->netmask = map->netmask;
3469 +static int list_members_size(const struct ip_set *set)
3471 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3473 + return (map->hashsize * sizeof(ip_set_ip_t));
3476 +static void list_members(const struct ip_set *set, void *data)
3478 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3479 + ip_set_ip_t i, *elem;
3481 + for (i = 0; i < map->hashsize; i++) {
3482 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3483 + ((ip_set_ip_t *)data)[i] = *elem;
3487 +static struct ip_set_type ip_set_iphash = {
3488 + .typename = SETTYPE_NAME,
3489 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3490 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3491 + .create = &create,
3492 + .destroy = &destroy,
3494 + .reqsize = sizeof(struct ip_set_req_iphash),
3496 + .addip_kernel = &addip_kernel,
3499 + .delip_kernel = &delip_kernel,
3500 + .testip = &testip,
3501 + .testip_kernel = &testip_kernel,
3502 + .header_size = sizeof(struct ip_set_req_iphash_create),
3503 + .list_header = &list_header,
3504 + .list_members_size = &list_members_size,
3505 + .list_members = &list_members,
3506 + .me = THIS_MODULE,
3509 +MODULE_LICENSE("GPL");
3510 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3511 +MODULE_DESCRIPTION("iphash type of IP sets");
3512 +module_param(limit, int, 0600);
3513 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
3515 +static int __init init(void)
3517 + return ip_set_register_set_type(&ip_set_iphash);
3520 +static void __exit fini(void)
3522 + /* FIXME: possible race with ip_set_create() */
3523 + ip_set_unregister_set_type(&ip_set_iphash);
3528 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c
3529 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c 1969-12-31 18:00:00.000000000 -0600
3530 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c 2007-06-08 16:29:31.833808500 -0500
3532 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3533 + * Patrick Schaaf <bof@bof.de>
3534 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3536 + * This program is free software; you can redistribute it and/or modify
3537 + * it under the terms of the GNU General Public License version 2 as
3538 + * published by the Free Software Foundation.
3541 +/* Kernel module implementing an IP set type: the single bitmap type */
3543 +#include <linux/module.h>
3544 +#include <linux/ip.h>
3545 +#include <linux/skbuff.h>
3546 +#include <linux/netfilter_ipv4/ip_tables.h>
3547 +#include <linux/netfilter_ipv4/ip_set.h>
3548 +#include <linux/errno.h>
3549 +#include <asm/uaccess.h>
3550 +#include <asm/bitops.h>
3551 +#include <linux/spinlock.h>
3553 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
3555 +static inline ip_set_ip_t
3556 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
3558 + return (ip - map->first_ip)/map->hosts;
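+/* Map an address to its bit index: the offset from first_ip divided by
+ * the number of addresses covered by one bit (map->hosts). */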
3562 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3564 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3566 + if (ip < map->first_ip || ip > map->last_ip)
3569 + *hash_ip = ip & map->netmask;
3570 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
3571 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
3572 + return !!test_bit(ip_to_id(map, *hash_ip), map->members);
3576 +testip(struct ip_set *set, const void *data, size_t size,
3577 + ip_set_ip_t *hash_ip)
3579 + struct ip_set_req_ipmap *req =
3580 + (struct ip_set_req_ipmap *) data;
3582 + if (size != sizeof(struct ip_set_req_ipmap)) {
3583 + ip_set_printk("data length wrong (want %zu, have %zu)",
3584 + sizeof(struct ip_set_req_ipmap),
3588 + return __testip(set, req->ip, hash_ip);
3592 +testip_kernel(struct ip_set *set,
3593 + const struct sk_buff *skb,
3594 + ip_set_ip_t *hash_ip,
3595 + const u_int32_t *flags,
3596 + unsigned char index)
3600 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
3601 + flags[index] & IPSET_SRC ? "SRC" : "DST",
3602 + NIPQUAD(skb->nh.iph->saddr),
3603 + NIPQUAD(skb->nh.iph->daddr));
3605 + res = __testip(set,
3606 + ntohl(flags[index] & IPSET_SRC
3607 + ? skb->nh.iph->saddr
3608 + : skb->nh.iph->daddr),
3610 + return (res < 0 ? 0 : res);
3614 +__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3616 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3618 + if (ip < map->first_ip || ip > map->last_ip)
3621 + *hash_ip = ip & map->netmask;
3622 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3623 + if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
3630 +addip(struct ip_set *set, const void *data, size_t size,
3631 + ip_set_ip_t *hash_ip)
3633 + struct ip_set_req_ipmap *req =
3634 + (struct ip_set_req_ipmap *) data;
3636 + if (size != sizeof(struct ip_set_req_ipmap)) {
3637 + ip_set_printk("data length wrong (want %zu, have %zu)",
3638 + sizeof(struct ip_set_req_ipmap),
3642 + DP("%u.%u.%u.%u", HIPQUAD(req->ip));
3643 + return __addip(set, req->ip, hash_ip);
3647 +addip_kernel(struct ip_set *set,
3648 + const struct sk_buff *skb,
3649 + ip_set_ip_t *hash_ip,
3650 + const u_int32_t *flags,
3651 + unsigned char index)
3653 + return __addip(set,
3654 + ntohl(flags[index] & IPSET_SRC
3655 + ? skb->nh.iph->saddr
3656 + : skb->nh.iph->daddr),
3661 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3663 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3665 + if (ip < map->first_ip || ip > map->last_ip)
3668 + *hash_ip = ip & map->netmask;
3669 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3670 + if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
3677 +delip(struct ip_set *set, const void *data, size_t size,
3678 + ip_set_ip_t *hash_ip)
3680 + struct ip_set_req_ipmap *req =
3681 + (struct ip_set_req_ipmap *) data;
3683 + if (size != sizeof(struct ip_set_req_ipmap)) {
3684 + ip_set_printk("data length wrong (want %zu, have %zu)",
3685 + sizeof(struct ip_set_req_ipmap),
3689 + return __delip(set, req->ip, hash_ip);
3693 +delip_kernel(struct ip_set *set,
3694 + const struct sk_buff *skb,
3695 + ip_set_ip_t *hash_ip,
3696 + const u_int32_t *flags,
3697 + unsigned char index)
3699 + return __delip(set,
3700 + ntohl(flags[index] & IPSET_SRC
3701 + ? skb->nh.iph->saddr
3702 + : skb->nh.iph->daddr),
3706 +static int create(struct ip_set *set, const void *data, size_t size)
3709 + struct ip_set_req_ipmap_create *req =
3710 + (struct ip_set_req_ipmap_create *) data;
3711 + struct ip_set_ipmap *map;
3713 + if (size != sizeof(struct ip_set_req_ipmap_create)) {
3714 + ip_set_printk("data length wrong (want %zu, have %zu)",
3715 + sizeof(struct ip_set_req_ipmap_create),
3720 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
3721 + HIPQUAD(req->from), HIPQUAD(req->to));
3723 + if (req->from > req->to) {
3724 + DP("bad ip range");
3728 + map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
3730 + DP("out of memory for %d bytes",
3731 + sizeof(struct ip_set_ipmap));
3734 + map->first_ip = req->from;
3735 + map->last_ip = req->to;
3736 + map->netmask = req->netmask;
3738 + if (req->netmask == 0xFFFFFFFF) {
3740 + map->sizeid = map->last_ip - map->first_ip + 1;
3742 + unsigned int mask_bits, netmask_bits;
3745 +		map->first_ip &= map->netmask;	/* Or should we rather complain? */
3747 + mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
3748 + netmask_bits = mask_to_bits(map->netmask);
3750 + if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
3751 + || netmask_bits <= mask_bits)
3754 + DP("mask_bits %u, netmask_bits %u",
3755 + mask_bits, netmask_bits);
3756 + map->hosts = 2 << (32 - netmask_bits - 1);
3757 + map->sizeid = 2 << (netmask_bits - mask_bits - 1);
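+		/* i.e. each bit covers 2^(32 - netmask_bits) addresses and
+		 * the bitmap holds 2^(netmask_bits - mask_bits) bits. */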
3759 + if (map->sizeid > MAX_RANGE + 1) {
3760 + ip_set_printk("range too big (max %d addresses)",
3765 + DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
3766 + newbytes = bitmap_bytes(0, map->sizeid - 1);
3767 + map->members = kmalloc(newbytes, GFP_KERNEL);
3768 + if (!map->members) {
3769 + DP("out of memory for %d bytes", newbytes);
3773 + memset(map->members, 0, newbytes);
3779 +static void destroy(struct ip_set *set)
3781 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3783 + kfree(map->members);
3789 +static void flush(struct ip_set *set)
3791 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3792 + memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
3795 +static void list_header(const struct ip_set *set, void *data)
3797 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3798 + struct ip_set_req_ipmap_create *header =
3799 + (struct ip_set_req_ipmap_create *) data;
3801 + header->from = map->first_ip;
3802 + header->to = map->last_ip;
3803 + header->netmask = map->netmask;
3806 +static int list_members_size(const struct ip_set *set)
3808 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3810 + return bitmap_bytes(0, map->sizeid - 1);
3813 +static void list_members(const struct ip_set *set, void *data)
3815 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3816 + int bytes = bitmap_bytes(0, map->sizeid - 1);
3818 + memcpy(data, map->members, bytes);
3821 +static struct ip_set_type ip_set_ipmap = {
3822 + .typename = SETTYPE_NAME,
3823 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3824 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3825 + .create = &create,
3826 + .destroy = &destroy,
3828 + .reqsize = sizeof(struct ip_set_req_ipmap),
3830 + .addip_kernel = &addip_kernel,
3832 + .delip_kernel = &delip_kernel,
3833 + .testip = &testip,
3834 + .testip_kernel = &testip_kernel,
3835 + .header_size = sizeof(struct ip_set_req_ipmap_create),
3836 + .list_header = &list_header,
3837 + .list_members_size = &list_members_size,
3838 + .list_members = &list_members,
3839 + .me = THIS_MODULE,
3842 +MODULE_LICENSE("GPL");
3843 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3844 +MODULE_DESCRIPTION("ipmap type of IP sets");
3846 +static int __init init(void)
3848 + return ip_set_register_set_type(&ip_set_ipmap);
3851 +static void __exit fini(void)
3853 + /* FIXME: possible race with ip_set_create() */
3854 + ip_set_unregister_set_type(&ip_set_ipmap);
3859 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c
3860 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c 1969-12-31 18:00:00.000000000 -0600
3861 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c 2007-06-08 16:29:31.833808500 -0500
3863 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3865 + * This program is free software; you can redistribute it and/or modify
3866 + * it under the terms of the GNU General Public License version 2 as
3867 + * published by the Free Software Foundation.
3870 +/* Kernel module implementing an ip+port hash set */
3872 +#include <linux/module.h>
3873 +#include <linux/ip.h>
3874 +#include <linux/tcp.h>
3875 +#include <linux/udp.h>
3876 +#include <linux/skbuff.h>
3877 +#include <linux/netfilter_ipv4/ip_tables.h>
3878 +#include <linux/netfilter_ipv4/ip_set.h>
3879 +#include <linux/errno.h>
3880 +#include <asm/uaccess.h>
3881 +#include <asm/bitops.h>
3882 +#include <linux/spinlock.h>
3883 +#include <linux/vmalloc.h>
3884 +#include <linux/random.h>
3886 +#include <net/ip.h>
3888 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3889 +#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
3890 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
3892 +static int limit = MAX_RANGE;
3894 +/* We must handle non-linear skbs */
3895 +static inline ip_set_ip_t
3896 +get_port(const struct sk_buff *skb, u_int32_t flags)
3898 + struct iphdr *iph = skb->nh.iph;
3899 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
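+	/* Only the first fragment (offset 0) carries the transport header,
+	 * so no port can be extracted from later fragments. */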
3901 + switch (iph->protocol) {
3902 + case IPPROTO_TCP: {
3903 + struct tcphdr tcph;
3905 + /* See comments at tcp_match in ip_tables.c */
3907 + return INVALID_PORT;
3909 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
3910 + /* No choice either */
3911 + return INVALID_PORT;
3913 + return ntohs(flags & IPSET_SRC ?
3914 + tcph.source : tcph.dest);
3916 + case IPPROTO_UDP: {
3917 + struct udphdr udph;
3920 + return INVALID_PORT;
3922 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
3923 + /* No choice either */
3924 + return INVALID_PORT;
3926 + return ntohs(flags & IPSET_SRC ?
3927 + udph.source : udph.dest);
3930 + return INVALID_PORT;
3934 +static inline __u32
3935 +jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
3937 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
3940 +#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
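+/* Pack (ip, port) into a single ip_set_ip_t: the offset of ip from
+ * first_ip goes into the upper bits, the port into the lower 16 bits. */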
3942 +static inline __u32
3943 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
3944 + ip_set_ip_t *hash_ip)
3946 + struct ip_set_ipporthash *map =
3947 + (struct ip_set_ipporthash *) set->data;
3950 + ip_set_ip_t *elem;
3952 + *hash_ip = HASH_IP(map, ip, port);
3953 + DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
3954 + set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
3956 + for (i = 0; i < map->probes; i++) {
3957 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
3958 + DP("hash key: %u", id);
3959 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3960 + if (*elem == *hash_ip)
3962 + /* No shortcut at testing - there can be deleted
3969 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
3970 + ip_set_ip_t *hash_ip)
3972 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
3974 + if (ip < map->first_ip || ip > map->last_ip)
3977 + return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
3981 +testip(struct ip_set *set, const void *data, size_t size,
3982 + ip_set_ip_t *hash_ip)
3984 + struct ip_set_req_ipporthash *req =
3985 + (struct ip_set_req_ipporthash *) data;
3987 + if (size != sizeof(struct ip_set_req_ipporthash)) {
3988 + ip_set_printk("data length wrong (want %zu, have %zu)",
3989 + sizeof(struct ip_set_req_ipporthash),
3993 + return __testip(set, req->ip, req->port, hash_ip);
3997 +testip_kernel(struct ip_set *set,
3998 + const struct sk_buff *skb,
3999 + ip_set_ip_t *hash_ip,
4000 + const u_int32_t *flags,
4001 + unsigned char index)
4005 + if (flags[index+1] == 0)
4008 + port = get_port(skb, flags[index+1]);
4010 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4011 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4012 + NIPQUAD(skb->nh.iph->saddr),
4013 + NIPQUAD(skb->nh.iph->daddr));
4014 + DP("flag %s port %u",
4015 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4017 + if (port == INVALID_PORT)
4020 + return __testip(set,
4021 + ntohl(flags[index] & IPSET_SRC
4022 + ? skb->nh.iph->saddr
4023 + : skb->nh.iph->daddr),
4029 +__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
4033 + ip_set_ip_t *elem;
4035 + for (i = 0; i < map->probes; i++) {
4036 + probe = jhash_ip(map, i, hash_ip) % map->hashsize;
4037 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4038 + if (*elem == hash_ip)
4046 + /* Trigger rehashing */
4051 +__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
4052 + ip_set_ip_t *hash_ip)
4054 + if (map->elements > limit)
4056 + if (ip < map->first_ip || ip > map->last_ip)
4059 + *hash_ip = HASH_IP(map, ip, port);
4061 + return __add_haship(map, *hash_ip);
4065 +addip(struct ip_set *set, const void *data, size_t size,
4066 + ip_set_ip_t *hash_ip)
4068 + struct ip_set_req_ipporthash *req =
4069 + (struct ip_set_req_ipporthash *) data;
4071 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4072 + ip_set_printk("data length wrong (want %zu, have %zu)",
4073 + sizeof(struct ip_set_req_ipporthash),
4077 + return __addip((struct ip_set_ipporthash *) set->data,
4078 + req->ip, req->port, hash_ip);
4082 +addip_kernel(struct ip_set *set,
4083 + const struct sk_buff *skb,
4084 + ip_set_ip_t *hash_ip,
4085 + const u_int32_t *flags,
4086 + unsigned char index)
4090 + if (flags[index+1] == 0)
4093 + port = get_port(skb, flags[index+1]);
4095 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4096 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4097 + NIPQUAD(skb->nh.iph->saddr),
4098 + NIPQUAD(skb->nh.iph->daddr));
4099 + DP("flag %s port %u",
4100 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4102 + if (port == INVALID_PORT)
4105 + return __addip((struct ip_set_ipporthash *) set->data,
4106 + ntohl(flags[index] & IPSET_SRC
4107 + ? skb->nh.iph->saddr
4108 + : skb->nh.iph->daddr),
4113 +static int retry(struct ip_set *set)
4115 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4116 + ip_set_ip_t *elem;
4118 + u_int32_t i, hashsize = map->hashsize;
4120 + struct ip_set_ipporthash *tmp;
4122 + if (map->resize == 0)
4128 + /* Calculate new hash size */
4129 + hashsize += (hashsize * map->resize)/100;
4130 + if (hashsize == map->hashsize)
4133 + ip_set_printk("rehashing of set %s triggered: "
4134 + "hashsize grows from %u to %u",
4135 + set->name, map->hashsize, hashsize);
4137 + tmp = kmalloc(sizeof(struct ip_set_ipporthash)
4138 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
4140 + DP("out of memory for %d bytes",
4141 + sizeof(struct ip_set_ipporthash)
4142 + + map->probes * sizeof(uint32_t));
4145 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
4146 + if (!tmp->members) {
4147 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
4151 + tmp->hashsize = hashsize;
4152 + tmp->elements = 0;
4153 + tmp->probes = map->probes;
4154 + tmp->resize = map->resize;
4155 + tmp->first_ip = map->first_ip;
4156 + tmp->last_ip = map->last_ip;
4157 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
4159 + write_lock_bh(&set->lock);
4160 + map = (struct ip_set_ipporthash *) set->data; /* Play safe */
4161 + for (i = 0; i < map->hashsize && res == 0; i++) {
4162 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4164 + res = __add_haship(tmp, *elem);
4167 + /* Failure, try again */
4168 + write_unlock_bh(&set->lock);
4169 + harray_free(tmp->members);
4174 + /* Success at resizing! */
4175 + members = map->members;
4177 + map->hashsize = tmp->hashsize;
4178 + map->members = tmp->members;
4179 + write_unlock_bh(&set->lock);
4181 + harray_free(members);
4188 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4189 + ip_set_ip_t *hash_ip)
4191 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4193 + ip_set_ip_t *elem;
4195 + if (ip < map->first_ip || ip > map->last_ip)
4198 + id = hash_id(set, ip, port, hash_ip);
4200 + if (id == UINT_MAX)
4203 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4211 +delip(struct ip_set *set, const void *data, size_t size,
4212 + ip_set_ip_t *hash_ip)
4214 + struct ip_set_req_ipporthash *req =
4215 + (struct ip_set_req_ipporthash *) data;
4217 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4218 + ip_set_printk("data length wrong (want %zu, have %zu)",
4219 + sizeof(struct ip_set_req_ipporthash),
4223 + return __delip(set, req->ip, req->port, hash_ip);
4227 +delip_kernel(struct ip_set *set,
4228 + const struct sk_buff *skb,
4229 + ip_set_ip_t *hash_ip,
4230 + const u_int32_t *flags,
4231 + unsigned char index)
4235 + if (flags[index+1] == 0)
4238 + port = get_port(skb, flags[index+1]);
4240 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4241 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4242 + NIPQUAD(skb->nh.iph->saddr),
4243 + NIPQUAD(skb->nh.iph->daddr));
4244 + DP("flag %s port %u",
4245 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4247 + if (port == INVALID_PORT)
4250 + return __delip(set,
4251 + ntohl(flags[index] & IPSET_SRC
4252 + ? skb->nh.iph->saddr
4253 + : skb->nh.iph->daddr),
4258 +static int create(struct ip_set *set, const void *data, size_t size)
4260 + struct ip_set_req_ipporthash_create *req =
4261 + (struct ip_set_req_ipporthash_create *) data;
4262 + struct ip_set_ipporthash *map;
4265 + if (size != sizeof(struct ip_set_req_ipporthash_create)) {
4266 + ip_set_printk("data length wrong (want %zu, have %zu)",
4267 + sizeof(struct ip_set_req_ipporthash_create),
4272 + if (req->hashsize < 1) {
4273 + ip_set_printk("hashsize too small");
4277 + if (req->probes < 1) {
4278 + ip_set_printk("probes too small");
4282 + map = kmalloc(sizeof(struct ip_set_ipporthash)
4283 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
4285 + DP("out of memory for %d bytes",
4286 + sizeof(struct ip_set_ipporthash)
4287 + + req->probes * sizeof(uint32_t));
4290 + for (i = 0; i < req->probes; i++)
4291 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
4292 + map->elements = 0;
4293 + map->hashsize = req->hashsize;
4294 + map->probes = req->probes;
4295 + map->resize = req->resize;
4296 + map->first_ip = req->from;
4297 + map->last_ip = req->to;
4298 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
4299 + if (!map->members) {
4300 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
4309 +static void destroy(struct ip_set *set)
4311 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4313 + harray_free(map->members);
4319 +static void flush(struct ip_set *set)
4321 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4322 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
4323 + map->elements = 0;
4326 +static void list_header(const struct ip_set *set, void *data)
4328 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4329 + struct ip_set_req_ipporthash_create *header =
4330 + (struct ip_set_req_ipporthash_create *) data;
4332 + header->hashsize = map->hashsize;
4333 + header->probes = map->probes;
4334 + header->resize = map->resize;
4335 + header->from = map->first_ip;
4336 + header->to = map->last_ip;
4339 +static int list_members_size(const struct ip_set *set)
4341 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4343 + return (map->hashsize * sizeof(ip_set_ip_t));
4346 +static void list_members(const struct ip_set *set, void *data)
4348 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4349 + ip_set_ip_t i, *elem;
4351 + for (i = 0; i < map->hashsize; i++) {
4352 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4353 + ((ip_set_ip_t *)data)[i] = *elem;
4357 +static struct ip_set_type ip_set_ipporthash = {
4358 + .typename = SETTYPE_NAME,
4359 + .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
4360 + .protocol_version = IP_SET_PROTOCOL_VERSION,
4361 + .create = &create,
4362 + .destroy = &destroy,
4364 + .reqsize = sizeof(struct ip_set_req_ipporthash),
4366 + .addip_kernel = &addip_kernel,
4369 + .delip_kernel = &delip_kernel,
4370 + .testip = &testip,
4371 + .testip_kernel = &testip_kernel,
4372 + .header_size = sizeof(struct ip_set_req_ipporthash_create),
4373 + .list_header = &list_header,
4374 + .list_members_size = &list_members_size,
4375 + .list_members = &list_members,
4376 + .me = THIS_MODULE,
4379 +MODULE_LICENSE("GPL");
4380 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4381 +MODULE_DESCRIPTION("ipporthash type of IP sets");
4382 +module_param(limit, int, 0600);
4383 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4385 +static int __init init(void)
4387 + return ip_set_register_set_type(&ip_set_ipporthash);
4390 +static void __exit fini(void)
4392 + /* FIXME: possible race with ip_set_create() */
4393 + ip_set_unregister_set_type(&ip_set_ipporthash);
4398 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c
4399 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c 1969-12-31 18:00:00.000000000 -0600
4400 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c 2007-06-08 16:29:31.833808500 -0500
4402 +/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4404 + * This program is free software; you can redistribute it and/or modify
4405 + * it under the terms of the GNU General Public License version 2 as
4406 + * published by the Free Software Foundation.
4409 +/* Kernel module implementing an IP set type: the iptree type */
4411 +#include <linux/version.h>
4412 +#include <linux/module.h>
4413 +#include <linux/ip.h>
4414 +#include <linux/skbuff.h>
4415 +#include <linux/slab.h>
4416 +#include <linux/delay.h>
4417 +#include <linux/netfilter_ipv4/ip_tables.h>
4418 +#include <linux/netfilter_ipv4/ip_set.h>
4419 +#include <linux/errno.h>
4420 +#include <asm/uaccess.h>
4421 +#include <asm/bitops.h>
4422 +#include <linux/spinlock.h>
4424 +/* Backward compatibility */
4429 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
4431 +static int limit = MAX_RANGE;
4433 +/* Garbage collection interval in seconds: */
4434 +#define IPTREE_GC_TIME 5*60
4435 +/* Sleep this many milliseconds before trying again
4436 + * to delete the gc timer when destroying/flushing a set */
4437 +#define IPTREE_DESTROY_SLEEP 100
4439 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
4440 +static struct kmem_cache *branch_cachep;
4441 +static struct kmem_cache *leaf_cachep;
4443 +static kmem_cache_t *branch_cachep;
4444 +static kmem_cache_t *leaf_cachep;
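+/* ABCD splits an address into four one-byte indexes, one per tree level. */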
4447 +#define ABCD(a,b,c,d,addrp) do { \
4448 + a = ((unsigned char *)addrp)[3]; \
4449 + b = ((unsigned char *)addrp)[2]; \
4450 + c = ((unsigned char *)addrp)[1]; \
4451 + d = ((unsigned char *)addrp)[0]; \
4454 +#define TESTIP_WALK(map, elem, branch) do { \
4455 + if ((map)->tree[elem]) { \
4456 + branch = (map)->tree[elem]; \
4462 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4464 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4465 + struct ip_set_iptreeb *btree;
4466 + struct ip_set_iptreec *ctree;
4467 + struct ip_set_iptreed *dtree;
4468 + unsigned char a,b,c,d;
4474 + ABCD(a, b, c, d, hash_ip);
4475 + DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
4476 + TESTIP_WALK(map, a, btree);
4477 + TESTIP_WALK(btree, b, ctree);
4478 + TESTIP_WALK(ctree, c, dtree);
4479 + DP("%lu %lu", dtree->expires[d], jiffies);
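+	/* With a set timeout an entry is live while its expiry lies in the
+	 * future; without one, expires[d] is just a non-zero marker. */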
4480 + return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
4481 + : dtree->expires[d]);
4485 +testip(struct ip_set *set, const void *data, size_t size,
4486 + ip_set_ip_t *hash_ip)
4488 + struct ip_set_req_iptree *req =
4489 + (struct ip_set_req_iptree *) data;
4491 + if (size != sizeof(struct ip_set_req_iptree)) {
4492 + ip_set_printk("data length wrong (want %zu, have %zu)",
4493 + sizeof(struct ip_set_req_iptree),
4497 + return __testip(set, req->ip, hash_ip);
4501 +testip_kernel(struct ip_set *set,
4502 + const struct sk_buff *skb,
4503 + ip_set_ip_t *hash_ip,
4504 + const u_int32_t *flags,
4505 + unsigned char index)
4509 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4510 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4511 + NIPQUAD(skb->nh.iph->saddr),
4512 + NIPQUAD(skb->nh.iph->daddr));
4514 + res = __testip(set,
4515 + ntohl(flags[index] & IPSET_SRC
4516 + ? skb->nh.iph->saddr
4517 + : skb->nh.iph->daddr),
4519 + return (res < 0 ? 0 : res);
4522 +#define ADDIP_WALK(map, elem, branch, type, cachep, flags) do { \
4523 + if ((map)->tree[elem]) { \
4524 + DP("found %u", elem); \
4525 + branch = (map)->tree[elem]; \
4527 + branch = (type *) \
4528 + kmem_cache_alloc(cachep, flags); \
4529 + if (branch == NULL) \
4531 + memset(branch, 0, sizeof(*branch)); \
4532 + (map)->tree[elem] = branch; \
4533 + DP("alloc %u", elem); \
4538 +__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
4539 + ip_set_ip_t *hash_ip,
4540 + unsigned int __nocast flags)
4542 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4543 + struct ip_set_iptreeb *btree;
4544 + struct ip_set_iptreec *ctree;
4545 + struct ip_set_iptreed *dtree;
4546 + unsigned char a,b,c,d;
4549 + if (!ip || map->elements > limit)
4550 + /* We could call the garbage collector
4551 + * but it's probably overkill */
4555 + ABCD(a, b, c, d, hash_ip);
4556 + DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
4557 + ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep, flags);
4558 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep, flags);
4559 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep, flags);
4560 + if (dtree->expires[d]
4561 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
4563 + dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
4565 + if (dtree->expires[d] == 0)
4566 + dtree->expires[d] = 1;
4567 + DP("%u %lu", d, dtree->expires[d]);
4574 +addip(struct ip_set *set, const void *data, size_t size,
4575 + ip_set_ip_t *hash_ip)
4577 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4578 + struct ip_set_req_iptree *req =
4579 + (struct ip_set_req_iptree *) data;
4581 + if (size != sizeof(struct ip_set_req_iptree)) {
4582 + ip_set_printk("data length wrong (want %zu, have %zu)",
4583 + sizeof(struct ip_set_req_iptree),
4587 + DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
4588 + return __addip(set, req->ip,
4589 + req->timeout ? req->timeout : map->timeout,
4595 +addip_kernel(struct ip_set *set,
4596 + const struct sk_buff *skb,
4597 + ip_set_ip_t *hash_ip,
4598 + const u_int32_t *flags,
4599 + unsigned char index)
4601 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4603 + return __addip(set,
4604 + ntohl(flags[index] & IPSET_SRC
4605 + ? skb->nh.iph->saddr
4606 + : skb->nh.iph->daddr),
4612 +#define DELIP_WALK(map, elem, branch) do { \
4613 + if ((map)->tree[elem]) { \
4614 + branch = (map)->tree[elem]; \
4620 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4622 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4623 + struct ip_set_iptreeb *btree;
4624 + struct ip_set_iptreec *ctree;
4625 + struct ip_set_iptreed *dtree;
4626 + unsigned char a,b,c,d;
4632 + ABCD(a, b, c, d, hash_ip);
4633 + DELIP_WALK(map, a, btree);
4634 + DELIP_WALK(btree, b, ctree);
4635 + DELIP_WALK(ctree, c, dtree);
4637 + if (dtree->expires[d]) {
4638 + dtree->expires[d] = 0;
4646 +delip(struct ip_set *set, const void *data, size_t size,
4647 + ip_set_ip_t *hash_ip)
4649 + struct ip_set_req_iptree *req =
4650 + (struct ip_set_req_iptree *) data;
4652 + if (size != sizeof(struct ip_set_req_iptree)) {
4653 + ip_set_printk("data length wrong (want %zu, have %zu)",
4654 + sizeof(struct ip_set_req_iptree),
4658 + return __delip(set, req->ip, hash_ip);
4662 +delip_kernel(struct ip_set *set,
4663 + const struct sk_buff *skb,
4664 + ip_set_ip_t *hash_ip,
4665 + const u_int32_t *flags,
4666 + unsigned char index)
4668 + return __delip(set,
4669 + ntohl(flags[index] & IPSET_SRC
4670 + ? skb->nh.iph->saddr
4671 + : skb->nh.iph->daddr),
4675 +#define LOOP_WALK_BEGIN(map, i, branch) \
4676 + for (i = 0; i < 256; i++) { \
4677 + if (!(map)->tree[i]) \
4679 + branch = (map)->tree[i]
4681 +#define LOOP_WALK_END }
4683 +static void ip_tree_gc(unsigned long ul_set)
4685 + struct ip_set *set = (void *) ul_set;
4686 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4687 + struct ip_set_iptreeb *btree;
4688 + struct ip_set_iptreec *ctree;
4689 + struct ip_set_iptreed *dtree;
4690 + unsigned int a,b,c,d;
4691 + unsigned char i,j,k;
4694 + DP("gc: %s", set->name);
4695 + write_lock_bh(&set->lock);
4696 + LOOP_WALK_BEGIN(map, a, btree);
4697 + LOOP_WALK_BEGIN(btree, b, ctree);
4698 + LOOP_WALK_BEGIN(ctree, c, dtree);
4699 + for (d = 0; d < 256; d++) {
4700 + if (dtree->expires[d]) {
4701 + DP("gc: %u %u %u %u: expires %lu jiffies %lu",
4703 + dtree->expires[d], jiffies);
4705 + && time_before(dtree->expires[d], jiffies)) {
4706 + dtree->expires[d] = 0;
4713 + DP("gc: %s: leaf %u %u %u empty",
4714 + set->name, a, b, c);
4715 + kmem_cache_free(leaf_cachep, dtree);
4716 + ctree->tree[c] = NULL;
4718 + DP("gc: %s: leaf %u %u %u not empty",
4719 + set->name, a, b, c);
4725 + DP("gc: %s: branch %u %u empty",
4727 + kmem_cache_free(branch_cachep, ctree);
4728 + btree->tree[b] = NULL;
4730 + DP("gc: %s: branch %u %u not empty",
4737 + DP("gc: %s: branch %u empty",
4739 + kmem_cache_free(branch_cachep, btree);
4740 + map->tree[a] = NULL;
4742 + DP("gc: %s: branch %u not empty",
4747 + write_unlock_bh(&set->lock);
4749 + map->gc.expires = jiffies + map->gc_interval * HZ;
4750 + add_timer(&map->gc);
4753 +static inline void init_gc_timer(struct ip_set *set)
4755 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4757 + /* Even if there is no timeout for the entries,
4758 + * we still have to call gc because delete
4759 + * does not clean up empty branches */
4760 + map->gc_interval = IPTREE_GC_TIME;
4761 + init_timer(&map->gc);
4762 + map->gc.data = (unsigned long) set;
4763 + map->gc.function = ip_tree_gc;
4764 + map->gc.expires = jiffies + map->gc_interval * HZ;
4765 + add_timer(&map->gc);
4768 +static int create(struct ip_set *set, const void *data, size_t size)
4770 + struct ip_set_req_iptree_create *req =
4771 + (struct ip_set_req_iptree_create *) data;
4772 + struct ip_set_iptree *map;
4774 + if (size != sizeof(struct ip_set_req_iptree_create)) {
4775 + ip_set_printk("data length wrong (want %zu, have %zu)",
4776 + sizeof(struct ip_set_req_iptree_create),
4781 + map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
4783 + DP("out of memory for %d bytes",
4784 + sizeof(struct ip_set_iptree));
4787 + memset(map, 0, sizeof(*map));
4788 + map->timeout = req->timeout;
4789 + map->elements = 0;
4792 + init_gc_timer(set);
4797 +static void __flush(struct ip_set_iptree *map)
4799 + struct ip_set_iptreeb *btree;
4800 + struct ip_set_iptreec *ctree;
4801 + struct ip_set_iptreed *dtree;
4802 + unsigned int a,b,c;
4804 + LOOP_WALK_BEGIN(map, a, btree);
4805 + LOOP_WALK_BEGIN(btree, b, ctree);
4806 + LOOP_WALK_BEGIN(ctree, c, dtree);
4807 + kmem_cache_free(leaf_cachep, dtree);
4809 + kmem_cache_free(branch_cachep, ctree);
4811 + kmem_cache_free(branch_cachep, btree);
4813 + map->elements = 0;
4816 +static void destroy(struct ip_set *set)
4818 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4820 + /* gc might be running */
4821 + while (!del_timer(&map->gc))
4822 + msleep(IPTREE_DESTROY_SLEEP);
4828 +static void flush(struct ip_set *set)
4830 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4831 + unsigned int timeout = map->timeout;
4833 + /* gc might be running */
4834 + while (!del_timer(&map->gc))
4835 + msleep(IPTREE_DESTROY_SLEEP);
4837 + memset(map, 0, sizeof(*map));
4838 + map->timeout = timeout;
4840 + init_gc_timer(set);
4843 +static void list_header(const struct ip_set *set, void *data)
4845 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4846 + struct ip_set_req_iptree_create *header =
4847 + (struct ip_set_req_iptree_create *) data;
4849 + header->timeout = map->timeout;
4852 +static int list_members_size(const struct ip_set *set)
4854 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4855 + struct ip_set_iptreeb *btree;
4856 + struct ip_set_iptreec *ctree;
4857 + struct ip_set_iptreed *dtree;
4858 + unsigned int a,b,c,d;
4859 + unsigned int count = 0;
4861 + LOOP_WALK_BEGIN(map, a, btree);
4862 + LOOP_WALK_BEGIN(btree, b, ctree);
4863 + LOOP_WALK_BEGIN(ctree, c, dtree);
4864 + for (d = 0; d < 256; d++) {
4865 + if (dtree->expires[d]
4866 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
4873 + DP("members %u", count);
4874 + return (count * sizeof(struct ip_set_req_iptree));
4877 +static void list_members(const struct ip_set *set, void *data)
4879 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4880 + struct ip_set_iptreeb *btree;
4881 + struct ip_set_iptreec *ctree;
4882 + struct ip_set_iptreed *dtree;
4883 + unsigned int a,b,c,d;
4884 + size_t offset = 0;
4885 + struct ip_set_req_iptree *entry;
4887 + LOOP_WALK_BEGIN(map, a, btree);
4888 + LOOP_WALK_BEGIN(btree, b, ctree);
4889 + LOOP_WALK_BEGIN(ctree, c, dtree);
4890 + for (d = 0; d < 256; d++) {
4891 + if (dtree->expires[d]
4892 + && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
4893 + entry = (struct ip_set_req_iptree *)(data + offset);
4894 + entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
4895 + entry->timeout = !map->timeout ? 0
4896 + : (dtree->expires[d] - jiffies)/HZ;
4897 + offset += sizeof(struct ip_set_req_iptree);
4905 +static struct ip_set_type ip_set_iptree = {
4906 + .typename = SETTYPE_NAME,
4907 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
4908 + .protocol_version = IP_SET_PROTOCOL_VERSION,
4909 + .create = &create,
4910 + .destroy = &destroy,
4912 + .reqsize = sizeof(struct ip_set_req_iptree),
4914 + .addip_kernel = &addip_kernel,
4916 + .delip_kernel = &delip_kernel,
4917 + .testip = &testip,
4918 + .testip_kernel = &testip_kernel,
4919 + .header_size = sizeof(struct ip_set_req_iptree_create),
4920 + .list_header = &list_header,
4921 + .list_members_size = &list_members_size,
4922 + .list_members = &list_members,
4923 + .me = THIS_MODULE,
4926 +MODULE_LICENSE("GPL");
4927 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4928 +MODULE_DESCRIPTION("iptree type of IP sets");
4929 +module_param(limit, int, 0600);
4930 +MODULE_PARM_DESC(limit, "maximum number of elements stored in the sets");
4932 +static int __init init(void)
4936 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
4937 + sizeof(struct ip_set_iptreeb),
4938 + 0, 0, NULL, NULL);
4939 + if (!branch_cachep) {
4940 + printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
4944 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
4945 + sizeof(struct ip_set_iptreed),
4946 + 0, 0, NULL, NULL);
4947 + if (!leaf_cachep) {
4948 + printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
4952 + ret = ip_set_register_set_type(&ip_set_iptree);
4956 + kmem_cache_destroy(leaf_cachep);
4958 + kmem_cache_destroy(branch_cachep);
4963 +static void __exit fini(void)
4965 + /* FIXME: possible race with ip_set_create() */
4966 + ip_set_unregister_set_type(&ip_set_iptree);
4967 + kmem_cache_destroy(leaf_cachep);
4968 + kmem_cache_destroy(branch_cachep);
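
The iptree type above keeps each member address in a three-level tree: the first three octets of the host-order IP select b-, c- and d-level nodes, and the fourth octet indexes an expiry array in the leaf, so add/test/del each touch at most three pointers. A zero in expires[] means "not a member"; a member is stored as a future jiffies value, or as 1 when the set has no timeout. The following standalone userspace sketch only models that walk, under simplifying assumptions: the struct and function names are illustrative, octets are extracted with shifts instead of the kernel's byte-indexed ABCD macro, and locking, slab caches and the gc timer are omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <time.h>

struct leaf      { time_t expires[256]; };        /* d level: expiry per last octet */
struct branch_c  { struct leaf *tree[256]; };     /* c level */
struct branch_b  { struct branch_c *tree[256]; }; /* b level */
struct tree_root { struct branch_b *tree[256]; }; /* a level */

/* Split a host-order address into its four octets (the role of ABCD). */
static void split_octets(uint32_t ip, unsigned char *a, unsigned char *b,
			 unsigned char *c, unsigned char *d)
{
	*a = ip >> 24; *b = ip >> 16; *c = ip >> 8; *d = ip;
}

static int tree_add(struct tree_root *root, uint32_t ip, time_t expires)
{
	unsigned char a, b, c, d;

	split_octets(ip, &a, &b, &c, &d);
	/* Allocation failures are ignored here for brevity. */
	if (!root->tree[a])
		root->tree[a] = calloc(1, sizeof(struct branch_b));
	if (!root->tree[a]->tree[b])
		root->tree[a]->tree[b] = calloc(1, sizeof(struct branch_c));
	if (!root->tree[a]->tree[b]->tree[c])
		root->tree[a]->tree[b]->tree[c] = calloc(1, sizeof(struct leaf));
	root->tree[a]->tree[b]->tree[c]->expires[d] = expires;
	return 0;
}

static int tree_test(const struct tree_root *root, uint32_t ip)
{
	unsigned char a, b, c, d;

	split_octets(ip, &a, &b, &c, &d);
	if (!root->tree[a] || !root->tree[a]->tree[b]
	    || !root->tree[a]->tree[b]->tree[c])
		return 0;
	/* With timeouts, an entry only matches while its expiry lies ahead. */
	return root->tree[a]->tree[b]->tree[c]->expires[d] > time(NULL);
}

int main(void)
{
	struct tree_root root;

	memset(&root, 0, sizeof(root));
	tree_add(&root, 0xc0a80001 /* 192.168.0.1 */, time(NULL) + 600);
	printf("192.168.0.1 in set: %d\n", tree_test(&root, 0xc0a80001));
	printf("192.168.0.2 in set: %d\n", tree_test(&root, 0xc0a80002));
	return 0;
}
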
4973 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c
4974 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c 1969-12-31 18:00:00.000000000 -0600
4975 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c 2007-06-08 16:29:31.833808500 -0500
4977 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
4978 + * Patrick Schaaf <bof@bof.de>
4979 + * Martin Josefsson <gandalf@wlug.westbo.se>
4980 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4982 + * This program is free software; you can redistribute it and/or modify
4983 + * it under the terms of the GNU General Public License version 2 as
4984 + * published by the Free Software Foundation.
4987 +/* Kernel module implementing an IP set type: the macipmap type */
4989 +#include <linux/module.h>
4990 +#include <linux/ip.h>
4991 +#include <linux/skbuff.h>
4992 +#include <linux/netfilter_ipv4/ip_tables.h>
4993 +#include <linux/netfilter_ipv4/ip_set.h>
4994 +#include <linux/errno.h>
4995 +#include <asm/uaccess.h>
4996 +#include <asm/bitops.h>
4997 +#include <linux/spinlock.h>
4998 +#include <linux/if_ether.h>
4999 +#include <linux/vmalloc.h>
5001 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
5002 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
5005 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5007 + struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
5008 + struct ip_set_macip *table = (struct ip_set_macip *) map->members;
5009 + struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
5011 + if (size != sizeof(struct ip_set_req_macipmap)) {
5012 + ip_set_printk("data length wrong (want %zu, have %zu)",
5013 + sizeof(struct ip_set_req_macipmap),
5018 + if (req->ip < map->first_ip || req->ip > map->last_ip)
5021 + *hash_ip = req->ip;
5022 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
5023 + set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
5024 + if (test_bit(IPSET_MACIP_ISSET,
5025 + (void *) &table[req->ip - map->first_ip].flags)) {
5026 + return (memcmp(req->ethernet,
5027 + &table[req->ip - map->first_ip].ethernet,
5030 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
5035 +testip_kernel(struct ip_set *set,
5036 + const struct sk_buff *skb,
5037 + ip_set_ip_t *hash_ip,
5038 + const u_int32_t *flags,
5039 + unsigned char index)
5041 + struct ip_set_macipmap *map =
5042 + (struct ip_set_macipmap *) set->data;
5043 + struct ip_set_macip *table =
5044 + (struct ip_set_macip *) map->members;
5047 + ip = ntohl(flags[index] & IPSET_SRC
5048 + ? skb->nh.iph->saddr
5049 + : skb->nh.iph->daddr);
5050 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
5051 + flags[index] & IPSET_SRC ? "SRC" : "DST",
5052 + NIPQUAD(skb->nh.iph->saddr),
5053 + NIPQUAD(skb->nh.iph->daddr));
5055 + if (ip < map->first_ip || ip > map->last_ip)
5059 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
5060 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
5061 + if (test_bit(IPSET_MACIP_ISSET,
5062 + (void *) &table[ip - map->first_ip].flags)) {
5063 + /* Is mac pointer valid?
5064 + * If so, compare... */
5065 + return (skb->mac.raw >= skb->head
5066 + && (skb->mac.raw + ETH_HLEN) <= skb->data
5067 + && (memcmp(eth_hdr(skb)->h_source,
5068 + &table[ip - map->first_ip].ethernet,
5071 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
5075 +/* returns 0 on success */
5077 +__addip(struct ip_set *set,
5078 + ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
5080 + struct ip_set_macipmap *map =
5081 + (struct ip_set_macipmap *) set->data;
5082 + struct ip_set_macip *table =
5083 + (struct ip_set_macip *) map->members;
5085 + if (ip < map->first_ip || ip > map->last_ip)
5087 + if (test_and_set_bit(IPSET_MACIP_ISSET,
5088 + (void *) &table[ip - map->first_ip].flags))
5092 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
5093 + memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
5098 +addip(struct ip_set *set, const void *data, size_t size,
5099 + ip_set_ip_t *hash_ip)
5101 + struct ip_set_req_macipmap *req =
5102 + (struct ip_set_req_macipmap *) data;
5104 + if (size != sizeof(struct ip_set_req_macipmap)) {
5105 + ip_set_printk("data length wrong (want %zu, have %zu)",
5106 + sizeof(struct ip_set_req_macipmap),
5110 + return __addip(set, req->ip, req->ethernet, hash_ip);
5114 +addip_kernel(struct ip_set *set,
5115 + const struct sk_buff *skb,
5116 + ip_set_ip_t *hash_ip,
5117 + const u_int32_t *flags,
5118 + unsigned char index)
5122 + ip = ntohl(flags[index] & IPSET_SRC
5123 + ? skb->nh.iph->saddr
5124 + : skb->nh.iph->daddr);
5126 + if (!(skb->mac.raw >= skb->head
5127 + && (skb->mac.raw + ETH_HLEN) <= skb->data))
5130 + return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
5134 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5136 + struct ip_set_macipmap *map =
5137 + (struct ip_set_macipmap *) set->data;
5138 + struct ip_set_macip *table =
5139 + (struct ip_set_macip *) map->members;
5141 + if (ip < map->first_ip || ip > map->last_ip)
5143 + if (!test_and_clear_bit(IPSET_MACIP_ISSET,
5144 + (void *)&table[ip - map->first_ip].flags))
5148 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
5153 +delip(struct ip_set *set, const void *data, size_t size,
5154 + ip_set_ip_t *hash_ip)
5156 + struct ip_set_req_macipmap *req =
5157 + (struct ip_set_req_macipmap *) data;
5159 + if (size != sizeof(struct ip_set_req_macipmap)) {
5160 + ip_set_printk("data length wrong (want %zu, have %zu)",
5161 + sizeof(struct ip_set_req_macipmap),
5165 + return __delip(set, req->ip, hash_ip);
5169 +delip_kernel(struct ip_set *set,
5170 + const struct sk_buff *skb,
5171 + ip_set_ip_t *hash_ip,
5172 + const u_int32_t *flags,
5173 + unsigned char index)
5175 + return __delip(set,
5176 + ntohl(flags[index] & IPSET_SRC
5177 + ? skb->nh.iph->saddr
5178 + : skb->nh.iph->daddr),
5182 +static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
5184 + return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
5187 +static int create(struct ip_set *set, const void *data, size_t size)
5190 + struct ip_set_req_macipmap_create *req =
5191 + (struct ip_set_req_macipmap_create *) data;
5192 + struct ip_set_macipmap *map;
5194 + if (size != sizeof(struct ip_set_req_macipmap_create)) {
5195 + ip_set_printk("data length wrong (want %zu, have %zu)",
5196 + sizeof(struct ip_set_req_macipmap_create),
5201 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
5202 + HIPQUAD(req->from), HIPQUAD(req->to));
5204 + if (req->from > req->to) {
5205 + DP("bad ip range");
5209 + if (req->to - req->from > MAX_RANGE) {
5210 + ip_set_printk("range too big (max %d addresses)",
5215 + map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
5217 + DP("out of memory for %d bytes",
5218 + sizeof(struct ip_set_macipmap));
5221 + map->flags = req->flags;
5222 + map->first_ip = req->from;
5223 + map->last_ip = req->to;
5224 + newbytes = members_size(map->first_ip, map->last_ip);
5225 + map->members = ip_set_malloc(newbytes);
5226 + DP("members: %u %p", newbytes, map->members);
5227 + if (!map->members) {
5228 + DP("out of memory for %d bytes", newbytes);
5232 + memset(map->members, 0, newbytes);
5238 +static void destroy(struct ip_set *set)
5240 + struct ip_set_macipmap *map =
5241 + (struct ip_set_macipmap *) set->data;
5243 + ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
5249 +static void flush(struct ip_set *set)
5251 + struct ip_set_macipmap *map =
5252 + (struct ip_set_macipmap *) set->data;
5253 + memset(map->members, 0, members_size(map->first_ip, map->last_ip));
5256 +static void list_header(const struct ip_set *set, void *data)
5258 + struct ip_set_macipmap *map =
5259 + (struct ip_set_macipmap *) set->data;
5260 + struct ip_set_req_macipmap_create *header =
5261 + (struct ip_set_req_macipmap_create *) data;
5263 + DP("list_header %x %x %u", map->first_ip, map->last_ip,
5266 + header->from = map->first_ip;
5267 + header->to = map->last_ip;
5268 + header->flags = map->flags;
5271 +static int list_members_size(const struct ip_set *set)
5273 + struct ip_set_macipmap *map =
5274 + (struct ip_set_macipmap *) set->data;
5276 + DP("%u", members_size(map->first_ip, map->last_ip));
5277 + return members_size(map->first_ip, map->last_ip);
5280 +static void list_members(const struct ip_set *set, void *data)
5282 + struct ip_set_macipmap *map =
5283 + (struct ip_set_macipmap *) set->data;
5285 + int bytes = members_size(map->first_ip, map->last_ip);
5287 + DP("members: %u %p", bytes, map->members);
5288 + memcpy(data, map->members, bytes);
5291 +static struct ip_set_type ip_set_macipmap = {
5292 + .typename = SETTYPE_NAME,
5293 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5294 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5295 + .create = &create,
5296 + .destroy = &destroy,
5298 + .reqsize = sizeof(struct ip_set_req_macipmap),
5300 + .addip_kernel = &addip_kernel,
5302 + .delip_kernel = &delip_kernel,
5303 + .testip = &testip,
5304 + .testip_kernel = &testip_kernel,
5305 + .header_size = sizeof(struct ip_set_req_macipmap_create),
5306 + .list_header = &list_header,
5307 + .list_members_size = &list_members_size,
5308 + .list_members = &list_members,
5309 + .me = THIS_MODULE,
5312 +MODULE_LICENSE("GPL");
5313 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5314 +MODULE_DESCRIPTION("macipmap type of IP sets");
5316 +static int __init init(void)
5318 + init_max_malloc_size();
5319 + return ip_set_register_set_type(&ip_set_macipmap);
5322 +static void __exit fini(void)
5324 + /* FIXME: possible race with ip_set_create() */
5325 + ip_set_unregister_set_type(&ip_set_macipmap);
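
The macipmap type above maps every address in a fixed range to a slot holding an ISSET flag plus an Ethernet address; a packet matches only when its source MAC equals the stored one, and unset slots match only if the set was created with the matchunset flag. In the kernel path the match additionally verifies that the MAC header is really present in the skb before comparing. Below is a minimal userspace sketch of the slot lookup, with illustrative names (macip_slot, macip_map, SLOT_ISSET) and without the kernel's bitops, locking or skb handling.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN   6
#define SLOT_ISSET 0x01

struct macip_slot {
	unsigned short flags;
	unsigned char  ethernet[ETH_ALEN];
};

struct macip_map {
	uint32_t first_ip, last_ip;   /* inclusive host-order range */
	int      match_unset;         /* analogue of IPSET_MACIP_MATCHUNSET */
	struct macip_slot *slots;     /* last_ip - first_ip + 1 entries */
};

static int macip_test(const struct macip_map *map, uint32_t ip,
		      const unsigned char *mac)
{
	const struct macip_slot *slot;

	if (ip < map->first_ip || ip > map->last_ip)
		return 0;
	slot = &map->slots[ip - map->first_ip];
	if (slot->flags & SLOT_ISSET)
		return memcmp(mac, slot->ethernet, ETH_ALEN) == 0;
	/* For unset slots, the verdict depends on the MATCHUNSET flag. */
	return map->match_unset;
}

int main(void)
{
	struct macip_slot slots[4] = {
		[1] = { .flags = SLOT_ISSET,
			.ethernet = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
	};
	struct macip_map map = { .first_ip = 0xc0a80000, .last_ip = 0xc0a80003,
				 .match_unset = 0, .slots = slots };
	unsigned char good[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char bad[ETH_ALEN]  = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	printf("192.168.0.1 + good MAC: %d\n", macip_test(&map, 0xc0a80001, good));
	printf("192.168.0.1 + bad MAC:  %d\n", macip_test(&map, 0xc0a80001, bad));
	return 0;
}
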
5330 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c
5331 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c 1969-12-31 18:00:00.000000000 -0600
5332 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c 2007-06-08 16:29:31.833808500 -0500
5334 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5336 + * This program is free software; you can redistribute it and/or modify
5337 + * it under the terms of the GNU General Public License version 2 as
5338 + * published by the Free Software Foundation.
5341 +/* Kernel module implementing a CIDR nethash set */
5343 +#include <linux/module.h>
5344 +#include <linux/ip.h>
5345 +#include <linux/skbuff.h>
5346 +#include <linux/netfilter_ipv4/ip_tables.h>
5347 +#include <linux/netfilter_ipv4/ip_set.h>
5348 +#include <linux/errno.h>
5349 +#include <asm/uaccess.h>
5350 +#include <asm/bitops.h>
5351 +#include <linux/spinlock.h>
5352 +#include <linux/vmalloc.h>
5353 +#include <linux/random.h>
5355 +#include <net/ip.h>
5357 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
5358 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
5359 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
5361 +static int limit = MAX_RANGE;
5363 +static inline __u32
5364 +jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
5366 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
5369 +static inline __u32
5370 +hash_id_cidr(struct ip_set_nethash *map,
5372 + unsigned char cidr,
5373 + ip_set_ip_t *hash_ip)
5377 + ip_set_ip_t *elem;
5379 + *hash_ip = pack(ip, cidr);
5381 + for (i = 0; i < map->probes; i++) {
5382 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
5383 + DP("hash key: %u", id);
5384 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
5385 + if (*elem == *hash_ip)
5391 +static inline __u32
5392 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5394 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5395 + __u32 id = UINT_MAX;
5398 + for (i = 0; i < 30 && map->cidr[i]; i++) {
5399 + id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
5400 + if (id != UINT_MAX)
5407 +__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
5408 + ip_set_ip_t *hash_ip)
5410 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5412 + return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
5416 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5418 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
5422 +testip(struct ip_set *set, const void *data, size_t size,
5423 + ip_set_ip_t *hash_ip)
5425 + struct ip_set_req_nethash *req =
5426 + (struct ip_set_req_nethash *) data;
5428 + if (size != sizeof(struct ip_set_req_nethash)) {
5429 + ip_set_printk("data length wrong (want %zu, have %zu)",
5430 + sizeof(struct ip_set_req_nethash),
5434 + return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
5435 + : __testip_cidr(set, req->ip, req->cidr, hash_ip));
5439 +testip_kernel(struct ip_set *set,
5440 + const struct sk_buff *skb,
5441 + ip_set_ip_t *hash_ip,
5442 + const u_int32_t *flags,
5443 + unsigned char index)
5445 + return __testip(set,
5446 + ntohl(flags[index] & IPSET_SRC
5447 + ? skb->nh.iph->saddr
5448 + : skb->nh.iph->daddr),
5453 +__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
5457 + ip_set_ip_t *elem;
5459 + for (i = 0; i < map->probes; i++) {
5460 + probe = jhash_ip(map, i, ip) % map->hashsize;
5461 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
5470 + /* Trigger rehashing */
5475 +__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
5476 + ip_set_ip_t *hash_ip)
5478 + if (!ip || map->elements > limit)
5481 + *hash_ip = pack(ip, cidr);
5482 + DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
5484 + return __addip_base(map, *hash_ip);
5488 +update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
5490 + unsigned char next;
5493 + for (i = 0; i < 30 && map->cidr[i]; i++) {
5494 + if (map->cidr[i] == cidr) {
5496 + } else if (map->cidr[i] < cidr) {
5497 + next = map->cidr[i];
5498 + map->cidr[i] = cidr;
5503 + map->cidr[i] = cidr;
5507 +addip(struct ip_set *set, const void *data, size_t size,
5508 + ip_set_ip_t *hash_ip)
5510 + struct ip_set_req_nethash *req =
5511 + (struct ip_set_req_nethash *) data;
5514 + if (size != sizeof(struct ip_set_req_nethash)) {
5515 + ip_set_printk("data length wrong (want %zu, have %zu)",
5516 + sizeof(struct ip_set_req_nethash),
5520 + ret = __addip((struct ip_set_nethash *) set->data,
5521 + req->ip, req->cidr, hash_ip);
5524 + update_cidr_sizes((struct ip_set_nethash *) set->data,
5531 +addip_kernel(struct ip_set *set,
5532 + const struct sk_buff *skb,
5533 + ip_set_ip_t *hash_ip,
5534 + const u_int32_t *flags,
5535 + unsigned char index)
5537 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5538 + int ret = -ERANGE;
5539 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
5540 + ? skb->nh.iph->saddr
5541 + : skb->nh.iph->daddr);
5544 + ret = __addip(map, ip, map->cidr[0], hash_ip);
5549 +static int retry(struct ip_set *set)
5551 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5552 + ip_set_ip_t *elem;
5554 + u_int32_t i, hashsize = map->hashsize;
5556 + struct ip_set_nethash *tmp;
5558 + if (map->resize == 0)
5564 + /* Calculate new parameters */
5565 + hashsize += (hashsize * map->resize)/100;
5566 + if (hashsize == map->hashsize)
5569 + ip_set_printk("rehashing of set %s triggered: "
5570 + "hashsize grows from %u to %u",
5571 + set->name, map->hashsize, hashsize);
5573 + tmp = kmalloc(sizeof(struct ip_set_nethash)
5574 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
5576 + DP("out of memory for %d bytes",
5577 + sizeof(struct ip_set_nethash)
5578 + + map->probes * sizeof(uint32_t));
5581 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
5582 + if (!tmp->members) {
5583 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
5587 + tmp->hashsize = hashsize;
5588 + tmp->elements = 0;
5589 + tmp->probes = map->probes;
5590 + tmp->resize = map->resize;
5591 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
5592 + memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
5594 + write_lock_bh(&set->lock);
5595 + map = (struct ip_set_nethash *) set->data; /* Play safe */
5596 + for (i = 0; i < map->hashsize && res == 0; i++) {
5597 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
5599 + res = __addip_base(tmp, *elem);
5602 + /* Failure, try again */
5603 + write_unlock_bh(&set->lock);
5604 + harray_free(tmp->members);
5609 + /* Success at resizing! */
5610 + members = map->members;
5612 + map->hashsize = tmp->hashsize;
5613 + map->members = tmp->members;
5614 + write_unlock_bh(&set->lock);
5616 + harray_free(members);
5623 +__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
5624 + ip_set_ip_t *hash_ip)
5626 + ip_set_ip_t id, *elem;
5631 + id = hash_id_cidr(map, ip, cidr, hash_ip);
5632 + if (id == UINT_MAX)
5635 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
5642 +delip(struct ip_set *set, const void *data, size_t size,
5643 + ip_set_ip_t *hash_ip)
5645 + struct ip_set_req_nethash *req =
5646 + (struct ip_set_req_nethash *) data;
5648 + if (size != sizeof(struct ip_set_req_nethash)) {
5649 + ip_set_printk("data length wrong (want %zu, have %zu)",
5650 + sizeof(struct ip_set_req_nethash),
5654 + /* TODO: no garbage collection in map->cidr */
5655 + return __delip((struct ip_set_nethash *) set->data,
5656 + req->ip, req->cidr, hash_ip);
5660 +delip_kernel(struct ip_set *set,
5661 + const struct sk_buff *skb,
5662 + ip_set_ip_t *hash_ip,
5663 + const u_int32_t *flags,
5664 + unsigned char index)
5666 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5667 + int ret = -ERANGE;
5668 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
5669 + ? skb->nh.iph->saddr
5670 + : skb->nh.iph->daddr);
5673 + ret = __delip(map, ip, map->cidr[0], hash_ip);
5678 +static int create(struct ip_set *set, const void *data, size_t size)
5680 + struct ip_set_req_nethash_create *req =
5681 + (struct ip_set_req_nethash_create *) data;
5682 + struct ip_set_nethash *map;
5685 + if (size != sizeof(struct ip_set_req_nethash_create)) {
5686 + ip_set_printk("data length wrong (want %zu, have %zu)",
5687 + sizeof(struct ip_set_req_nethash_create),
5692 + if (req->hashsize < 1) {
5693 + ip_set_printk("hashsize too small");
5696 + if (req->probes < 1) {
5697 + ip_set_printk("probes too small");
5701 + map = kmalloc(sizeof(struct ip_set_nethash)
5702 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
5704 + DP("out of memory for %d bytes",
5705 + sizeof(struct ip_set_nethash)
5706 + + req->probes * sizeof(uint32_t));
5709 + for (i = 0; i < req->probes; i++)
5710 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
5711 + map->elements = 0;
5712 + map->hashsize = req->hashsize;
5713 + map->probes = req->probes;
5714 + map->resize = req->resize;
5715 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
5716 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
5717 + if (!map->members) {
5718 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
5727 +static void destroy(struct ip_set *set)
5729 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5731 + harray_free(map->members);
5737 +static void flush(struct ip_set *set)
5739 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5740 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
5741 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
5742 + map->elements = 0;
5745 +static void list_header(const struct ip_set *set, void *data)
5747 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5748 + struct ip_set_req_nethash_create *header =
5749 + (struct ip_set_req_nethash_create *) data;
5751 + header->hashsize = map->hashsize;
5752 + header->probes = map->probes;
5753 + header->resize = map->resize;
5756 +static int list_members_size(const struct ip_set *set)
5758 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5760 + return (map->hashsize * sizeof(ip_set_ip_t));
5763 +static void list_members(const struct ip_set *set, void *data)
5765 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
5766 + ip_set_ip_t i, *elem;
5768 + for (i = 0; i < map->hashsize; i++) {
5769 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
5770 + ((ip_set_ip_t *)data)[i] = *elem;
5774 +static struct ip_set_type ip_set_nethash = {
5775 + .typename = SETTYPE_NAME,
5776 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5777 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5778 + .create = &create,
5779 + .destroy = &destroy,
5781 + .reqsize = sizeof(struct ip_set_req_nethash),
5783 + .addip_kernel = &addip_kernel,
5786 + .delip_kernel = &delip_kernel,
5787 + .testip = &testip,
5788 + .testip_kernel = &testip_kernel,
5789 + .header_size = sizeof(struct ip_set_req_nethash_create),
5790 + .list_header = &list_header,
5791 + .list_members_size = &list_members_size,
5792 + .list_members = &list_members,
5793 + .me = THIS_MODULE,
5796 +MODULE_LICENSE("GPL");
5797 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5798 +MODULE_DESCRIPTION("nethash type of IP sets");
5799 +module_param(limit, int, 0600);
5800 +MODULE_PARM_DESC(limit, "maximum number of elements stored in the sets");
5802 +static int __init init(void)
5804 + return ip_set_register_set_type(&ip_set_nethash);
5807 +static void __exit fini(void)
5809 + /* FIXME: possible race with ip_set_create() */
5810 + ip_set_unregister_set_type(&ip_set_nethash);
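
The nethash type above stores networks rather than single addresses: an add masks the IP to the requested prefix length and hashes the packed value with several jhash probes, while testing a plain address retries the lookup once for every prefix length recorded in map->cidr[]. When no probe finds room, retry() grows the hash by the configured resize percentage and re-inserts every element. The sketch below only illustrates the per-prefix lookup idea and deliberately simplifies everything else: a toy single-probe hash, plain (network, cidr) entries instead of the kernel's pack() encoding, and no rehashing.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define HASHSIZE  64
#define MAX_CIDRS 30

struct net_entry { uint32_t net; unsigned char cidr; int used; };

struct nethash {
	struct net_entry table[HASHSIZE];
	unsigned char cidr[MAX_CIDRS];   /* prefix lengths in use, 0-terminated */
};

static uint32_t mask_to_cidr(uint32_t ip, unsigned char cidr)
{
	return cidr ? ip & (0xFFFFFFFFu << (32 - cidr)) : 0;
}

/* Toy hash; the kernel uses jhash with several probes instead. */
static unsigned int slot_of(uint32_t net, unsigned char cidr)
{
	return (net ^ (net >> 13) ^ cidr) % HASHSIZE;
}

static int nethash_add(struct nethash *set, uint32_t ip, unsigned char cidr)
{
	uint32_t net = mask_to_cidr(ip, cidr);
	unsigned int i = slot_of(net, cidr);

	if (set->table[i].used)
		return -1;            /* collision: the kernel would rehash here */
	set->table[i] = (struct net_entry){ .net = net, .cidr = cidr, .used = 1 };
	for (i = 0; i < MAX_CIDRS && set->cidr[i]; i++)
		if (set->cidr[i] == cidr)
			return 0;
	if (i < MAX_CIDRS)
		set->cidr[i] = cidr;  /* remember the prefix lengths in use */
	return 0;
}

static int nethash_test(const struct nethash *set, uint32_t ip)
{
	unsigned int i;

	for (i = 0; i < MAX_CIDRS && set->cidr[i]; i++) {
		uint32_t net = mask_to_cidr(ip, set->cidr[i]);
		unsigned int s = slot_of(net, set->cidr[i]);

		if (set->table[s].used && set->table[s].net == net
		    && set->table[s].cidr == set->cidr[i])
			return 1;
	}
	return 0;
}

int main(void)
{
	struct nethash set;

	memset(&set, 0, sizeof(set));
	nethash_add(&set, 0x0a000000 /* 10.0.0.0 */, 8);
	printf("10.1.2.3 matches: %d\n", nethash_test(&set, 0x0a010203));
	printf("11.1.2.3 matches: %d\n", nethash_test(&set, 0x0b010203));
	return 0;
}
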
5815 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_portmap.c
5816 --- linux-2.6.21.1/net/ipv4/netfilter/ip_set_portmap.c 1969-12-31 18:00:00.000000000 -0600
5817 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_portmap.c 2007-06-08 16:29:31.833808500 -0500
5819 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5821 + * This program is free software; you can redistribute it and/or modify
5822 + * it under the terms of the GNU General Public License version 2 as
5823 + * published by the Free Software Foundation.
5826 +/* Kernel module implementing a port set type as a bitmap */
5828 +#include <linux/module.h>
5829 +#include <linux/ip.h>
5830 +#include <linux/tcp.h>
5831 +#include <linux/udp.h>
5832 +#include <linux/skbuff.h>
5833 +#include <linux/netfilter_ipv4/ip_tables.h>
5834 +#include <linux/netfilter_ipv4/ip_set.h>
5835 +#include <linux/errno.h>
5836 +#include <asm/uaccess.h>
5837 +#include <asm/bitops.h>
5838 +#include <linux/spinlock.h>
5840 +#include <net/ip.h>
5842 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
5844 +/* We must handle non-linear skbs */
5845 +static inline ip_set_ip_t
5846 +get_port(const struct sk_buff *skb, u_int32_t flags)
5848 + struct iphdr *iph = skb->nh.iph;
5849 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
5851 + switch (iph->protocol) {
5852 + case IPPROTO_TCP: {
5853 + struct tcphdr tcph;
5855 + /* See comments at tcp_match in ip_tables.c */
5857 + return INVALID_PORT;
5859 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
5860 + /* No choice either */
5861 + return INVALID_PORT;
5863 + return ntohs(flags & IPSET_SRC ?
5864 + tcph.source : tcph.dest);
5866 + case IPPROTO_UDP: {
5867 + struct udphdr udph;
5870 + return INVALID_PORT;
5872 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
5873 + /* No choice either */
5874 + return INVALID_PORT;
5876 + return ntohs(flags & IPSET_SRC ?
5877 + udph.source : udph.dest);
5880 + return INVALID_PORT;
5885 +__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
5887 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5889 + if (port < map->first_port || port > map->last_port)
5892 + *hash_port = port;
5893 + DP("set: %s, port:%u, %u", set->name, port, *hash_port);
5894 + return !!test_bit(port - map->first_port, map->members);
5898 +testport(struct ip_set *set, const void *data, size_t size,
5899 + ip_set_ip_t *hash_port)
5901 + struct ip_set_req_portmap *req =
5902 + (struct ip_set_req_portmap *) data;
5904 + if (size != sizeof(struct ip_set_req_portmap)) {
5905 + ip_set_printk("data length wrong (want %zu, have %zu)",
5906 + sizeof(struct ip_set_req_portmap),
5910 + return __testport(set, req->port, hash_port);
5914 +testport_kernel(struct ip_set *set,
5915 + const struct sk_buff *skb,
5916 + ip_set_ip_t *hash_port,
5917 + const u_int32_t *flags,
5918 + unsigned char index)
5921 + ip_set_ip_t port = get_port(skb, flags[index]);
5923 + DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
5924 + if (port == INVALID_PORT)
5927 + res = __testport(set, port, hash_port);
5929 + return (res < 0 ? 0 : res);
5933 +__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
5935 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5937 + if (port < map->first_port || port > map->last_port)
5939 + if (test_and_set_bit(port - map->first_port, map->members))
5942 + *hash_port = port;
5943 + DP("port %u", port);
5948 +addport(struct ip_set *set, const void *data, size_t size,
5949 + ip_set_ip_t *hash_port)
5951 + struct ip_set_req_portmap *req =
5952 + (struct ip_set_req_portmap *) data;
5954 + if (size != sizeof(struct ip_set_req_portmap)) {
5955 + ip_set_printk("data length wrong (want %zu, have %zu)",
5956 + sizeof(struct ip_set_req_portmap),
5960 + return __addport(set, req->port, hash_port);
5964 +addport_kernel(struct ip_set *set,
5965 + const struct sk_buff *skb,
5966 + ip_set_ip_t *hash_port,
5967 + const u_int32_t *flags,
5968 + unsigned char index)
5970 + ip_set_ip_t port = get_port(skb, flags[index]);
5972 + if (port == INVALID_PORT)
5975 + return __addport(set, port, hash_port);
5979 +__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
5981 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
5983 + if (port < map->first_port || port > map->last_port)
5985 + if (!test_and_clear_bit(port - map->first_port, map->members))
5988 + *hash_port = port;
5989 + DP("port %u", port);
5994 +delport(struct ip_set *set, const void *data, size_t size,
5995 + ip_set_ip_t *hash_port)
5997 + struct ip_set_req_portmap *req =
5998 + (struct ip_set_req_portmap *) data;
6000 + if (size != sizeof(struct ip_set_req_portmap)) {
6001 + ip_set_printk("data length wrong (want %zu, have %zu)",
6002 + sizeof(struct ip_set_req_portmap),
6006 + return __delport(set, req->port, hash_port);
6010 +delport_kernel(struct ip_set *set,
6011 + const struct sk_buff *skb,
6012 + ip_set_ip_t *hash_port,
6013 + const u_int32_t *flags,
6014 + unsigned char index)
6016 + ip_set_ip_t port = get_port(skb, flags[index]);
6018 + if (port == INVALID_PORT)
6021 + return __delport(set, port, hash_port);
6024 +static int create(struct ip_set *set, const void *data, size_t size)
6027 + struct ip_set_req_portmap_create *req =
6028 + (struct ip_set_req_portmap_create *) data;
6029 + struct ip_set_portmap *map;
6031 + if (size != sizeof(struct ip_set_req_portmap_create)) {
6032 + ip_set_printk("data length wrong (want %zu, have %zu)",
6033 + sizeof(struct ip_set_req_portmap_create),
6038 + DP("from %u to %u", req->from, req->to);
6040 + if (req->from > req->to) {
6041 + DP("bad port range");
6045 + if (req->to - req->from > MAX_RANGE) {
6046 + ip_set_printk("range too big (max %d ports)",
6051 + map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
6053 + DP("out of memory for %d bytes",
6054 + sizeof(struct ip_set_portmap));
6057 + map->first_port = req->from;
6058 + map->last_port = req->to;
6059 + newbytes = bitmap_bytes(req->from, req->to);
6060 + map->members = kmalloc(newbytes, GFP_KERNEL);
6061 + if (!map->members) {
6062 + DP("out of memory for %d bytes", newbytes);
6066 + memset(map->members, 0, newbytes);
6072 +static void destroy(struct ip_set *set)
6074 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6076 + kfree(map->members);
6082 +static void flush(struct ip_set *set)
6084 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6085 + memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
6088 +static void list_header(const struct ip_set *set, void *data)
6090 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6091 + struct ip_set_req_portmap_create *header =
6092 + (struct ip_set_req_portmap_create *) data;
6094 + DP("list_header %u %u", map->first_port, map->last_port);
6096 + header->from = map->first_port;
6097 + header->to = map->last_port;
6100 +static int list_members_size(const struct ip_set *set)
6102 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6104 + return bitmap_bytes(map->first_port, map->last_port);
6107 +static void list_members(const struct ip_set *set, void *data)
6109 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6110 + int bytes = bitmap_bytes(map->first_port, map->last_port);
6112 + memcpy(data, map->members, bytes);
6115 +static struct ip_set_type ip_set_portmap = {
6116 + .typename = SETTYPE_NAME,
6117 + .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
6118 + .protocol_version = IP_SET_PROTOCOL_VERSION,
6119 + .create = &create,
6120 + .destroy = &destroy,
6122 + .reqsize = sizeof(struct ip_set_req_portmap),
6123 + .addip = &addport,
6124 + .addip_kernel = &addport_kernel,
6125 + .delip = &delport,
6126 + .delip_kernel = &delport_kernel,
6127 + .testip = &testport,
6128 + .testip_kernel = &testport_kernel,
6129 + .header_size = sizeof(struct ip_set_req_portmap_create),
6130 + .list_header = &list_header,
6131 + .list_members_size = &list_members_size,
6132 + .list_members = &list_members,
6133 + .me = THIS_MODULE,
6136 +MODULE_LICENSE("GPL");
6137 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6138 +MODULE_DESCRIPTION("portmap type of IP sets");
6140 +static int __init init(void)
6142 + return ip_set_register_set_type(&ip_set_portmap);
6145 +static void __exit fini(void)
6147 + /* FIXME: possible race with ip_set_create() */
6148 + ip_set_unregister_set_type(&ip_set_portmap);
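
The portmap type above is the simplest set type: a contiguous port range backed by one bit per port. get_port() digs the TCP/UDP source or destination port out of the packet with skb_copy_bits (so non-linear skbs work) and refuses non-first fragments, after which add/del/test reduce to atomic bit operations on the members bitmap. Here is a hedged userspace sketch of that bitmap bookkeeping, with plain helpers standing in for the kernel's test_and_set_bit/test_bit.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct portmap {
	uint16_t first_port, last_port;   /* inclusive range */
	unsigned char *bits;              /* one bit per port in range */
};

static int in_range(const struct portmap *m, uint16_t port)
{
	return port >= m->first_port && port <= m->last_port;
}

static int portmap_add(struct portmap *m, uint16_t port)
{
	unsigned int off;

	if (!in_range(m, port))
		return -1;                     /* the kernel returns -ERANGE */
	off = port - m->first_port;
	if (m->bits[off / 8] & (1u << (off % 8)))
		return -2;                     /* already set (-EEXIST) */
	m->bits[off / 8] |= 1u << (off % 8);
	return 0;
}

static int portmap_test(const struct portmap *m, uint16_t port)
{
	unsigned int off;

	if (!in_range(m, port))
		return 0;
	off = port - m->first_port;
	return !!(m->bits[off / 8] & (1u << (off % 8)));
}

int main(void)
{
	struct portmap m = { .first_port = 1024, .last_port = 2047 };

	m.bits = calloc((m.last_port - m.first_port) / 8 + 1, 1);
	portmap_add(&m, 1194);
	printf("1194 in set: %d\n", portmap_test(&m, 1194));
	printf("1195 in set: %d\n", portmap_test(&m, 1195));
	free(m.bits);
	return 0;
}
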
6153 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ipt_set.c linux-2.6.21.1.new/net/ipv4/netfilter/ipt_set.c
6154 --- linux-2.6.21.1/net/ipv4/netfilter/ipt_set.c 1969-12-31 18:00:00.000000000 -0600
6155 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ipt_set.c 2007-06-08 16:29:31.833808500 -0500
6157 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
6158 + * Patrick Schaaf <bof@bof.de>
6159 + * Martin Josefsson <gandalf@wlug.westbo.se>
6160 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6162 + * This program is free software; you can redistribute it and/or modify
6163 + * it under the terms of the GNU General Public License version 2 as
6164 + * published by the Free Software Foundation.
6167 +/* Kernel module to match an IP set. */
6169 +#include <linux/module.h>
6170 +#include <linux/ip.h>
6171 +#include <linux/skbuff.h>
6172 +#include <linux/version.h>
6174 +#include <linux/netfilter_ipv4/ip_tables.h>
6175 +#include <linux/netfilter_ipv4/ip_set.h>
6176 +#include <linux/netfilter_ipv4/ipt_set.h>
6179 +match_set(const struct ipt_set_info *info,
6180 + const struct sk_buff *skb,
6183 + if (ip_set_testip_kernel(info->index, skb, info->flags))
6189 +match(const struct sk_buff *skb,
6190 + const struct net_device *in,
6191 + const struct net_device *out,
6192 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6193 + const struct xt_match *match,
6195 + const void *matchinfo,
6196 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
6197 + int offset, unsigned int protoff, int *hotdrop)
6199 + int offset, int *hotdrop)
6202 + const struct ipt_set_info_match *info = matchinfo;
6204 + return match_set(&info->match_set,
6206 + info->match_set.flags[0] & IPSET_MATCH_INV);
6210 +checkentry(const char *tablename,
6211 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
6214 + const struct ipt_ip *ip,
6216 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6217 + const struct xt_match *match,
6220 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6221 + unsigned int matchsize,
6223 + unsigned int hook_mask)
6225 + struct ipt_set_info_match *info =
6226 + (struct ipt_set_info_match *) matchinfo;
6227 + ip_set_id_t index;
6229 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6230 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
6231 + ip_set_printk("invalid matchsize %d", matchsize);
6236 + index = ip_set_get_byindex(info->match_set.index);
6238 + if (index == IP_SET_INVALID_ID) {
6239 + ip_set_printk("Cannot find set indentified by id %u to match",
6240 + info->match_set.index);
6241 + return 0; /* error */
6243 + if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
6244 + ip_set_printk("That's nasty!");
6245 + return 0; /* error */
6251 +static void destroy(
6252 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6253 + const struct xt_match *match,
6255 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6256 + void *matchinfo, unsigned int matchsize)
6261 + struct ipt_set_info_match *info = matchinfo;
6263 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6264 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
6265 + ip_set_printk("invalid matchsize %d", matchsize);
6269 + ip_set_put(info->match_set.index);
6272 +static struct ipt_match set_match = {
6274 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
6275 + .family = AF_INET,
6278 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6279 + .matchsize = sizeof(struct ipt_set_info_match),
6281 + .checkentry = &checkentry,
6282 + .destroy = &destroy,
6286 +MODULE_LICENSE("GPL");
6287 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6288 +MODULE_DESCRIPTION("iptables IP set match module");
6290 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
6291 +#define ipt_register_match xt_register_match
6292 +#define ipt_unregister_match xt_unregister_match
6295 +static int __init ipt_ipset_init(void)
6297 + return ipt_register_match(&set_match);
6300 +static void __exit ipt_ipset_fini(void)
6302 + ipt_unregister_match(&set_match);
6305 +module_init(ipt_ipset_init);
6306 +module_exit(ipt_ipset_fini);
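
Both the set match above and the SET target that follows rely on the same flag convention used throughout the set-type modules: each binding level carries a flag word in which IPSET_SRC/IPSET_DST select whether the packet's source or destination address (or port) is handed to the set, and IPSET_MATCH_INV inverts the match verdict. A small standalone illustration of that selection, using a stand-in packet struct instead of an skb:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IPSET_SRC       0x01	/* flag bits mirroring ip_set.h */
#define IPSET_DST       0x02
#define IPSET_MATCH_INV 0x04

struct fake_packet { uint32_t saddr, daddr; };	/* network byte order */

/* Same selection the kernel modules perform before calling a set type:
 * pick saddr or daddr according to the flag word, then convert to host
 * byte order. */
static uint32_t pick_address(const struct fake_packet *p, uint32_t flags)
{
	return ntohl(flags & IPSET_SRC ? p->saddr : p->daddr);
}

/* The match verdict is the set-membership result, flipped when the rule
 * asked for inverted matching. */
static int match_verdict(int in_set, uint32_t flags)
{
	return flags & IPSET_MATCH_INV ? !in_set : in_set;
}

int main(void)
{
	struct fake_packet p = { .saddr = htonl(0xc0a80001),	/* 192.168.0.1 */
				 .daddr = htonl(0x08080808) };	/* 8.8.8.8 */

	printf("SRC -> %08x\n", pick_address(&p, IPSET_SRC));
	printf("DST -> %08x\n", pick_address(&p, IPSET_DST));
	printf("member, normal:   %d\n", match_verdict(1, IPSET_SRC));
	printf("member, inverted: %d\n", match_verdict(1, IPSET_SRC | IPSET_MATCH_INV));
	return 0;
}
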
6307 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ipt_SET.c linux-2.6.21.1.new/net/ipv4/netfilter/ipt_SET.c
6308 --- linux-2.6.21.1/net/ipv4/netfilter/ipt_SET.c 1969-12-31 18:00:00.000000000 -0600
6309 +++ linux-2.6.21.1.new/net/ipv4/netfilter/ipt_SET.c 2007-06-08 16:29:31.833808500 -0500
6311 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
6312 + * Patrick Schaaf <bof@bof.de>
6313 + * Martin Josefsson <gandalf@wlug.westbo.se>
6314 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6316 + * This program is free software; you can redistribute it and/or modify
6317 + * it under the terms of the GNU General Public License version 2 as
6318 + * published by the Free Software Foundation.
6321 +/* ipt_SET.c - netfilter target to manipulate IP sets */
6323 +#include <linux/types.h>
6324 +#include <linux/ip.h>
6325 +#include <linux/timer.h>
6326 +#include <linux/module.h>
6327 +#include <linux/netfilter.h>
6328 +#include <linux/netdevice.h>
6329 +#include <linux/if.h>
6330 +#include <linux/inetdevice.h>
6331 +#include <linux/version.h>
6332 +#include <net/protocol.h>
6333 +#include <net/checksum.h>
6334 +#include <linux/netfilter_ipv4.h>
6335 +#include <linux/netfilter_ipv4/ip_nat_rule.h>
6336 +#include <linux/netfilter_ipv4/ipt_set.h>
6338 +static unsigned int
6339 +target(struct sk_buff **pskb,
6340 + const struct net_device *in,
6341 + const struct net_device *out,
6342 + unsigned int hooknum,
6343 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6344 + const struct xt_target *target,
6346 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6347 + const void *targinfo,
6350 + const void *targinfo)
6353 + const struct ipt_set_info_target *info = targinfo;
6355 + if (info->add_set.index != IP_SET_INVALID_ID)
6356 + ip_set_addip_kernel(info->add_set.index,
6358 + info->add_set.flags);
6359 + if (info->del_set.index != IP_SET_INVALID_ID)
6360 + ip_set_delip_kernel(info->del_set.index,
6362 + info->del_set.flags);
6364 + return IPT_CONTINUE;
6368 +checkentry(const char *tablename,
6369 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
6372 + const struct ipt_entry *e,
6374 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6375 + const struct xt_target *target,
6378 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6379 + unsigned int targinfosize,
6381 + unsigned int hook_mask)
6383 + struct ipt_set_info_target *info =
6384 + (struct ipt_set_info_target *) targinfo;
6385 + ip_set_id_t index;
6387 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6388 + if (targinfosize != IPT_ALIGN(sizeof(*info))) {
6389 + DP("bad target info size %u", targinfosize);
6394 + if (info->add_set.index != IP_SET_INVALID_ID) {
6395 + index = ip_set_get_byindex(info->add_set.index);
6396 + if (index == IP_SET_INVALID_ID) {
6397 + ip_set_printk("cannot find add_set index %u as target",
6398 + info->add_set.index);
6399 + return 0; /* error */
6403 + if (info->del_set.index != IP_SET_INVALID_ID) {
6404 + index = ip_set_get_byindex(info->del_set.index);
6405 + if (index == IP_SET_INVALID_ID) {
6406 + ip_set_printk("cannot find del_set index %u as target",
6407 + info->del_set.index);
6408 + return 0; /* error */
6411 + if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
6412 + || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
6413 + ip_set_printk("That's nasty!");
6414 + return 0; /* error */
6420 +static void destroy(
6421 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6422 + const struct xt_target *target,
6424 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6425 + void *targetinfo, unsigned int targetsize)
6430 + struct ipt_set_info_target *info = targetinfo;
6432 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6433 + if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
6434 + ip_set_printk("invalid targetsize %d", targetsize);
6438 + if (info->add_set.index != IP_SET_INVALID_ID)
6439 + ip_set_put(info->add_set.index);
6440 + if (info->del_set.index != IP_SET_INVALID_ID)
6441 + ip_set_put(info->del_set.index);
6444 +static struct ipt_target SET_target = {
6446 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
6447 + .family = AF_INET,
6450 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
6451 + .targetsize = sizeof(struct ipt_set_info_target),
6453 + .checkentry = checkentry,
6454 + .destroy = destroy,
6458 +MODULE_LICENSE("GPL");
6459 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6460 +MODULE_DESCRIPTION("iptables IP set target module");
6462 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
6463 +#define ipt_register_target xt_register_target
6464 +#define ipt_unregister_target xt_unregister_target
6467 +static int __init ipt_SET_init(void)
6469 + return ipt_register_target(&SET_target);
6472 +static void __exit ipt_SET_fini(void)
6474 + ipt_unregister_target(&SET_target);
6477 +module_init(ipt_SET_init);
6478 +module_exit(ipt_SET_fini);
6479 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/Kconfig linux-2.6.21.1.new/net/ipv4/netfilter/Kconfig
6480 --- linux-2.6.21.1/net/ipv4/netfilter/Kconfig 2007-04-27 16:49:26.000000000 -0500
6481 +++ linux-2.6.21.1.new/net/ipv4/netfilter/Kconfig 2007-06-08 16:29:31.833808500 -0500
6482 @@ -657,5 +657,114 @@
6483 Allows altering the ARP packet payload: source and destination
6484 hardware and network addresses.
6487 + tristate "IP set support"
6488 + depends on INET && NETFILTER
6490 + This option adds IP set support to the kernel.
6491 + In order to define and use sets, you need the userspace utility
6494 + To compile it as a module, choose M here. If unsure, say N.
6496 +config IP_NF_SET_MAX
6497 + int "Maximum number of IP sets"
6500 + depends on IP_NF_SET
6502 + You can define here the default value of the maximum number
6503 + of IP sets for the kernel.
6505 + The value can be overridden by the 'max_sets' module
6506 + parameter of the 'ip_set' module.
6508 +config IP_NF_SET_HASHSIZE
6509 + int "Hash size for bindings of IP sets"
6511 + depends on IP_NF_SET
6513 + You can define here the default value of the hash size for
6514 + bindings of IP sets.
6516 + The value can be overridden by the 'hash_size' module
6517 + parameter of the 'ip_set' module.
6519 +config IP_NF_SET_IPMAP
6520 + tristate "ipmap set support"
6521 + depends on IP_NF_SET
6523 + This option adds the ipmap set type support.
6525 + To compile it as a module, choose M here. If unsure, say N.
6527 +config IP_NF_SET_MACIPMAP
6528 + tristate "macipmap set support"
6529 + depends on IP_NF_SET
6531 + This option adds the macipmap set type support.
6533 + To compile it as a module, choose M here. If unsure, say N.
6535 +config IP_NF_SET_PORTMAP
6536 + tristate "portmap set support"
6537 + depends on IP_NF_SET
6539 + This option adds the portmap set type support.
6541 + To compile it as a module, choose M here. If unsure, say N.
6543 +config IP_NF_SET_IPHASH
6544 + tristate "iphash set support"
6545 + depends on IP_NF_SET
6547 + This option adds the iphash set type support.
6549 + To compile it as a module, choose M here. If unsure, say N.
6551 +config IP_NF_SET_NETHASH
6552 + tristate "nethash set support"
6553 + depends on IP_NF_SET
6555 + This option adds the nethash set type support.
6557 + To compile it as a module, choose M here. If unsure, say N.
6559 +config IP_NF_SET_IPPORTHASH
6560 + tristate "ipporthash set support"
6561 + depends on IP_NF_SET
6563 + This option adds the ipporthash set type support.
6565 + To compile it as a module, choose M here. If unsure, say N.
6567 +config IP_NF_SET_IPTREE
6568 + tristate "iptree set support"
6569 + depends on IP_NF_SET
6571 + This option adds the iptree set type support.
6573 + To compile it as a module, choose M here. If unsure, say N.
6575 +config IP_NF_MATCH_SET
6576 + tristate "set match support"
6577 + depends on IP_NF_SET
6579 + Set matching matches packets against the given IP sets.
6580 + You need the ipset utility to create and set up the sets.
6582 + To compile it as a module, choose M here. If unsure, say N.
6584 +config IP_NF_TARGET_SET
6585 + tristate "SET target support"
6586 + depends on IP_NF_SET
6588 + The SET target makes it possible to add/delete entries
6590 + You need the ipset utility to create and set up the sets.
6592 + To compile it as a module, choose M here. If unsure, say N.
6597 diff -ruN linux-2.6.21.1/net/ipv4/netfilter/Makefile linux-2.6.21.1.new/net/ipv4/netfilter/Makefile
6598 --- linux-2.6.21.1/net/ipv4/netfilter/Makefile 2007-04-27 16:49:26.000000000 -0500
6599 +++ linux-2.6.21.1.new/net/ipv4/netfilter/Makefile 2007-06-08 16:29:31.837808750 -0500
6601 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
6602 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
6603 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
6604 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
6605 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
6608 @@ -105,6 +106,17 @@
6609 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
6610 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
6611 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
6612 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
6615 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
6616 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
6617 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
6618 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
6619 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
6620 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
6621 +obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
6622 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
6624 # generic ARP tables
6625 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o