1 --- a/include/linux/netfilter_ipv4/Kbuild
2 +++ b/include/linux/netfilter_ipv4/Kbuild
3 @@ -45,3 +45,20 @@ header-y += ipt_ttl.h
5 unifdef-y += ip_queue.h
6 unifdef-y += ip_tables.h
9 +header-y += ip_set_iphash.h
10 +unifdef-y += ip_set_bitmaps.h
11 +unifdef-y += ip_set_getport.h
12 +unifdef-y += ip_set_hashes.h
13 +header-y += ip_set_ipmap.h
14 +header-y += ip_set_ipporthash.h
15 +header-y += ip_set_ipportiphash.h
16 +header-y += ip_set_ipportnethash.h
17 +unifdef-y += ip_set_iptree.h
18 +unifdef-y += ip_set_iptreemap.h
19 +header-y += ip_set_jhash.h
20 +header-y += ip_set_macipmap.h
21 +header-y += ip_set_nethash.h
22 +header-y += ip_set_portmap.h
23 +header-y += ip_set_setlist.h
25 +++ b/include/linux/netfilter_ipv4/ip_set.h
30 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
31 + * Patrick Schaaf <bof@bof.de>
32 + * Martin Josefsson <gandalf@wlug.westbo.se>
33 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
35 + * This program is free software; you can redistribute it and/or modify
36 + * it under the terms of the GNU General Public License version 2 as
37 + * published by the Free Software Foundation.
45 + * A sockopt of such quality has hardly ever been seen before on the open
46 + * market! This little beauty, hardly ever used: above 64, so it's
47 + * traditionally used for firewalling, not touched (even once!) by the
48 + * 2.0, 2.2 and 2.4 kernels!
50 + * Comes with its own certificate of authenticity, valid anywhere in the
58 + * Heavily modified by Joakim Axelsson 08.03.2002
59 + * - Made it more module-based
61 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
63 + * - in order to "deal with" backward compatibility, renamed to ipset
67 + * Used so that the kernel module and ipset-binary can match their versions
69 +#define IP_SET_PROTOCOL_VERSION 3
71 +#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
73 +/* Lets work with our own typedef for representing an IP address.
74 + * We hope to make the code more portable, possibly to IPv6...
76 + * The representation works in HOST byte order, because most set types
77 + * will perform arithmetic operations and compare operations.
79 + * For now the type is an uint32_t.
81 + * Make sure to ONLY use the functions when translating and parsing
82 + * in order to keep the host byte order and make it more portable:
87 + * (Joakim: where are they???)
90 +typedef uint32_t ip_set_ip_t;
92 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
93 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
95 +typedef uint16_t ip_set_id_t;
97 +#define IP_SET_INVALID_ID 65535
99 +/* How deep we follow bindings */
100 +#define IP_SET_MAX_BINDINGS 6
103 + * Option flags for kernel operations (ipt_set_info)
105 +#define IPSET_SRC 0x01 /* Source match/add */
106 +#define IPSET_DST 0x02 /* Destination match/add */
107 +#define IPSET_MATCH_INV 0x04 /* Inverse matching */
112 +#define IPSET_TYPE_IP 0x01 /* IP address type of set */
113 +#define IPSET_TYPE_PORT 0x02 /* Port type of set */
114 +#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
115 +#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
116 +#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
117 +#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
118 +#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
120 +/* Reserved keywords */
121 +#define IPSET_TOKEN_DEFAULT ":default:"
122 +#define IPSET_TOKEN_ALL ":all:"
124 +/* SO_IP_SET operation constants, and their request struct types.
127 + * 0-99: commands with version checking
128 + * 100-199: add/del/test/bind/unbind
129 + * 200-299: list, save, restore
132 +/* Single shot operations:
133 + * version, create, destroy, flush, rename and swap
135 + * Sets are identified by name.
138 +#define IP_SET_REQ_STD \
140 + unsigned version; \
141 + char name[IP_SET_MAXNAMELEN]
143 +#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
144 +struct ip_set_req_create {
146 + char typename[IP_SET_MAXNAMELEN];
149 +#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
150 +struct ip_set_req_std {
154 +#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
155 +/* Uses ip_set_req_std */
157 +#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
158 +/* Uses ip_set_req_create */
160 +#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
161 +/* Uses ip_set_req_create */
163 +union ip_set_name_index {
164 + char name[IP_SET_MAXNAMELEN];
168 +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
169 +struct ip_set_req_get_set {
172 + union ip_set_name_index set;
175 +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
176 +/* Uses ip_set_req_get_set */
178 +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
179 +struct ip_set_req_version {
184 +/* Double shots operations:
185 + * add, del, test, bind and unbind.
187 + * First we query the kernel to get the index and type of the target set,
188 + * then issue the command. Validity of IP is checked in kernel in order
189 + * to minimize sockopt operations.
192 +/* Get minimal set data for add/del/test/bind/unbind IP */
193 +#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
194 +struct ip_set_req_adt_get {
197 + union ip_set_name_index set;
198 + char typename[IP_SET_MAXNAMELEN];
201 +#define IP_SET_REQ_BYINDEX \
205 +struct ip_set_req_adt {
206 + IP_SET_REQ_BYINDEX;
209 +#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
210 +/* Uses ip_set_req_adt, with type specific addage */
212 +#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
213 +/* Uses ip_set_req_adt, with type specific addage */
215 +#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
216 +/* Uses ip_set_req_adt, with type specific addage */
218 +#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
219 +/* Uses ip_set_req_bind, with type specific addage */
220 +struct ip_set_req_bind {
221 + IP_SET_REQ_BYINDEX;
222 + char binding[IP_SET_MAXNAMELEN];
225 +#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
226 +/* Uses ip_set_req_bind, with type specific addage
227 + * index = 0 means unbinding for all sets */
229 +#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
230 +/* Uses ip_set_req_bind, with type specific addage */
232 +/* Multiple shots operations: list, save, restore.
234 + * - check kernel version and query the max number of sets
235 + * - get the basic information on all sets
236 + * and size required for the next step
237 + * - get actual set data: header, data, bindings
240 +/* Get max_sets and the index of a queried set
242 +#define IP_SET_OP_MAX_SETS 0x00000020
243 +struct ip_set_req_max_sets {
246 + ip_set_id_t max_sets; /* max_sets */
247 + ip_set_id_t sets; /* real number of sets */
248 + union ip_set_name_index set; /* index of set if name used */
251 +/* Get the id and name of the sets plus size for next step */
252 +#define IP_SET_OP_LIST_SIZE 0x00000201
253 +#define IP_SET_OP_SAVE_SIZE 0x00000202
254 +struct ip_set_req_setnames {
256 + ip_set_id_t index; /* set to list/save */
257 + u_int32_t size; /* size to get setdata/bindings */
258 + /* followed by sets number of struct ip_set_name_list */
261 +struct ip_set_name_list {
262 + char name[IP_SET_MAXNAMELEN];
263 + char typename[IP_SET_MAXNAMELEN];
268 +/* The actual list operation */
269 +#define IP_SET_OP_LIST 0x00000203
270 +struct ip_set_req_list {
271 + IP_SET_REQ_BYINDEX;
272 + /* sets number of struct ip_set_list in reply */
275 +struct ip_set_list {
277 + ip_set_id_t binding;
279 + u_int32_t header_size; /* Set header data of header_size */
280 + u_int32_t members_size; /* Set members data of members_size */
281 + u_int32_t bindings_size;/* Set bindings data of bindings_size */
284 +struct ip_set_hash_list {
286 + ip_set_id_t binding;
289 +/* The save operation */
290 +#define IP_SET_OP_SAVE 0x00000204
291 +/* Uses ip_set_req_list, in the reply replaced by
292 + * sets number of struct ip_set_save plus a marker
293 + * ip_set_save followed by ip_set_hash_save structures.
295 +struct ip_set_save {
297 + ip_set_id_t binding;
298 + u_int32_t header_size; /* Set header data of header_size */
299 + u_int32_t members_size; /* Set members data of members_size */
302 +/* At restoring, ip == 0 means default binding for the given set: */
303 +struct ip_set_hash_save {
306 + ip_set_id_t binding;
309 +/* The restore operation */
310 +#define IP_SET_OP_RESTORE 0x00000205
311 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
312 + * plus a marker ip_set_restore, followed by ip_set_hash_save
315 +struct ip_set_restore {
316 + char name[IP_SET_MAXNAMELEN];
317 + char typename[IP_SET_MAXNAMELEN];
319 + u_int32_t header_size; /* Create data of header_size */
320 + u_int32_t members_size; /* Set members data of members_size */
323 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
325 + return 4 * ((((b - a + 8) / 8) + 3) / 4);
328 +/* General limit for the elements in a set */
329 +#define MAX_RANGE 0x0000FFFF
332 +#include <linux/netfilter_ipv4/ip_set_compat.h>
333 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
335 +#define ip_set_printk(format, args...) \
337 + printk("%s: %s: ", __FILE__, __FUNCTION__); \
338 + printk(format "\n" , ## args); \
341 +#if defined(IP_SET_DEBUG)
342 +#define DP(format, args...) \
344 + printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
345 + printk(format "\n" , ## args); \
347 +#define IP_SET_ASSERT(x) \
350 + printk("IP_SET_ASSERT: %s:%i(%s)\n", \
351 + __FILE__, __LINE__, __FUNCTION__); \
354 +#define DP(format, args...)
355 +#define IP_SET_ASSERT(x)
361 + * The ip_set_type definition - one per set type, e.g. "ipmap".
363 + * Each individual set has a pointer, set->type, going to one
364 + * of these structures. Function pointers inside the structure implement
365 + * the real behaviour of the sets.
367 + * If not mentioned differently, the implementation behind the function
368 + * pointers of a set_type, is expected to return 0 if ok, and a negative
369 + * errno (e.g. -EINVAL) on error.
371 +struct ip_set_type {
372 + struct list_head list; /* next in list of set types */
374 + /* test for IP in set (kernel: iptables -m set src|dst)
375 + * return 0 if not in set, 1 if in set.
377 + int (*testip_kernel) (struct ip_set *set,
378 + const struct sk_buff * skb,
380 + const u_int32_t *flags,
381 + unsigned char index);
383 + /* test for IP in set (userspace: ipset -T set IP)
384 + * return 0 if not in set, 1 if in set.
386 + int (*testip) (struct ip_set *set,
387 + const void *data, u_int32_t size,
391 + * Size of the data structure passed by when
392 + * adding/deleting/testing an entry.
396 + /* Add IP into set (userspace: ipset -A set IP)
397 + * Return -EEXIST if the address is already in the set,
398 + * and -ERANGE if the address lies outside the set bounds.
399 + * If the address was not already in the set, 0 is returned.
401 + int (*addip) (struct ip_set *set,
402 + const void *data, u_int32_t size,
405 + /* Add IP into set (kernel: iptables ... -j SET set src|dst)
406 + * Return -EEXIST if the address is already in the set,
407 + * and -ERANGE if the address lies outside the set bounds.
408 + * If the address was not already in the set, 0 is returned.
410 + int (*addip_kernel) (struct ip_set *set,
411 + const struct sk_buff * skb,
413 + const u_int32_t *flags,
414 + unsigned char index);
416 + /* remove IP from set (userspace: ipset -D set --entry x)
417 + * Return -EEXIST if the address is NOT in the set,
418 + * and -ERANGE if the address lies outside the set bounds.
419 + * If the address really was in the set, 0 is returned.
421 + int (*delip) (struct ip_set *set,
422 + const void *data, u_int32_t size,
425 + /* remove IP from set (kernel: iptables ... -j SET --entry x)
426 + * Return -EEXIST if the address is NOT in the set,
427 + * and -ERANGE if the address lies outside the set bounds.
428 + * If the address really was in the set, 0 is returned.
430 + int (*delip_kernel) (struct ip_set *set,
431 + const struct sk_buff * skb,
433 + const u_int32_t *flags,
434 + unsigned char index);
436 +	/* new set creation - allocates type specific items
438 + int (*create) (struct ip_set *set,
439 + const void *data, u_int32_t size);
441 + /* retry the operation after successfully tweaking the set
443 + int (*retry) (struct ip_set *set);
445 + /* set destruction - free type specific items
446 + * There is no return value.
447 + * Can be called only when child sets are destroyed.
449 + void (*destroy) (struct ip_set *set);
451 + /* set flushing - reset all bits in the set, or something similar.
452 + * There is no return value.
454 + void (*flush) (struct ip_set *set);
456 + /* Listing: size needed for header
458 + u_int32_t header_size;
460 + /* Listing: Get the header
462 + * Fill in the information in "data".
463 + * This function is always run after list_header_size() under a
464 +	 * writelock on the set. Therefore the length of "data" is always
467 + void (*list_header) (const struct ip_set *set,
470 + /* Listing: Get the size for the set members
472 + int (*list_members_size) (const struct ip_set *set);
474 + /* Listing: Get the set members
476 + * Fill in the information in "data".
477 +	 * This function is always run after list_members_size() under a
478 +	 * writelock on the set. Therefore the length of "data" is always
481 + void (*list_members) (const struct ip_set *set,
484 + char typename[IP_SET_MAXNAMELEN];
485 + unsigned char features;
486 + int protocol_version;
488 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
492 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
493 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
495 +/* A generic ipset */
497 + char name[IP_SET_MAXNAMELEN]; /* the name of the set */
498 + rwlock_t lock; /* lock for concurrency control */
499 + ip_set_id_t id; /* set id for swapping */
500 + ip_set_id_t binding; /* default binding for the set */
501 + atomic_t ref; /* in kernel and in hash references */
502 + struct ip_set_type *type; /* the set types */
503 + void *data; /* pooltype specific data */
506 +/* Structure to bind set elements to sets */
507 +struct ip_set_hash {
508 + struct list_head list; /* list of clashing entries in hash */
509 + ip_set_ip_t ip; /* ip from set */
510 + ip_set_id_t id; /* set id */
511 + ip_set_id_t binding; /* set we bind the element to */
514 +/* register and unregister set references */
515 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
516 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
517 +extern void ip_set_put_byindex(ip_set_id_t index);
518 +extern ip_set_id_t ip_set_id(ip_set_id_t index);
519 +extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
520 + struct ip_set **set);
521 +extern void __ip_set_put_byindex(ip_set_id_t index);
523 +/* API for iptables set match, and SET target */
524 +extern int ip_set_addip_kernel(ip_set_id_t id,
525 + const struct sk_buff *skb,
526 + const u_int32_t *flags);
527 +extern int ip_set_delip_kernel(ip_set_id_t id,
528 + const struct sk_buff *skb,
529 + const u_int32_t *flags);
530 +extern int ip_set_testip_kernel(ip_set_id_t id,
531 + const struct sk_buff *skb,
532 + const u_int32_t *flags);
534 +/* Macros to generate functions */
536 +#define STRUCT(pre, type) CONCAT2(pre, type)
537 +#define CONCAT2(pre, type) struct pre##type
539 +#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
540 +#define CONCAT3(pre, mid, post) pre##mid##post
542 +#define UADT0(type, adt, args...) \
544 +FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
545 + ip_set_ip_t *hash_ip) \
547 + const STRUCT(ip_set_req_,type) *req = data; \
549 + return FNAME(type,_,adt)(set, hash_ip , ## args); \
552 +#define UADT(type, adt, args...) \
553 + UADT0(type, adt, req->ip , ## args)
555 +#define KADT(type, adt, getfn, args...) \
557 +FNAME(type,_k,adt)(struct ip_set *set, \
558 + const struct sk_buff *skb, \
559 + ip_set_ip_t *hash_ip, \
560 + const u_int32_t *flags, \
561 + unsigned char index) \
563 + ip_set_ip_t ip = getfn(skb, flags[index]); \
566 + return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
569 +#define REGISTER_MODULE(type) \
570 +static int __init ip_set_##type##_init(void) \
572 + init_max_page_size(); \
573 + return ip_set_register_set_type(&ip_set_##type); \
576 +static void __exit ip_set_##type##_fini(void) \
578 + /* FIXME: possible race with ip_set_create() */ \
579 + ip_set_unregister_set_type(&ip_set_##type); \
582 +module_init(ip_set_##type##_init); \
583 +module_exit(ip_set_##type##_fini);
585 +/* Common functions */
587 +static inline ip_set_ip_t
588 +ipaddr(const struct sk_buff *skb, u_int32_t flag)
590 + return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
593 +#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
595 +#define pack_ip_port(map, ip, port) \
596 + (port + ((ip - ((map)->first_ip)) << 16))
598 +#endif /* __KERNEL__ */
600 +#endif /*_IP_SET_H*/
602 +++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
604 +#ifndef __IP_SET_BITMAPS_H
605 +#define __IP_SET_BITMAPS_H
607 +/* Macros to generate functions */
610 +#define BITMAP_CREATE(type) \
612 +type##_create(struct ip_set *set, const void *data, u_int32_t size) \
615 + const struct ip_set_req_##type##_create *req = data; \
616 + struct ip_set_##type *map; \
618 + if (req->from > req->to) { \
623 + map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
625 + DP("out of memory for %zu bytes", \
626 + sizeof(struct ip_set_##type)); \
629 + map->first_ip = req->from; \
630 + map->last_ip = req->to; \
632 + newbytes = __##type##_create(req, map); \
633 + if (newbytes < 0) { \
638 + map->size = newbytes; \
639 + map->members = ip_set_malloc(newbytes); \
640 + if (!map->members) { \
641 + DP("out of memory for %i bytes", newbytes); \
645 + memset(map->members, 0, newbytes); \
651 +#define BITMAP_DESTROY(type) \
653 +type##_destroy(struct ip_set *set) \
655 + struct ip_set_##type *map = set->data; \
657 + ip_set_free(map->members, map->size); \
660 + set->data = NULL; \
663 +#define BITMAP_FLUSH(type) \
665 +type##_flush(struct ip_set *set) \
667 + struct ip_set_##type *map = set->data; \
668 + memset(map->members, 0, map->size); \
671 +#define BITMAP_LIST_HEADER(type) \
673 +type##_list_header(const struct ip_set *set, void *data) \
675 + const struct ip_set_##type *map = set->data; \
676 + struct ip_set_req_##type##_create *header = data; \
678 + header->from = map->first_ip; \
679 + header->to = map->last_ip; \
680 + __##type##_list_header(map, header); \
683 +#define BITMAP_LIST_MEMBERS_SIZE(type) \
685 +type##_list_members_size(const struct ip_set *set) \
687 + const struct ip_set_##type *map = set->data; \
689 + return map->size; \
692 +#define BITMAP_LIST_MEMBERS(type) \
694 +type##_list_members(const struct ip_set *set, void *data) \
696 + const struct ip_set_##type *map = set->data; \
698 + memcpy(data, map->members, map->size); \
701 +#define IP_SET_TYPE(type, __features) \
702 +struct ip_set_type ip_set_##type = { \
703 + .typename = #type, \
704 + .features = __features, \
705 + .protocol_version = IP_SET_PROTOCOL_VERSION, \
706 + .create = &type##_create, \
707 + .destroy = &type##_destroy, \
708 + .flush = &type##_flush, \
709 + .reqsize = sizeof(struct ip_set_req_##type), \
710 + .addip = &type##_uadd, \
711 + .addip_kernel = &type##_kadd, \
712 + .delip = &type##_udel, \
713 + .delip_kernel = &type##_kdel, \
714 + .testip = &type##_utest, \
715 + .testip_kernel = &type##_ktest, \
716 + .header_size = sizeof(struct ip_set_req_##type##_create),\
717 + .list_header = &type##_list_header, \
718 + .list_members_size = &type##_list_members_size, \
719 + .list_members = &type##_list_members, \
720 + .me = THIS_MODULE, \
722 +#endif /* __KERNEL */
724 +#endif /* __IP_SET_BITMAPS_H */
726 +++ b/include/linux/netfilter_ipv4/ip_set_compat.h
728 +#ifndef _IP_SET_COMPAT_H
729 +#define _IP_SET_COMPAT_H
732 +#include <linux/version.h>
736 +#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
737 +#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
739 +#define __MOD_INC(foo) 1
740 +#define __MOD_DEC(foo)
743 +/* Backward compatibility */
751 +/* Compatibility glue code */
752 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
753 +#include <linux/interrupt.h>
754 +#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
755 +#define try_module_get(x) __MOD_INC(x)
756 +#define module_put(x) __MOD_DEC(x)
757 +#define __clear_bit(nr, addr) clear_bit(nr, addr)
758 +#define __set_bit(nr, addr) set_bit(nr, addr)
759 +#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
760 +#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
762 +typedef unsigned __bitwise__ gfp_t;
764 +static inline void *kzalloc(size_t size, gfp_t flags)
766 + void *data = kmalloc(size, flags);
769 + memset(data, 0, size);
775 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
776 +#define __KMEM_CACHE_T__ kmem_cache_t
778 +#define __KMEM_CACHE_T__ struct kmem_cache
781 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
782 +#define ip_hdr(skb) ((skb)->nh.iph)
783 +#define skb_mac_header(skb) ((skb)->mac.raw)
784 +#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
787 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
788 +#include <linux/netfilter.h>
789 +#define KMEM_CACHE_CREATE(name, size) \
790 + kmem_cache_create(name, size, 0, 0, NULL, NULL)
792 +#define KMEM_CACHE_CREATE(name, size) \
793 + kmem_cache_create(name, size, 0, 0, NULL)
797 +#endif /* __KERNEL__ */
798 +#endif /* _IP_SET_COMPAT_H */
800 +++ b/include/linux/netfilter_ipv4/ip_set_getport.h
802 +#ifndef _IP_SET_GETPORT_H
803 +#define _IP_SET_GETPORT_H
807 +#define INVALID_PORT (MAX_RANGE + 1)
809 +/* We must handle non-linear skbs */
810 +static inline ip_set_ip_t
811 +get_port(const struct sk_buff *skb, u_int32_t flags)
813 + struct iphdr *iph = ip_hdr(skb);
814 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
815 + switch (iph->protocol) {
816 + case IPPROTO_TCP: {
817 + struct tcphdr tcph;
819 + /* See comments at tcp_match in ip_tables.c */
821 + return INVALID_PORT;
823 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
824 + /* No choice either */
825 + return INVALID_PORT;
827 + return ntohs(flags & IPSET_SRC ?
828 + tcph.source : tcph.dest);
830 + case IPPROTO_UDP: {
831 + struct udphdr udph;
834 + return INVALID_PORT;
836 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
837 + /* No choice either */
838 + return INVALID_PORT;
840 + return ntohs(flags & IPSET_SRC ?
841 + udph.source : udph.dest);
844 + return INVALID_PORT;
847 +#endif /* __KERNEL__ */
849 +#endif /*_IP_SET_GETPORT_H*/
851 +++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
853 +#ifndef __IP_SET_HASHES_H
854 +#define __IP_SET_HASHES_H
856 +#define initval_t uint32_t
858 +/* Macros to generate functions */
861 +#define HASH_RETRY0(type, dtype, cond) \
863 +type##_retry(struct ip_set *set) \
865 + struct ip_set_##type *map = set->data, *tmp; \
868 + u_int32_t i, hashsize = map->hashsize; \
871 + if (map->resize == 0) \
877 + /* Calculate new hash size */ \
878 + hashsize += (hashsize * map->resize)/100; \
879 + if (hashsize == map->hashsize) \
882 + ip_set_printk("rehashing of set %s triggered: " \
883 + "hashsize grows from %lu to %lu", \
885 + (long unsigned)map->hashsize, \
886 + (long unsigned)hashsize); \
888 + tmp = kmalloc(sizeof(struct ip_set_##type) \
889 + + map->probes * sizeof(initval_t), GFP_ATOMIC); \
891 + DP("out of memory for %zu bytes", \
892 + sizeof(struct ip_set_##type) \
893 + + map->probes * sizeof(initval_t)); \
896 + tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
897 + if (!tmp->members) { \
898 + DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
902 + tmp->hashsize = hashsize; \
903 + tmp->elements = 0; \
904 + tmp->probes = map->probes; \
905 + tmp->resize = map->resize; \
906 + memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
907 + __##type##_retry(tmp, map); \
909 + write_lock_bh(&set->lock); \
910 + map = set->data; /* Play safe */ \
911 + for (i = 0; i < map->hashsize && res == 0; i++) { \
912 + elem = HARRAY_ELEM(map->members, dtype *, i); \
914 + res = __##type##_add(tmp, elem); \
917 + /* Failure, try again */ \
918 + write_unlock_bh(&set->lock); \
919 + harray_free(tmp->members); \
924 + /* Success at resizing! */ \
925 + members = map->members; \
927 + map->hashsize = tmp->hashsize; \
928 + map->members = tmp->members; \
929 + write_unlock_bh(&set->lock); \
931 + harray_free(members); \
937 +#define HASH_RETRY(type, dtype) \
938 + HASH_RETRY0(type, dtype, *elem)
940 +#define HASH_RETRY2(type, dtype) \
941 + HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
943 +#define HASH_CREATE(type, dtype) \
945 +type##_create(struct ip_set *set, const void *data, u_int32_t size) \
947 + const struct ip_set_req_##type##_create *req = data; \
948 + struct ip_set_##type *map; \
951 + if (req->hashsize < 1) { \
952 + ip_set_printk("hashsize too small"); \
956 + if (req->probes < 1) { \
957 + ip_set_printk("probes too small"); \
961 + map = kmalloc(sizeof(struct ip_set_##type) \
962 + + req->probes * sizeof(initval_t), GFP_KERNEL); \
964 + DP("out of memory for %zu bytes", \
965 + sizeof(struct ip_set_##type) \
966 + + req->probes * sizeof(initval_t)); \
969 + for (i = 0; i < req->probes; i++) \
970 + get_random_bytes(((initval_t *) map->initval)+i, 4); \
971 + map->elements = 0; \
972 + map->hashsize = req->hashsize; \
973 + map->probes = req->probes; \
974 + map->resize = req->resize; \
975 + if (__##type##_create(req, map)) { \
979 + map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
980 + if (!map->members) { \
981 + DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
990 +#define HASH_DESTROY(type) \
992 +type##_destroy(struct ip_set *set) \
994 + struct ip_set_##type *map = set->data; \
996 + harray_free(map->members); \
999 + set->data = NULL; \
1002 +#define HASH_FLUSH(type, dtype) \
1004 +type##_flush(struct ip_set *set) \
1006 + struct ip_set_##type *map = set->data; \
1007 + harray_flush(map->members, map->hashsize, sizeof(dtype)); \
1008 + map->elements = 0; \
1011 +#define HASH_FLUSH_CIDR(type, dtype) \
1013 +type##_flush(struct ip_set *set) \
1015 + struct ip_set_##type *map = set->data; \
1016 + harray_flush(map->members, map->hashsize, sizeof(dtype)); \
1017 + memset(map->cidr, 0, sizeof(map->cidr)); \
1018 + memset(map->nets, 0, sizeof(map->nets)); \
1019 + map->elements = 0; \
1022 +#define HASH_LIST_HEADER(type) \
1024 +type##_list_header(const struct ip_set *set, void *data) \
1026 + const struct ip_set_##type *map = set->data; \
1027 + struct ip_set_req_##type##_create *header = data; \
1029 + header->hashsize = map->hashsize; \
1030 + header->probes = map->probes; \
1031 + header->resize = map->resize; \
1032 + __##type##_list_header(map, header); \
1035 +#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
1037 +type##_list_members_size(const struct ip_set *set) \
1039 + const struct ip_set_##type *map = set->data; \
1041 + return (map->hashsize * sizeof(dtype)); \
1044 +#define HASH_LIST_MEMBERS(type, dtype) \
1046 +type##_list_members(const struct ip_set *set, void *data) \
1048 + const struct ip_set_##type *map = set->data; \
1052 + for (i = 0; i < map->hashsize; i++) { \
1053 + elem = HARRAY_ELEM(map->members, dtype *, i); \
1054 + ((dtype *)data)[i] = *elem; \
1058 +#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
1060 +type##_list_members(const struct ip_set *set, void *data) \
1062 + const struct ip_set_##type *map = set->data; \
1066 + for (i = 0; i < map->hashsize; i++) { \
1067 + elem = HARRAY_ELEM(map->members, dtype *, i); \
1068 + memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
1072 +#define IP_SET_RTYPE(type, __features) \
1073 +struct ip_set_type ip_set_##type = { \
1074 + .typename = #type, \
1075 + .features = __features, \
1076 + .protocol_version = IP_SET_PROTOCOL_VERSION, \
1077 + .create = &type##_create, \
1078 + .retry = &type##_retry, \
1079 + .destroy = &type##_destroy, \
1080 + .flush = &type##_flush, \
1081 + .reqsize = sizeof(struct ip_set_req_##type), \
1082 + .addip = &type##_uadd, \
1083 + .addip_kernel = &type##_kadd, \
1084 + .delip = &type##_udel, \
1085 + .delip_kernel = &type##_kdel, \
1086 + .testip = &type##_utest, \
1087 + .testip_kernel = &type##_ktest, \
1088 + .header_size = sizeof(struct ip_set_req_##type##_create),\
1089 + .list_header = &type##_list_header, \
1090 + .list_members_size = &type##_list_members_size, \
1091 + .list_members = &type##_list_members, \
1092 + .me = THIS_MODULE, \
1095 +/* Helper functions */
1097 +add_cidr_size(uint8_t *cidr, uint8_t size)
1102 + for (i = 0; i < 30 && cidr[i]; i++) {
1103 + if (cidr[i] < size) {
1114 +del_cidr_size(uint8_t *cidr, uint8_t size)
1118 + for (i = 0; i < 29 && cidr[i]; i++) {
1119 + if (cidr[i] == size)
1120 + cidr[i] = size = cidr[i+1];
1125 +#include <arpa/inet.h>
1126 +#endif /* __KERNEL */
1129 +#define UINT16_MAX 65535
1132 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
1134 +static inline ip_set_ip_t
1135 +pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
1137 + ip_set_ip_t addr, *paddr = &addr;
1138 + unsigned char n, t, *a;
1140 + addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
1142 + DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
1146 + a = &((unsigned char *)paddr)[n];
1147 + *a = *a /(1 << (8 - t)) + shifts[t];
1149 + DP("n: %u, t: %u, a: %u", n, t, *a);
1150 + DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
1151 + HIPQUAD(ip), cidr, NIPQUAD(addr));
1154 + return ntohl(addr);
1158 +#endif /* __IP_SET_HASHES_H */
1160 +++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
1162 +#ifndef __IP_SET_IPHASH_H
1163 +#define __IP_SET_IPHASH_H
1165 +#include <linux/netfilter_ipv4/ip_set.h>
1166 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1168 +#define SETTYPE_NAME "iphash"
1170 +struct ip_set_iphash {
1171 + ip_set_ip_t *members; /* the iphash proper */
1172 + uint32_t elements; /* number of elements */
1173 + uint32_t hashsize; /* hash size */
1174 + uint16_t probes; /* max number of probes */
1175 + uint16_t resize; /* resize factor in percent */
1176 + ip_set_ip_t netmask; /* netmask */
1177 + initval_t initval[0]; /* initvals for jhash_1word */
1180 +struct ip_set_req_iphash_create {
1181 + uint32_t hashsize;
1184 + ip_set_ip_t netmask;
1187 +struct ip_set_req_iphash {
1191 +#endif /* __IP_SET_IPHASH_H */
1193 +++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
1195 +#ifndef __IP_SET_IPMAP_H
1196 +#define __IP_SET_IPMAP_H
1198 +#include <linux/netfilter_ipv4/ip_set.h>
1199 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
1201 +#define SETTYPE_NAME "ipmap"
1203 +struct ip_set_ipmap {
1204 + void *members; /* the ipmap proper */
1205 + ip_set_ip_t first_ip; /* host byte order, included in range */
1206 + ip_set_ip_t last_ip; /* host byte order, included in range */
1207 + ip_set_ip_t netmask; /* subnet netmask */
1208 + ip_set_ip_t sizeid; /* size of set in IPs */
1209 + ip_set_ip_t hosts; /* number of hosts in a subnet */
1210 + u_int32_t size; /* size of the ipmap proper */
1213 +struct ip_set_req_ipmap_create {
1216 + ip_set_ip_t netmask;
1219 +struct ip_set_req_ipmap {
1223 +static inline unsigned int
1224 +mask_to_bits(ip_set_ip_t mask)
1226 + unsigned int bits = 32;
1227 + ip_set_ip_t maskaddr;
1229 + if (mask == 0xFFFFFFFF)
1232 + maskaddr = 0xFFFFFFFE;
1233 + while (--bits > 0 && maskaddr != mask)
1239 +static inline ip_set_ip_t
1240 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
1242 + ip_set_ip_t mask = 0xFFFFFFFE;
1245 + while (--(*bits) > 0 && mask && (to & mask) != from)
1251 +#endif /* __IP_SET_IPMAP_H */
1253 +++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
1255 +#ifndef __IP_SET_IPPORTHASH_H
1256 +#define __IP_SET_IPPORTHASH_H
1258 +#include <linux/netfilter_ipv4/ip_set.h>
1259 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1261 +#define SETTYPE_NAME "ipporthash"
1263 +struct ip_set_ipporthash {
1264 + ip_set_ip_t *members; /* the ipporthash proper */
1265 + uint32_t elements; /* number of elements */
1266 + uint32_t hashsize; /* hash size */
1267 + uint16_t probes; /* max number of probes */
1268 + uint16_t resize; /* resize factor in percent */
1269 + ip_set_ip_t first_ip; /* host byte order, included in range */
1270 + ip_set_ip_t last_ip; /* host byte order, included in range */
1271 + initval_t initval[0]; /* initvals for jhash_1word */
1274 +struct ip_set_req_ipporthash_create {
1275 + uint32_t hashsize;
1282 +struct ip_set_req_ipporthash {
1287 +#endif /* __IP_SET_IPPORTHASH_H */
1289 +++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
1291 +#ifndef __IP_SET_IPPORTIPHASH_H
1292 +#define __IP_SET_IPPORTIPHASH_H
1294 +#include <linux/netfilter_ipv4/ip_set.h>
1295 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1297 +#define SETTYPE_NAME "ipportiphash"
1304 +struct ip_set_ipportiphash {
1305 + struct ipportip *members; /* the ipportip proper */
1306 + uint32_t elements; /* number of elements */
1307 + uint32_t hashsize; /* hash size */
1308 + uint16_t probes; /* max number of probes */
1309 + uint16_t resize; /* resize factor in percent */
1310 + ip_set_ip_t first_ip; /* host byte order, included in range */
1311 + ip_set_ip_t last_ip; /* host byte order, included in range */
1312 + initval_t initval[0]; /* initvals for jhash_1word */
1315 +struct ip_set_req_ipportiphash_create {
1316 + uint32_t hashsize;
1323 +struct ip_set_req_ipportiphash {
1329 +#endif /* __IP_SET_IPPORTIPHASH_H */
1331 +++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
1333 +#ifndef __IP_SET_IPPORTNETHASH_H
1334 +#define __IP_SET_IPPORTNETHASH_H
1336 +#include <linux/netfilter_ipv4/ip_set.h>
1337 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1339 +#define SETTYPE_NAME "ipportnethash"
1346 +struct ip_set_ipportnethash {
1347 + struct ipportip *members; /* the ipportip proper */
1348 + uint32_t elements; /* number of elements */
1349 + uint32_t hashsize; /* hash size */
1350 + uint16_t probes; /* max number of probes */
1351 + uint16_t resize; /* resize factor in percent */
1352 + ip_set_ip_t first_ip; /* host byte order, included in range */
1353 + ip_set_ip_t last_ip; /* host byte order, included in range */
1354 + uint8_t cidr[30]; /* CIDR sizes */
1355 + uint16_t nets[30]; /* nr of nets by CIDR sizes */
1356 + initval_t initval[0]; /* initvals for jhash_1word */
1359 +struct ip_set_req_ipportnethash_create {
1360 + uint32_t hashsize;
1367 +struct ip_set_req_ipportnethash {
1374 +#endif /* __IP_SET_IPPORTNETHASH_H */
1376 +++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
1378 +#ifndef __IP_SET_IPTREE_H
1379 +#define __IP_SET_IPTREE_H
1381 +#include <linux/netfilter_ipv4/ip_set.h>
1383 +#define SETTYPE_NAME "iptree"
1385 +struct ip_set_iptreed {
1386 + unsigned long expires[256]; /* x.x.x.ADDR */
1389 +struct ip_set_iptreec {
1390 + struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
1393 +struct ip_set_iptreeb {
1394 + struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
1397 +struct ip_set_iptree {
1398 + unsigned int timeout;
1399 + unsigned int gc_interval;
1401 + uint32_t elements; /* number of elements */
1402 + struct timer_list gc;
1403 + struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
1407 +struct ip_set_req_iptree_create {
1408 + unsigned int timeout;
1411 +struct ip_set_req_iptree {
1413 + unsigned int timeout;
1416 +#endif /* __IP_SET_IPTREE_H */
1418 +++ b/include/linux/netfilter_ipv4/ip_set_iptreemap.h
1420 +#ifndef __IP_SET_IPTREEMAP_H
1421 +#define __IP_SET_IPTREEMAP_H
1423 +#include <linux/netfilter_ipv4/ip_set.h>
1425 +#define SETTYPE_NAME "iptreemap"
1428 +struct ip_set_iptreemap_d {
1429 + unsigned char bitmap[32]; /* x.x.x.y */
1432 +struct ip_set_iptreemap_c {
1433 + struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
1436 +struct ip_set_iptreemap_b {
1437 + struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
1438 + unsigned char dirty[32];
1442 +struct ip_set_iptreemap {
1443 + unsigned int gc_interval;
1445 + struct timer_list gc;
1446 + struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
1450 +struct ip_set_req_iptreemap_create {
1451 + unsigned int gc_interval;
1454 +struct ip_set_req_iptreemap {
1459 +#endif /* __IP_SET_IPTREEMAP_H */
1461 +++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
1463 +#ifndef _LINUX_JHASH_H
1464 +#define _LINUX_JHASH_H
1466 +/* jhash.h: Jenkins hash support.
1468 + * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
1470 + * http://burtleburtle.net/bob/hash/
1472 + * These are the credits from Bob's sources:
1474 + * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
1476 + * These are functions for producing 32-bit hashes for hash table lookup.
1477 + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
1478 + * are externally useful functions. Routines to test the hash are included
1479 + * if SELF_TEST is defined. You can use this free for any purpose. It's in
1480 + * the public domain. It has no warranty.
1482 + * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
1484 + * I've modified Bob's hash to be useful in the Linux kernel, and
1485 + * any bugs present are my fault. Jozsef
1488 +#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
1490 +/* __jhash_mix - mix 3 32-bit values reversibly. */
1491 +#define __jhash_mix(a,b,c) \
1493 + a -= c; a ^= __rot(c, 4); c += b; \
1494 + b -= a; b ^= __rot(a, 6); a += c; \
1495 + c -= b; c ^= __rot(b, 8); b += a; \
1496 + a -= c; a ^= __rot(c,16); c += b; \
1497 + b -= a; b ^= __rot(a,19); a += c; \
1498 + c -= b; c ^= __rot(b, 4); b += a; \
1501 +/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
1502 +#define __jhash_final(a,b,c) \
1504 + c ^= b; c -= __rot(b,14); \
1505 + a ^= c; a -= __rot(c,11); \
1506 + b ^= a; b -= __rot(a,25); \
1507 + c ^= b; c -= __rot(b,16); \
1508 + a ^= c; a -= __rot(c,4); \
1509 + b ^= a; b -= __rot(a,14); \
1510 + c ^= b; c -= __rot(b,24); \
1513 +/* The golden ratio: an arbitrary value */
1514 +#define JHASH_GOLDEN_RATIO 0xdeadbeef
1516 +/* The most generic version, hashes an arbitrary sequence
1517 + * of bytes. No alignment or length assumptions are made about
1518 + * the input key. The result depends on endianness.
1520 +static inline u32 jhash(const void *key, u32 length, u32 initval)
1523 + const u8 *k = key;
1525 + /* Set up the internal state */
1526 + a = b = c = JHASH_GOLDEN_RATIO + length + initval;
1528 + /* all but the last block: affect some 32 bits of (a,b,c) */
1529 + while (length > 12) {
1530 + a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
1531 + b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
1532 + c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
1533 + __jhash_mix(a, b, c);
1538 + /* last block: affect all 32 bits of (c) */
1539 + /* all the case statements fall through */
1541 + case 12: c += (u32)k[11]<<24;
1542 + case 11: c += (u32)k[10]<<16;
1543 + case 10: c += (u32)k[9]<<8;
1544 + case 9 : c += k[8];
1545 + case 8 : b += (u32)k[7]<<24;
1546 + case 7 : b += (u32)k[6]<<16;
1547 + case 6 : b += (u32)k[5]<<8;
1548 + case 5 : b += k[4];
1549 + case 4 : a += (u32)k[3]<<24;
1550 + case 3 : a += (u32)k[2]<<16;
1551 + case 2 : a += (u32)k[1]<<8;
1552 + case 1 : a += k[0];
1553 + __jhash_final(a, b, c);
1561 +/* A special optimized version that handles 1 or more of u32s.
1562 + * The length parameter here is the number of u32s in the key.
1564 +static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
1568 + /* Set up the internal state */
1569 + a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
1571 + /* handle most of the key */
1572 + while (length > 3) {
1576 + __jhash_mix(a, b, c);
1581 + /* handle the last 3 u32's */
1582 + /* all the case statements fall through */
1584 + case 3: c += k[2];
1585 + case 2: b += k[1];
1586 + case 1: a += k[0];
1587 + __jhash_final(a, b, c);
1588 + case 0: /* case 0: nothing left to add */
1595 +/* A special ultra-optimized version that knows it is hashing exactly
1596 + * 3, 2 or 1 word(s).
1598 +static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
1600 + a += JHASH_GOLDEN_RATIO + initval;
1601 + b += JHASH_GOLDEN_RATIO + initval;
1602 + c += JHASH_GOLDEN_RATIO + initval;
1604 + __jhash_final(a, b, c);
1609 +static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
1611 + return jhash_3words(0, a, b, initval);
1614 +static inline u32 jhash_1word(u32 a, u32 initval)
1616 + return jhash_3words(0, 0, a, initval);
1619 +#endif /* _LINUX_JHASH_H */
1621 +++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
1623 +#ifndef __IP_SET_MACIPMAP_H
1624 +#define __IP_SET_MACIPMAP_H
1626 +#include <linux/netfilter_ipv4/ip_set.h>
1627 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
1629 +#define SETTYPE_NAME "macipmap"
1631 +/* general flags */
1632 +#define IPSET_MACIP_MATCHUNSET 1
1635 +#define IPSET_MACIP_ISSET 1
1637 +struct ip_set_macipmap {
1638 + void *members; /* the macipmap proper */
1639 + ip_set_ip_t first_ip; /* host byte order, included in range */
1640 + ip_set_ip_t last_ip; /* host byte order, included in range */
1642 +	u_int32_t size;			/* size of the macipmap proper */
1645 +struct ip_set_req_macipmap_create {
1651 +struct ip_set_req_macipmap {
1653 + unsigned char ethernet[ETH_ALEN];
1656 +struct ip_set_macip {
1657 + unsigned short match;
1658 + unsigned char ethernet[ETH_ALEN];
1661 +#endif /* __IP_SET_MACIPMAP_H */
1663 +++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
1665 +#ifndef _IP_SET_MALLOC_H
1666 +#define _IP_SET_MALLOC_H
1669 +#include <linux/vmalloc.h>
1671 +static size_t max_malloc_size = 0, max_page_size = 0;
1672 +static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
1674 +static inline int init_max_page_size(void)
1676 +/* Compatibility glues to support 2.4.36 */
1677 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1678 +#define __GFP_NOWARN 0
1680 + /* Guaranteed: slab.c */
1681 + max_malloc_size = max_page_size = default_max_malloc_size;
1683 + size_t page_size = 0;
1685 +#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
1687 +#include <linux/kmalloc_sizes.h>
1690 + if (max_malloc_size == 0)
1691 + max_malloc_size = page_size;
1693 + max_page_size = page_size;
1702 + size_t max_elements;
1706 +static inline void *
1707 +__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
1709 + struct harray *harray;
1710 + size_t max_elements, size, i, j;
1712 + BUG_ON(max_page_size == 0);
1714 + if (typesize > max_page_size)
1717 + max_elements = max_page_size/typesize;
1718 + size = hashsize/max_elements;
1719 + if (hashsize % max_elements)
1722 + /* Last pointer signals end of arrays */
1723 + harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
1729 + for (i = 0; i < size - 1; i++) {
1730 + harray->arrays[i] = kmalloc(max_elements * typesize, flags);
1731 + if (!harray->arrays[i])
1733 + memset(harray->arrays[i], 0, max_elements * typesize);
1735 + harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
1737 + if (!harray->arrays[i])
1739 + memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
1741 + harray->max_elements = max_elements;
1742 + harray->arrays[size] = NULL;
1744 + return (void *)harray;
1747 + for (j = 0; j < i; j++) {
1748 + kfree(harray->arrays[j]);
1754 +static inline void *
1755 +harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
1760 + harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
1761 + } while (harray == NULL && init_max_page_size());
1766 +static inline void harray_free(void *h)
1768 + struct harray *harray = (struct harray *) h;
1771 + for (i = 0; harray->arrays[i] != NULL; i++)
1772 + kfree(harray->arrays[i]);
1776 +static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
1778 + struct harray *harray = (struct harray *) h;
1781 + for (i = 0; harray->arrays[i+1] != NULL; i++)
1782 + memset(harray->arrays[i], 0, harray->max_elements * typesize);
1783 + memset(harray->arrays[i], 0,
1784 + (hashsize - i * harray->max_elements) * typesize);
1787 +#define HARRAY_ELEM(h, type, which) \
1789 + struct harray *__h = (struct harray *)(h); \
1790 + ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
1791 + + (which)%(__h)->max_elements); \
1794 +/* General memory allocation and deallocation */
1795 +static inline void * ip_set_malloc(size_t bytes)
1797 + BUG_ON(max_malloc_size == 0);
1799 + if (bytes > default_max_malloc_size)
1800 + return vmalloc(bytes);
1802 + return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
1805 +static inline void ip_set_free(void * data, size_t bytes)
1807 + BUG_ON(max_malloc_size == 0);
1809 + if (bytes > default_max_malloc_size)
1815 +#endif /* __KERNEL__ */
1817 +#endif /*_IP_SET_MALLOC_H*/
1819 +++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
1821 +#ifndef __IP_SET_NETHASH_H
1822 +#define __IP_SET_NETHASH_H
1824 +#include <linux/netfilter_ipv4/ip_set.h>
1825 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1827 +#define SETTYPE_NAME "nethash"
1829 +struct ip_set_nethash {
1830 + ip_set_ip_t *members; /* the nethash proper */
1831 + uint32_t elements; /* number of elements */
1832 + uint32_t hashsize; /* hash size */
1833 + uint16_t probes; /* max number of probes */
1834 + uint16_t resize; /* resize factor in percent */
1835 + uint8_t cidr[30]; /* CIDR sizes */
1836 + uint16_t nets[30]; /* nr of nets by CIDR sizes */
1837 + initval_t initval[0]; /* initvals for jhash_1word */
1840 +struct ip_set_req_nethash_create {
1841 + uint32_t hashsize;
1846 +struct ip_set_req_nethash {
1851 +#endif /* __IP_SET_NETHASH_H */
1853 +++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
1855 +#ifndef __IP_SET_PORTMAP_H
1856 +#define __IP_SET_PORTMAP_H
1858 +#include <linux/netfilter_ipv4/ip_set.h>
1859 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
1861 +#define SETTYPE_NAME "portmap"
1863 +struct ip_set_portmap {
1864 + void *members; /* the portmap proper */
1865 + ip_set_ip_t first_ip; /* host byte order, included in range */
1866 + ip_set_ip_t last_ip; /* host byte order, included in range */
1867 +	u_int32_t size;			/* size of the portmap proper */
1870 +struct ip_set_req_portmap_create {
1875 +struct ip_set_req_portmap {
1879 +#endif /* __IP_SET_PORTMAP_H */
1881 +++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
1883 +#ifndef __IP_SET_SETLIST_H
1884 +#define __IP_SET_SETLIST_H
1886 +#include <linux/netfilter_ipv4/ip_set.h>
1888 +#define SETTYPE_NAME "setlist"
1890 +#define IP_SET_SETLIST_ADD_AFTER 0
1891 +#define IP_SET_SETLIST_ADD_BEFORE 1
1893 +struct ip_set_setlist {
1895 + ip_set_id_t index[0];
1898 +struct ip_set_req_setlist_create {
1902 +struct ip_set_req_setlist {
1903 + char name[IP_SET_MAXNAMELEN];
1904 + char ref[IP_SET_MAXNAMELEN];
1908 +#endif /* __IP_SET_SETLIST_H */
1910 +++ b/include/linux/netfilter_ipv4/ipt_set.h
1915 +#include <linux/netfilter_ipv4/ip_set.h>
1917 +struct ipt_set_info {
1918 + ip_set_id_t index;
1919 + u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1923 +struct ipt_set_info_match {
1924 + struct ipt_set_info match_set;
1927 +struct ipt_set_info_target {
1928 + struct ipt_set_info add_set;
1929 + struct ipt_set_info del_set;
1932 +#endif /*_IPT_SET_H*/
1934 +++ b/net/ipv4/netfilter/ip_set.c
1936 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1937 + * Patrick Schaaf <bof@bof.de>
1938 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1940 + * This program is free software; you can redistribute it and/or modify
1941 + * it under the terms of the GNU General Public License version 2 as
1942 + * published by the Free Software Foundation.
1945 +/* Kernel module for IP set management */
1947 +#include <linux/version.h>
1948 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1949 +#include <linux/config.h>
1951 +#include <linux/module.h>
1952 +#include <linux/moduleparam.h>
1953 +#include <linux/kmod.h>
1954 +#include <linux/ip.h>
1955 +#include <linux/skbuff.h>
1956 +#include <linux/random.h>
1957 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
1958 +#include <linux/errno.h>
1959 +#include <linux/capability.h>
1960 +#include <asm/uaccess.h>
1961 +#include <asm/bitops.h>
1962 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
1963 +#include <asm/semaphore.h>
1965 +#include <linux/semaphore.h>
1967 +#include <linux/spinlock.h>
1969 +#define ASSERT_READ_LOCK(x)
1970 +#define ASSERT_WRITE_LOCK(x)
1971 +#include <linux/netfilter.h>
1972 +#include <linux/netfilter_ipv4/ip_set.h>
1974 +static struct list_head set_type_list; /* all registered sets */
1975 +static struct ip_set **ip_set_list; /* all individual sets */
1976 +static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
1977 +static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
1978 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1979 +static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
1980 +static struct list_head *ip_set_hash; /* hash of bindings */
1981 +static unsigned int ip_set_hash_random; /* random seed */
1983 +#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
1986 + * Sets are identified either by the index in ip_set_list or by id.
1987 + * The id never changes and is used to find a key in the hash.
1988 + * The index may change by swapping and used at all other places
1989 + * (set/SET netfilter modules, binding value, etc.)
1991 + * Userspace requests are serialized by ip_set_mutex and sets can
1992 + * be deleted only from userspace. Therefore ip_set_list locking
1993 + * must obey the following rules:
1995 + * - kernel requests: read and write locking mandatory
1996 + * - user requests: read locking optional, write locking mandatory
2000 +__ip_set_get(ip_set_id_t index)
2002 + atomic_inc(&ip_set_list[index]->ref);
2006 +__ip_set_put(ip_set_id_t index)
2008 + atomic_dec(&ip_set_list[index]->ref);
2012 + * Binding routines
2015 +static inline struct ip_set_hash *
2016 +__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
2018 + struct ip_set_hash *set_hash;
2020 + list_for_each_entry(set_hash, &ip_set_hash[key], list)
2021 + if (set_hash->id == id && set_hash->ip == ip)
2028 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
2030 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
2031 + % ip_set_bindings_hash_size;
2032 + struct ip_set_hash *set_hash;
2034 + ASSERT_READ_LOCK(&ip_set_lock);
2035 + IP_SET_ASSERT(ip_set_list[id]);
2036 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
2038 + set_hash = __ip_set_find(key, id, ip);
2040 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
2042 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
2044 + return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
2048 +__set_hash_del(struct ip_set_hash *set_hash)
2050 + ASSERT_WRITE_LOCK(&ip_set_lock);
2051 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
2053 + __ip_set_put(set_hash->binding);
2054 + list_del(&set_hash->list);
2059 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
2061 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
2062 + % ip_set_bindings_hash_size;
2063 + struct ip_set_hash *set_hash;
2065 + IP_SET_ASSERT(ip_set_list[id]);
2066 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
2067 + write_lock_bh(&ip_set_lock);
2068 + set_hash = __ip_set_find(key, id, ip);
2069 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
2071 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
2073 + if (set_hash != NULL)
2074 + __set_hash_del(set_hash);
2075 + write_unlock_bh(&ip_set_lock);
2080 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
2082 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
2083 + % ip_set_bindings_hash_size;
2084 + struct ip_set_hash *set_hash;
2087 + IP_SET_ASSERT(ip_set_list[id]);
2088 + IP_SET_ASSERT(ip_set_list[binding]);
2089 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
2090 + HIPQUAD(ip), ip_set_list[binding]->name);
2091 + write_lock_bh(&ip_set_lock);
2092 + set_hash = __ip_set_find(key, id, ip);
2094 + set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
2099 + INIT_LIST_HEAD(&set_hash->list);
2100 + set_hash->id = id;
2101 + set_hash->ip = ip;
2102 + list_add(&set_hash->list, &ip_set_hash[key]);
2104 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
2105 + DP("overwrite binding: %s",
2106 + ip_set_list[set_hash->binding]->name);
2107 + __ip_set_put(set_hash->binding);
2109 + set_hash->binding = binding;
2110 + __ip_set_get(set_hash->binding);
2111 + DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
2112 + key, id, ip_set_list[id]->name,
2113 + HIPQUAD(ip), binding, ip_set_list[binding]->name);
2115 + write_unlock_bh(&ip_set_lock);
2119 +#define FOREACH_HASH_DO(fn, args...) \
2121 + ip_set_id_t __key; \
2122 + struct ip_set_hash *__set_hash; \
2124 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
2125 + list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
2126 + fn(__set_hash , ## args); \
2130 +#define FOREACH_HASH_RW_DO(fn, args...) \
2132 + ip_set_id_t __key; \
2133 + struct ip_set_hash *__set_hash, *__n; \
2135 + ASSERT_WRITE_LOCK(&ip_set_lock); \
2136 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
2137 + list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
2138 + fn(__set_hash , ## args); \
2142 +/* Add, del and test set entries from kernel */
2144 +#define follow_bindings(index, set, ip) \
2145 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
2146 + || (index = (set)->binding) != IP_SET_INVALID_ID)
2149 +ip_set_testip_kernel(ip_set_id_t index,
2150 + const struct sk_buff *skb,
2151 + const u_int32_t *flags)
2153 + struct ip_set *set;
2156 + unsigned char i = 0;
2158 + IP_SET_ASSERT(flags[i]);
2159 + read_lock_bh(&ip_set_lock);
2161 + set = ip_set_list[index];
2162 + IP_SET_ASSERT(set);
2163 + DP("set %s, index %u", set->name, index);
2164 + read_lock_bh(&set->lock);
2165 + res = set->type->testip_kernel(set, skb, &ip, flags, i++);
2166 + read_unlock_bh(&set->lock);
2167 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
2170 + && follow_bindings(index, set, ip));
2171 + read_unlock_bh(&ip_set_lock);
2173 + return (res < 0 ? 0 : res);
2177 +ip_set_addip_kernel(ip_set_id_t index,
2178 + const struct sk_buff *skb,
2179 + const u_int32_t *flags)
2181 + struct ip_set *set;
2184 + unsigned char i = 0;
2186 + IP_SET_ASSERT(flags[i]);
2188 + read_lock_bh(&ip_set_lock);
2190 + set = ip_set_list[index];
2191 + IP_SET_ASSERT(set);
2192 + DP("set %s, index %u", set->name, index);
2193 + write_lock_bh(&set->lock);
2194 + res = set->type->addip_kernel(set, skb, &ip, flags, i++);
2195 + write_unlock_bh(&set->lock);
2196 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
2197 + } while ((res == 0 || res == -EEXIST)
2199 + && follow_bindings(index, set, ip));
2200 + read_unlock_bh(&ip_set_lock);
2202 + if (res == -EAGAIN
2203 + && set->type->retry
2204 + && (res = set->type->retry(set)) == 0)
2211 +ip_set_delip_kernel(ip_set_id_t index,
2212 + const struct sk_buff *skb,
2213 + const u_int32_t *flags)
2215 + struct ip_set *set;
2218 + unsigned char i = 0;
2220 + IP_SET_ASSERT(flags[i]);
2221 + read_lock_bh(&ip_set_lock);
2223 + set = ip_set_list[index];
2224 + IP_SET_ASSERT(set);
2225 + DP("set %s, index %u", set->name, index);
2226 + write_lock_bh(&set->lock);
2227 + res = set->type->delip_kernel(set, skb, &ip, flags, i++);
2228 + write_unlock_bh(&set->lock);
2229 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
2230 + } while ((res == 0 || res == -EEXIST)
2232 + && follow_bindings(index, set, ip));
2233 + read_unlock_bh(&ip_set_lock);
2238 +/* Register and deregister settype */
2240 +static inline struct ip_set_type *
2241 +find_set_type(const char *name)
2243 + struct ip_set_type *set_type;
2245 + list_for_each_entry(set_type, &set_type_list, list)
2246 + if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
2252 +ip_set_register_set_type(struct ip_set_type *set_type)
2256 + if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
2257 + ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
2258 + set_type->typename,
2259 + set_type->protocol_version,
2260 + IP_SET_PROTOCOL_VERSION);
2264 + write_lock_bh(&ip_set_lock);
2265 + if (find_set_type(set_type->typename)) {
2267 + ip_set_printk("'%s' already registered!",
2268 + set_type->typename);
2272 + if (!try_module_get(THIS_MODULE)) {
2276 + list_add(&set_type->list, &set_type_list);
2277 + DP("'%s' registered.", set_type->typename);
2279 + write_unlock_bh(&ip_set_lock);
2284 +ip_set_unregister_set_type(struct ip_set_type *set_type)
2286 + write_lock_bh(&ip_set_lock);
2287 + if (!find_set_type(set_type->typename)) {
2288 + ip_set_printk("'%s' not registered?",
2289 + set_type->typename);
2292 + list_del(&set_type->list);
2293 + module_put(THIS_MODULE);
2294 + DP("'%s' unregistered.", set_type->typename);
2296 + write_unlock_bh(&ip_set_lock);
2301 +__ip_set_get_byname(const char *name, struct ip_set **set)
2303 + ip_set_id_t i, index = IP_SET_INVALID_ID;
2305 + for (i = 0; i < ip_set_max; i++) {
2306 + if (ip_set_list[i] != NULL
2307 + && SETNAME_EQ(ip_set_list[i]->name, name)) {
2310 + *set = ip_set_list[i];
2317 +void __ip_set_put_byindex(ip_set_id_t index)
2319 + if (ip_set_list[index])
2320 + __ip_set_put(index);
2324 + * Userspace routines
2328 + * Find set by name, reference it once. The reference makes sure the
2329 + * thing pointed to, does not go away under our feet. Drop the reference
2330 + * later, using ip_set_put().
2333 +ip_set_get_byname(const char *name)
2335 + ip_set_id_t i, index = IP_SET_INVALID_ID;
2337 + down(&ip_set_app_mutex);
2338 + for (i = 0; i < ip_set_max; i++) {
2339 + if (ip_set_list[i] != NULL
2340 + && SETNAME_EQ(ip_set_list[i]->name, name)) {
2346 + up(&ip_set_app_mutex);
2351 + * Find set by index, reference it once. The reference makes sure the
2352 + * thing pointed to, does not go away under our feet. Drop the reference
2353 + * later, using ip_set_put().
2356 +ip_set_get_byindex(ip_set_id_t index)
2358 + down(&ip_set_app_mutex);
2360 + if (index >= ip_set_max)
2361 + return IP_SET_INVALID_ID;
2363 + if (ip_set_list[index])
2364 + __ip_set_get(index);
2366 + index = IP_SET_INVALID_ID;
2368 + up(&ip_set_app_mutex);
2373 + * Find the set id belonging to the index.
2374 + * We are protected by the mutex, so we do not need to use
2375 + * ip_set_lock. There is no need to reference the sets either.
2378 +ip_set_id(ip_set_id_t index)
2380 + if (index >= ip_set_max || !ip_set_list[index])
2381 + return IP_SET_INVALID_ID;
2383 + return ip_set_list[index]->id;
2387 + * If the given set pointer points to a valid set, decrement
2388 + * reference count by 1. The caller shall not assume the index
2389 + * to be valid, after calling this function.
2391 +void ip_set_put_byindex(ip_set_id_t index)
2393 + down(&ip_set_app_mutex);
2394 + if (ip_set_list[index])
2395 + __ip_set_put(index);
2396 + up(&ip_set_app_mutex);
2399 +/* Find a set by name or index */
2401 +ip_set_find_byname(const char *name)
2403 + ip_set_id_t i, index = IP_SET_INVALID_ID;
2405 + for (i = 0; i < ip_set_max; i++) {
2406 + if (ip_set_list[i] != NULL
2407 + && SETNAME_EQ(ip_set_list[i]->name, name)) {
2416 +ip_set_find_byindex(ip_set_id_t index)
2418 + if (index >= ip_set_max || ip_set_list[index] == NULL)
2419 + index = IP_SET_INVALID_ID;
2425 + * Add, del, test, bind and unbind
2429 +__ip_set_testip(struct ip_set *set,
2436 + read_lock_bh(&set->lock);
2437 + res = set->type->testip(set, data, size, ip);
2438 + read_unlock_bh(&set->lock);
2444 +__ip_set_addip(ip_set_id_t index,
2448 + struct ip_set *set = ip_set_list[index];
2452 + IP_SET_ASSERT(set);
2454 + write_lock_bh(&set->lock);
2455 + res = set->type->addip(set, data, size, &ip);
2456 + write_unlock_bh(&set->lock);
2457 + } while (res == -EAGAIN
2458 + && set->type->retry
2459 + && (res = set->type->retry(set)) == 0);
2465 +ip_set_addip(ip_set_id_t index,
2469 + struct ip_set *set = ip_set_list[index];
2471 + IP_SET_ASSERT(set);
2473 + if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
2474 + ip_set_printk("data length wrong (want %lu, have %zu)",
2475 + (long unsigned)set->type->reqsize,
2476 + size - sizeof(struct ip_set_req_adt));
2479 + return __ip_set_addip(index,
2480 + data + sizeof(struct ip_set_req_adt),
2481 + size - sizeof(struct ip_set_req_adt));
2485 +ip_set_delip(ip_set_id_t index,
2489 + struct ip_set *set = ip_set_list[index];
2493 + IP_SET_ASSERT(set);
2495 + if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
2496 + ip_set_printk("data length wrong (want %lu, have %zu)",
2497 + (long unsigned)set->type->reqsize,
2498 + size - sizeof(struct ip_set_req_adt));
2501 + write_lock_bh(&set->lock);
2502 + res = set->type->delip(set,
2503 + data + sizeof(struct ip_set_req_adt),
2504 + size - sizeof(struct ip_set_req_adt),
2506 + write_unlock_bh(&set->lock);
2512 +ip_set_testip(ip_set_id_t index,
2516 + struct ip_set *set = ip_set_list[index];
2520 + IP_SET_ASSERT(set);
2522 + if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
2523 + ip_set_printk("data length wrong (want %lu, have %zu)",
2524 + (long unsigned)set->type->reqsize,
2525 + size - sizeof(struct ip_set_req_adt));
2528 + res = __ip_set_testip(set,
2529 + data + sizeof(struct ip_set_req_adt),
2530 + size - sizeof(struct ip_set_req_adt),
2533 + return (res > 0 ? -EEXIST : res);
2537 +ip_set_bindip(ip_set_id_t index,
2541 + struct ip_set *set = ip_set_list[index];
2542 + const struct ip_set_req_bind *req_bind;
2543 + ip_set_id_t binding;
2547 + IP_SET_ASSERT(set);
2548 + if (size < sizeof(struct ip_set_req_bind))
2553 + if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2554 + /* Default binding of a set */
2555 + const char *binding_name;
2557 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
2560 + binding_name = data + sizeof(struct ip_set_req_bind);
2562 + binding = ip_set_find_byname(binding_name);
2563 + if (binding == IP_SET_INVALID_ID)
2566 + write_lock_bh(&ip_set_lock);
2567 + /* Sets as binding values are referenced */
2568 + if (set->binding != IP_SET_INVALID_ID)
2569 + __ip_set_put(set->binding);
2570 + set->binding = binding;
2571 + __ip_set_get(set->binding);
2572 + write_unlock_bh(&ip_set_lock);
2576 + binding = ip_set_find_byname(req_bind->binding);
2577 + if (binding == IP_SET_INVALID_ID)
2580 + res = __ip_set_testip(set,
2581 + data + sizeof(struct ip_set_req_bind),
2582 + size - sizeof(struct ip_set_req_bind),
2584 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
2585 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
2588 + res = ip_set_hash_add(set->id, ip, binding);
2593 +#define FOREACH_SET_DO(fn, args...) \
2595 + ip_set_id_t __i; \
2596 + struct ip_set *__set; \
2598 + for (__i = 0; __i < ip_set_max; __i++) { \
2599 + __set = ip_set_list[__i]; \
2600 + if (__set != NULL) \
2601 + fn(__set , ##args); \
2606 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
2608 + if (set_hash->id == id)
2609 + __set_hash_del(set_hash);
2613 +__unbind_default(struct ip_set *set)
2615 + if (set->binding != IP_SET_INVALID_ID) {
2616 + /* Sets as binding values are referenced */
2617 + __ip_set_put(set->binding);
2618 + set->binding = IP_SET_INVALID_ID;
2623 +ip_set_unbindip(ip_set_id_t index,
2627 + struct ip_set *set;
2628 + const struct ip_set_req_bind *req_bind;
2633 + if (size < sizeof(struct ip_set_req_bind))
2638 + DP("%u %s", index, req_bind->binding);
2639 + if (index == IP_SET_INVALID_ID) {
2640 + /* unbind :all: */
2641 + if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2642 + /* Default binding of sets */
2643 + write_lock_bh(&ip_set_lock);
2644 + FOREACH_SET_DO(__unbind_default);
2645 + write_unlock_bh(&ip_set_lock);
2647 + } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
2648 + /* Flush all bindings of all sets */
2649 + write_lock_bh(&ip_set_lock);
2650 + FOREACH_HASH_RW_DO(__set_hash_del);
2651 + write_unlock_bh(&ip_set_lock);
2654 + DP("unreachable reached!");
2658 + set = ip_set_list[index];
2659 + IP_SET_ASSERT(set);
2660 + if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2661 + /* Default binding of set */
2662 + ip_set_id_t binding = ip_set_find_byindex(set->binding);
2664 + if (binding == IP_SET_INVALID_ID)
2667 + write_lock_bh(&ip_set_lock);
2668 + /* Sets in hash values are referenced */
2669 + __ip_set_put(set->binding);
2670 + set->binding = IP_SET_INVALID_ID;
2671 + write_unlock_bh(&ip_set_lock);
2674 + } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
2675 + /* Flush all bindings */
2677 + write_lock_bh(&ip_set_lock);
2678 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2679 + write_unlock_bh(&ip_set_lock);
2683 + res = __ip_set_testip(set,
2684 + data + sizeof(struct ip_set_req_bind),
2685 + size - sizeof(struct ip_set_req_bind),
2688 + DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
2690 + res = ip_set_hash_del(set->id, ip);
2696 +ip_set_testbind(ip_set_id_t index,
2700 + struct ip_set *set = ip_set_list[index];
2701 + const struct ip_set_req_bind *req_bind;
2702 + ip_set_id_t binding;
2706 + IP_SET_ASSERT(set);
2707 + if (size < sizeof(struct ip_set_req_bind))
2712 + if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2713 + /* Default binding of set */
2714 + const char *binding_name;
2716 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
2719 + binding_name = data + sizeof(struct ip_set_req_bind);
2721 + binding = ip_set_find_byname(binding_name);
2722 + if (binding == IP_SET_INVALID_ID)
2725 + res = (set->binding == binding) ? -EEXIST : 0;
2729 + binding = ip_set_find_byname(req_bind->binding);
2730 + if (binding == IP_SET_INVALID_ID)
2734 + res = __ip_set_testip(set,
2735 + data + sizeof(struct ip_set_req_bind),
2736 + size - sizeof(struct ip_set_req_bind),
2738 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
2739 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
2742 + res = (ip_set_find_in_hash(set->id, ip) == binding)
2748 +static struct ip_set_type *
2749 +find_set_type_rlock(const char *typename)
2751 + struct ip_set_type *type;
2753 + read_lock_bh(&ip_set_lock);
2754 + type = find_set_type(typename);
2756 + read_unlock_bh(&ip_set_lock);
2762 +find_free_id(const char *name,
2763 + ip_set_id_t *index,
2768 + *id = IP_SET_INVALID_ID;
2769 + for (i = 0; i < ip_set_max; i++) {
2770 + if (ip_set_list[i] == NULL) {
2771 + if (*id == IP_SET_INVALID_ID)
2773 + } else if (SETNAME_EQ(name, ip_set_list[i]->name))
2777 + if (*id == IP_SET_INVALID_ID)
2778 + /* No free slot remained */
2780 + /* Check that index is usable as id (swapping) */
2782 + for (i = 0; i < ip_set_max; i++) {
2783 + if (ip_set_list[i] != NULL
2784 + && ip_set_list[i]->id == *id) {
2796 +ip_set_create(const char *name,
2797 + const char *typename,
2798 + ip_set_id_t restore,
2802 + struct ip_set *set;
2803 + ip_set_id_t index = 0, id;
2806 + DP("setname: %s, typename: %s, id: %u", name, typename, restore);
2809 + * First, and without any locks, allocate and initialize
2810 + * a normal base set structure.
2812 + set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
2815 + rwlock_init(&set->lock);
2816 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
2817 + set->binding = IP_SET_INVALID_ID;
2818 + atomic_set(&set->ref, 0);
2821 + * Next, take the &ip_set_lock, check that we know the type,
2822 + * and take a reference on the type, to make sure it
2823 + * stays available while constructing our new set.
2825 + * After referencing the type, we drop the &ip_set_lock,
2826 + * and let the new set construction run without locks.
2828 + set->type = find_set_type_rlock(typename);
2829 + if (set->type == NULL) {
2830 + /* Try loading the module */
2831 + char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
2832 + strcpy(modulename, "ip_set_");
2833 + strcat(modulename, typename);
2834 + DP("try to load %s", modulename);
2835 + request_module(modulename);
2836 + set->type = find_set_type_rlock(typename);
2838 + if (set->type == NULL) {
2839 + ip_set_printk("no set type '%s', set '%s' not created",
2844 + if (!try_module_get(set->type->me)) {
2845 + read_unlock_bh(&ip_set_lock);
2849 + read_unlock_bh(&ip_set_lock);
2851 + /* Check request size */
2852 + if (size != set->type->header_size) {
2853 + ip_set_printk("data length wrong (want %lu, have %lu)",
2854 + (long unsigned)set->type->header_size,
2855 + (long unsigned)size);
2860 + * Without holding any locks, create private part.
2862 + res = set->type->create(set, data, size);
2866 + /* BTW, res==0 here. */
2869 + * Here, we have a valid, constructed set. &ip_set_lock again,
2870 + * find free id/index and check that it is not already in
2873 + write_lock_bh(&ip_set_lock);
2874 + if ((res = find_free_id(set->name, &index, &id)) != 0) {
2875 + DP("no free id!");
2879 + /* Make sure restore gets the same index */
2880 + if (restore != IP_SET_INVALID_ID && index != restore) {
2881 + DP("Can't restore, sets are screwed up");
2887 + * Finally! Add our shiny new set to the list, and be done.
2889 + DP("create: '%s' created with index %u, id %u!", set->name, index, id);
2891 + ip_set_list[index] = set;
2892 + write_unlock_bh(&ip_set_lock);
2896 + write_unlock_bh(&ip_set_lock);
2897 + set->type->destroy(set);
2899 + module_put(set->type->me);
2906 + * Destroy a given existing set
2909 +ip_set_destroy_set(ip_set_id_t index)
2911 + struct ip_set *set = ip_set_list[index];
2913 + IP_SET_ASSERT(set);
2914 + DP("set: %s", set->name);
2915 + write_lock_bh(&ip_set_lock);
2916 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2917 + if (set->binding != IP_SET_INVALID_ID)
2918 + __ip_set_put(set->binding);
2919 + ip_set_list[index] = NULL;
2920 + write_unlock_bh(&ip_set_lock);
2922 + /* Must call it without holding any lock */
2923 + set->type->destroy(set);
2924 + module_put(set->type->me);
2929 + * Destroy a set - or all sets
2930 + * Sets must not be referenced/used.
2933 +ip_set_destroy(ip_set_id_t index)
2937 + /* ref modification always protected by the mutex */
2938 + if (index != IP_SET_INVALID_ID) {
2939 + if (atomic_read(&ip_set_list[index]->ref))
2941 + ip_set_destroy_set(index);
2943 + for (i = 0; i < ip_set_max; i++) {
2944 + if (ip_set_list[i] != NULL
2945 + && (atomic_read(&ip_set_list[i]->ref)))
2949 + for (i = 0; i < ip_set_max; i++) {
2950 + if (ip_set_list[i] != NULL)
2951 + ip_set_destroy_set(i);
2958 +ip_set_flush_set(struct ip_set *set)
2960 + DP("set: %s %u", set->name, set->id);
2962 + write_lock_bh(&set->lock);
2963 + set->type->flush(set);
2964 + write_unlock_bh(&set->lock);
2968 + * Flush data in a set - or in all sets
2971 +ip_set_flush(ip_set_id_t index)
2973 + if (index != IP_SET_INVALID_ID) {
2974 + IP_SET_ASSERT(ip_set_list[index]);
2975 + ip_set_flush_set(ip_set_list[index]);
2977 + FOREACH_SET_DO(ip_set_flush_set);
2984 +ip_set_rename(ip_set_id_t index, const char *name)
2986 + struct ip_set *set = ip_set_list[index];
2990 + DP("set: %s to %s", set->name, name);
2991 + write_lock_bh(&ip_set_lock);
2992 + for (i = 0; i < ip_set_max; i++) {
2993 + if (ip_set_list[i] != NULL
2994 + && SETNAME_EQ(ip_set_list[i]->name, name)) {
2999 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
3001 + write_unlock_bh(&ip_set_lock);
3006 + * Swap two sets so that name/index points to the other.
3007 + * References are also swapped.
3010 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
3012 + struct ip_set *from = ip_set_list[from_index];
3013 + struct ip_set *to = ip_set_list[to_index];
3014 + char from_name[IP_SET_MAXNAMELEN];
3015 + u_int32_t from_ref;
3017 + DP("set: %s to %s", from->name, to->name);
3018 + /* Features must not change.
3019 + * Not an artificial restriction anymore, as we must prevent
3020 + * possible loops created by swapping in setlist type of sets. */
3021 + if (from->type->features != to->type->features)
3024 + /* No magic here: ref munging protected by the mutex */
3025 + write_lock_bh(&ip_set_lock);
3026 + strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
3027 + from_ref = atomic_read(&from->ref);
3029 + strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
3030 + atomic_set(&from->ref, atomic_read(&to->ref));
3031 + strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
3032 + atomic_set(&to->ref, from_ref);
3034 + ip_set_list[from_index] = to;
3035 + ip_set_list[to_index] = from;
3037 + write_unlock_bh(&ip_set_lock);
3046 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
3047 + ip_set_id_t id, u_int32_t *size)
3049 + if (set_hash->id == id)
3050 + *size += sizeof(struct ip_set_hash_list);
3054 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
3055 + ip_set_id_t id, u_int32_t *size)
3057 + if (set_hash->id == id)
3058 + *size += sizeof(struct ip_set_hash_save);
3062 +__set_hash_bindings(struct ip_set_hash *set_hash,
3063 + ip_set_id_t id, void *data, int *used)
3065 + if (set_hash->id == id) {
3066 + struct ip_set_hash_list *hash_list = data + *used;
3068 + hash_list->ip = set_hash->ip;
3069 + hash_list->binding = set_hash->binding;
3070 + *used += sizeof(struct ip_set_hash_list);
3074 +static int ip_set_list_set(ip_set_id_t index,
3079 + struct ip_set *set = ip_set_list[index];
3080 + struct ip_set_list *set_list;
3082 + /* Pointer to our header */
3083 + set_list = data + *used;
3085 + DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
3087 + /* Get and ensure header size */
3088 + if (*used + sizeof(struct ip_set_list) > len)
3089 + goto not_enough_mem;
3090 + *used += sizeof(struct ip_set_list);
3092 + read_lock_bh(&set->lock);
3093 + /* Get and ensure set specific header size */
3094 + set_list->header_size = set->type->header_size;
3095 + if (*used + set_list->header_size > len)
3098 + /* Fill in the header */
3099 + set_list->index = index;
3100 + set_list->binding = set->binding;
3101 + set_list->ref = atomic_read(&set->ref);
3103 + /* Fill in set specific header data */
3104 + set->type->list_header(set, data + *used);
3105 + *used += set_list->header_size;
3107 + /* Get and ensure set specific members size */
3108 + set_list->members_size = set->type->list_members_size(set);
3109 + if (*used + set_list->members_size > len)
3112 + /* Fill in set specific members data */
3113 + set->type->list_members(set, data + *used);
3114 + *used += set_list->members_size;
3115 + read_unlock_bh(&set->lock);
3119 + /* Get and ensure set specific bindings size */
3120 + set_list->bindings_size = 0;
3121 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
3122 + set->id, &set_list->bindings_size);
3123 + if (*used + set_list->bindings_size > len)
3124 + goto not_enough_mem;
3126 + /* Fill in set specific bindings data */
3127 + FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
3132 + read_unlock_bh(&set->lock);
3134 + DP("not enough mem, try again");
3141 +static int ip_set_save_set(ip_set_id_t index,
3146 + struct ip_set *set;
3147 + struct ip_set_save *set_save;
3149 + /* Pointer to our header */
3150 + set_save = data + *used;
3152 + /* Get and ensure header size */
3153 + if (*used + sizeof(struct ip_set_save) > len)
3154 + goto not_enough_mem;
3155 + *used += sizeof(struct ip_set_save);
3157 + set = ip_set_list[index];
3158 + DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
3159 + data, data + *used);
3161 + read_lock_bh(&set->lock);
3162 + /* Get and ensure set specific header size */
3163 + set_save->header_size = set->type->header_size;
3164 + if (*used + set_save->header_size > len)
3167 + /* Fill in the header */
3168 + set_save->index = index;
3169 + set_save->binding = set->binding;
3171 + /* Fill in set specific header data */
3172 + set->type->list_header(set, data + *used);
3173 + *used += set_save->header_size;
3175 + DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
3176 + (unsigned long)set_save->header_size, data, data + *used);
3177 + /* Get and ensure set specific members size */
3178 + set_save->members_size = set->type->list_members_size(set);
3179 + if (*used + set_save->members_size > len)
3182 + /* Fill in set specific members data */
3183 + set->type->list_members(set, data + *used);
3184 + *used += set_save->members_size;
3185 + read_unlock_bh(&set->lock);
3186 + DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
3187 + (unsigned long)set_save->members_size, data, data + *used);
3191 + read_unlock_bh(&set->lock);
3193 + DP("not enough mem, try again");
3198 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
3206 + && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
3207 + struct ip_set_hash_save *hash_save = data + *used;
3208 + /* Ensure bindings size */
3209 + if (*used + sizeof(struct ip_set_hash_save) > len) {
3213 + hash_save->id = set_hash->id;
3214 + hash_save->ip = set_hash->ip;
3215 + hash_save->binding = set_hash->binding;
3216 + *used += sizeof(struct ip_set_hash_save);
3220 +static int ip_set_save_bindings(ip_set_id_t index,
3226 + struct ip_set_save *set_save;
3228 + DP("used %u, len %u", *used, len);
3229 + /* Get and ensure header size */
3230 + if (*used + sizeof(struct ip_set_save) > len)
3234 + set_save = data + *used;
3235 + set_save->index = IP_SET_INVALID_ID;
3236 + set_save->header_size = 0;
3237 + set_save->members_size = 0;
3238 + *used += sizeof(struct ip_set_save);
3240 + DP("marker added used %u, len %u", *used, len);
3241 + /* Fill in bindings data */
3242 + if (index != IP_SET_INVALID_ID)
3243 + /* Sets are identified by id in hash */
3244 + index = ip_set_list[index]->id;
3245 + FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
3253 +static int ip_set_restore(void *data,
3257 + int line = 0, used = 0, members_size;
3258 + struct ip_set *set;
3259 + struct ip_set_hash_save *hash_save;
3260 + struct ip_set_restore *set_restore;
3261 + ip_set_id_t index;
3263 + /* Loop to restore sets */
3267 + DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
3268 + /* Get and ensure header size */
3269 + if (used + sizeof(struct ip_set_restore) > len)
3271 + set_restore = data + used;
3272 + used += sizeof(struct ip_set_restore);
3274 + /* Ensure data size */
3276 + + set_restore->header_size
3277 + + set_restore->members_size > len)
3280 + /* Check marker */
3281 + if (set_restore->index == IP_SET_INVALID_ID) {
3286 + /* Try to create the set */
3287 + DP("restore %s %s", set_restore->name, set_restore->typename);
3288 + res = ip_set_create(set_restore->name,
3289 + set_restore->typename,
3290 + set_restore->index,
3292 + set_restore->header_size);
3296 + used += set_restore->header_size;
3298 + index = ip_set_find_byindex(set_restore->index);
3299 + DP("index %u, restore_index %u", index, set_restore->index);
3300 + if (index != set_restore->index)
3302 + /* Try to restore members data */
3303 + set = ip_set_list[index];
3305 + DP("members_size %lu reqsize %lu",
3306 + (unsigned long)set_restore->members_size,
3307 + (unsigned long)set->type->reqsize);
3308 + while (members_size + set->type->reqsize <=
3309 + set_restore->members_size) {
3311 + DP("members: %d, line %d", members_size, line);
3312 + res = __ip_set_addip(index,
3313 + data + used + members_size,
3314 + set->type->reqsize);
3315 + if (!(res == 0 || res == -EEXIST))
3317 + members_size += set->type->reqsize;
3320 + DP("members_size %lu %d",
3321 + (unsigned long)set_restore->members_size, members_size);
3322 + if (members_size != set_restore->members_size)
3324 + used += set_restore->members_size;
3328 + /* Loop to restore bindings */
3329 + while (used < len) {
3332 + DP("restore binding, line %u", line);
3333 + /* Get and ensure size */
3334 + if (used + sizeof(struct ip_set_hash_save) > len)
3336 + hash_save = data + used;
3337 + used += sizeof(struct ip_set_hash_save);
3339 + /* hash_save->id is used to store the index */
3340 + index = ip_set_find_byindex(hash_save->id);
3341 + DP("restore binding index %u, id %u, %u -> %u",
3342 + index, hash_save->id, hash_save->ip, hash_save->binding);
3343 + if (index != hash_save->id)
3345 + if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
3346 + DP("corrupt binding set index %u", hash_save->binding);
3349 + set = ip_set_list[hash_save->id];
3350 + /* Null valued IP means default binding */
3351 + if (hash_save->ip)
3352 + res = ip_set_hash_add(set->id,
3354 + hash_save->binding);
3356 + IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
3357 + write_lock_bh(&ip_set_lock);
3358 + set->binding = hash_save->binding;
3359 + __ip_set_get(set->binding);
3360 + write_unlock_bh(&ip_set_lock);
3361 + DP("default binding: %u", set->binding);
3373 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
3376 + int res = 0; /* Assume OK */
3378 + struct ip_set_req_adt *req_adt;
3379 + ip_set_id_t index = IP_SET_INVALID_ID;
3380 + int (*adtfn)(ip_set_id_t index,
3381 + const void *data, u_int32_t size);
3383 + int (*fn)(ip_set_id_t index,
3384 + const void *data, u_int32_t size);
3386 + { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
3387 + { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
3390 + DP("optval=%d, user=%p, len=%d", optval, user, len);
3391 + if (!capable(CAP_NET_ADMIN))
3393 + if (optval != SO_IP_SET)
3395 + if (len <= sizeof(unsigned)) {
3396 + ip_set_printk("short userdata (want >%zu, got %u)",
3397 + sizeof(unsigned), len);
3400 + data = vmalloc(len);
3402 + DP("out of mem for %u bytes", len);
3405 + if (copy_from_user(data, user, len) != 0) {
3409 + if (down_interruptible(&ip_set_app_mutex)) {
3414 + op = (unsigned *)data;
3417 + if (*op < IP_SET_OP_VERSION) {
3418 + /* Check the version at the beginning of operations */
3419 + struct ip_set_req_version *req_version = data;
3420 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
3427 + case IP_SET_OP_CREATE:{
3428 + struct ip_set_req_create *req_create = data;
3430 + if (len < sizeof(struct ip_set_req_create)) {
3431 + ip_set_printk("short CREATE data (want >=%zu, got %u)",
3432 + sizeof(struct ip_set_req_create), len);
3436 + req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
3437 + req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
3438 + res = ip_set_create(req_create->name,
3439 + req_create->typename,
3440 + IP_SET_INVALID_ID,
3441 + data + sizeof(struct ip_set_req_create),
3442 + len - sizeof(struct ip_set_req_create));
3445 + case IP_SET_OP_DESTROY:{
3446 + struct ip_set_req_std *req_destroy = data;
3448 + if (len != sizeof(struct ip_set_req_std)) {
3449 + ip_set_printk("invalid DESTROY data (want %zu, got %u)",
3450 + sizeof(struct ip_set_req_std), len);
3454 + if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
3455 + /* Destroy all sets */
3456 + index = IP_SET_INVALID_ID;
3458 + req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
3459 + index = ip_set_find_byname(req_destroy->name);
3461 + if (index == IP_SET_INVALID_ID) {
3467 + res = ip_set_destroy(index);
3470 + case IP_SET_OP_FLUSH:{
3471 + struct ip_set_req_std *req_flush = data;
3473 + if (len != sizeof(struct ip_set_req_std)) {
3474 + ip_set_printk("invalid FLUSH data (want %zu, got %u)",
3475 + sizeof(struct ip_set_req_std), len);
3479 + if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
3480 + /* Flush all sets */
3481 + index = IP_SET_INVALID_ID;
3483 + req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
3484 + index = ip_set_find_byname(req_flush->name);
3486 + if (index == IP_SET_INVALID_ID) {
3491 + res = ip_set_flush(index);
3494 + case IP_SET_OP_RENAME:{
3495 + struct ip_set_req_create *req_rename = data;
3497 + if (len != sizeof(struct ip_set_req_create)) {
3498 + ip_set_printk("invalid RENAME data (want %zu, got %u)",
3499 + sizeof(struct ip_set_req_create), len);
3504 + req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
3505 + req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
3507 + index = ip_set_find_byname(req_rename->name);
3508 + if (index == IP_SET_INVALID_ID) {
3512 + res = ip_set_rename(index, req_rename->typename);
3515 + case IP_SET_OP_SWAP:{
3516 + struct ip_set_req_create *req_swap = data;
3517 + ip_set_id_t to_index;
3519 + if (len != sizeof(struct ip_set_req_create)) {
3520 + ip_set_printk("invalid SWAP data (want %zu, got %u)",
3521 + sizeof(struct ip_set_req_create), len);
3526 + req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
3527 + req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
3529 + index = ip_set_find_byname(req_swap->name);
3530 + if (index == IP_SET_INVALID_ID) {
3534 + to_index = ip_set_find_byname(req_swap->typename);
3535 + if (to_index == IP_SET_INVALID_ID) {
3539 + res = ip_set_swap(index, to_index);
3543 + break; /* Set identified by id */
3546 + /* There we may have add/del/test/bind/unbind/test_bind operations */
3547 + if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
3551 + adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
3553 + if (len < sizeof(struct ip_set_req_adt)) {
3554 + ip_set_printk("short data in adt request (want >=%zu, got %u)",
3555 + sizeof(struct ip_set_req_adt), len);
3561 + /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
3562 + if (!(*op == IP_SET_OP_UNBIND_SET
3563 + && req_adt->index == IP_SET_INVALID_ID)) {
3564 + index = ip_set_find_byindex(req_adt->index);
3565 + if (index == IP_SET_INVALID_ID) {
3570 + res = adtfn(index, data, len);
3573 + up(&ip_set_app_mutex);
3577 + DP("final result %d", res);
3582 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
3586 + ip_set_id_t index = IP_SET_INVALID_ID;
3588 + int copylen = *len;
3590 + DP("optval=%d, user=%p, len=%d", optval, user, *len);
3591 + if (!capable(CAP_NET_ADMIN))
3593 + if (optval != SO_IP_SET)
3595 + if (*len < sizeof(unsigned)) {
3596 + ip_set_printk("short userdata (want >=%zu, got %d)",
3597 + sizeof(unsigned), *len);
3600 + data = vmalloc(*len);
3602 + DP("out of mem for %d bytes", *len);
3605 + if (copy_from_user(data, user, *len) != 0) {
3609 + if (down_interruptible(&ip_set_app_mutex)) {
3614 + op = (unsigned *) data;
3617 + if (*op < IP_SET_OP_VERSION) {
3618 + /* Check the version at the beginning of operations */
3619 + struct ip_set_req_version *req_version = data;
3620 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
3627 + case IP_SET_OP_VERSION: {
3628 + struct ip_set_req_version *req_version = data;
3630 + if (*len != sizeof(struct ip_set_req_version)) {
3631 + ip_set_printk("invalid VERSION (want %zu, got %d)",
3632 + sizeof(struct ip_set_req_version),
3638 + req_version->version = IP_SET_PROTOCOL_VERSION;
3639 + res = copy_to_user(user, req_version,
3640 + sizeof(struct ip_set_req_version));
3643 + case IP_SET_OP_GET_BYNAME: {
3644 + struct ip_set_req_get_set *req_get = data;
3646 + if (*len != sizeof(struct ip_set_req_get_set)) {
3647 + ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
3648 + sizeof(struct ip_set_req_get_set), *len);
3652 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3653 + index = ip_set_find_byname(req_get->set.name);
3654 + req_get->set.index = index;
3657 + case IP_SET_OP_GET_BYINDEX: {
3658 + struct ip_set_req_get_set *req_get = data;
3660 + if (*len != sizeof(struct ip_set_req_get_set)) {
3661 + ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
3662 + sizeof(struct ip_set_req_get_set), *len);
3666 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3667 + index = ip_set_find_byindex(req_get->set.index);
3668 + strncpy(req_get->set.name,
3669 + index == IP_SET_INVALID_ID ? ""
3670 + : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
3673 + case IP_SET_OP_ADT_GET: {
3674 + struct ip_set_req_adt_get *req_get = data;
3676 + if (*len != sizeof(struct ip_set_req_adt_get)) {
3677 + ip_set_printk("invalid ADT_GET (want %zu, got %d)",
3678 + sizeof(struct ip_set_req_adt_get), *len);
3682 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3683 + index = ip_set_find_byname(req_get->set.name);
3684 + if (index != IP_SET_INVALID_ID) {
3685 + req_get->set.index = index;
3686 + strncpy(req_get->typename,
3687 + ip_set_list[index]->type->typename,
3688 + IP_SET_MAXNAMELEN - 1);
3695 + case IP_SET_OP_MAX_SETS: {
3696 + struct ip_set_req_max_sets *req_max_sets = data;
3699 + if (*len != sizeof(struct ip_set_req_max_sets)) {
3700 + ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
3701 + sizeof(struct ip_set_req_max_sets), *len);
3706 + if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
3707 + req_max_sets->set.index = IP_SET_INVALID_ID;
3709 + req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3710 + req_max_sets->set.index =
3711 + ip_set_find_byname(req_max_sets->set.name);
3712 + if (req_max_sets->set.index == IP_SET_INVALID_ID) {
3717 + req_max_sets->max_sets = ip_set_max;
3718 + req_max_sets->sets = 0;
3719 + for (i = 0; i < ip_set_max; i++) {
3720 + if (ip_set_list[i] != NULL)
3721 + req_max_sets->sets++;
3725 + case IP_SET_OP_LIST_SIZE:
3726 + case IP_SET_OP_SAVE_SIZE: {
3727 + struct ip_set_req_setnames *req_setnames = data;
3728 + struct ip_set_name_list *name_list;
3729 + struct ip_set *set;
3733 + if (*len < sizeof(struct ip_set_req_setnames)) {
3734 + ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
3735 + sizeof(struct ip_set_req_setnames), *len);
3740 + req_setnames->size = 0;
3741 + used = sizeof(struct ip_set_req_setnames);
3742 + for (i = 0; i < ip_set_max; i++) {
3743 + if (ip_set_list[i] == NULL)
3745 + name_list = data + used;
3746 + used += sizeof(struct ip_set_name_list);
3747 + if (used > copylen) {
3751 + set = ip_set_list[i];
3752 + /* Fill in index, name, etc. */
3753 + name_list->index = i;
3754 + name_list->id = set->id;
3755 + strncpy(name_list->name,
3757 + IP_SET_MAXNAMELEN - 1);
3758 + strncpy(name_list->typename,
3759 + set->type->typename,
3760 + IP_SET_MAXNAMELEN - 1);
3761 + DP("filled %s of type %s, index %u\n",
3762 + name_list->name, name_list->typename,
3763 + name_list->index);
3764 + if (!(req_setnames->index == IP_SET_INVALID_ID
3765 + || req_setnames->index == i))
3769 + case IP_SET_OP_LIST_SIZE: {
3770 + req_setnames->size += sizeof(struct ip_set_list)
3771 + + set->type->header_size
3772 + + set->type->list_members_size(set);
3773 + /* Sets are identified by id in the hash */
3774 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
3775 + set->id, &req_setnames->size);
3778 + case IP_SET_OP_SAVE_SIZE: {
3779 + req_setnames->size += sizeof(struct ip_set_save)
3780 + + set->type->header_size
3781 + + set->type->list_members_size(set);
3782 + FOREACH_HASH_DO(__set_hash_bindings_size_save,
3783 + set->id, &req_setnames->size);
3790 + if (copylen != used) {
3796 + case IP_SET_OP_LIST: {
3797 + struct ip_set_req_list *req_list = data;
3801 + if (*len < sizeof(struct ip_set_req_list)) {
3802 + ip_set_printk("short LIST (want >=%zu, got %d)",
3803 + sizeof(struct ip_set_req_list), *len);
3807 + index = req_list->index;
3808 + if (index != IP_SET_INVALID_ID
3809 + && ip_set_find_byindex(index) != index) {
3814 + if (index == IP_SET_INVALID_ID) {
3815 + /* List all sets */
3816 + for (i = 0; i < ip_set_max && res == 0; i++) {
3817 + if (ip_set_list[i] != NULL)
3818 + res = ip_set_list_set(i, data, &used, *len);
3821 + /* List an individual set */
3822 + res = ip_set_list_set(index, data, &used, *len);
3826 + else if (copylen != used) {
3832 + case IP_SET_OP_SAVE: {
3833 + struct ip_set_req_list *req_save = data;
3837 + if (*len < sizeof(struct ip_set_req_list)) {
3838 + ip_set_printk("short SAVE (want >=%zu, got %d)",
3839 + sizeof(struct ip_set_req_list), *len);
3843 + index = req_save->index;
3844 + if (index != IP_SET_INVALID_ID
3845 + && ip_set_find_byindex(index) != index) {
3850 +#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
3853 + if (index == IP_SET_INVALID_ID) {
3854 + /* Save all sets: ugly setlist type dependency */
3857 + for (i = 0; i < ip_set_max && res == 0; i++) {
3858 + if (ip_set_list[i] != NULL
3859 + && !(setlist ^ SETLIST(ip_set_list[i])))
3860 + res = ip_set_save_set(i, data, &used, *len);
3867 + /* Save an individual set */
3868 + res = ip_set_save_set(index, data, &used, *len);
3871 + res = ip_set_save_bindings(index, data, &used, *len);
3875 + else if (copylen != used) {
3881 + case IP_SET_OP_RESTORE: {
3882 + struct ip_set_req_setnames *req_restore = data;
3885 + if (*len < sizeof(struct ip_set_req_setnames)
3886 + || *len != req_restore->size) {
3887 + ip_set_printk("invalid RESTORE (want =%lu, got %d)",
3888 + (long unsigned)req_restore->size, *len);
3892 + line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3893 + req_restore->size - sizeof(struct ip_set_req_setnames));
3894 + DP("ip_set_restore: %d", line);
3897 + req_restore->size = line;
3898 + copylen = sizeof(struct ip_set_req_setnames);
3906 + } /* end of switch(op) */
3909 + DP("set %s, copylen %d", index != IP_SET_INVALID_ID
3910 + && ip_set_list[index]
3911 + ? ip_set_list[index]->name
3912 + : ":all:", copylen);
3913 + res = copy_to_user(user, data, copylen);
3916 + up(&ip_set_app_mutex);
3920 + DP("final result %d", res);
3924 +static struct nf_sockopt_ops so_set = {
3926 + .set_optmin = SO_IP_SET,
3927 + .set_optmax = SO_IP_SET + 1,
3928 + .set = &ip_set_sockfn_set,
3929 + .get_optmin = SO_IP_SET,
3930 + .get_optmax = SO_IP_SET + 1,
3931 + .get = &ip_set_sockfn_get,
3932 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
3935 + .owner = THIS_MODULE,
3939 +static int max_sets, hash_size;
3941 +module_param(max_sets, int, 0600);
3942 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3943 +module_param(hash_size, int, 0600);
3944 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3945 +MODULE_LICENSE("GPL");
3946 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3947 +MODULE_DESCRIPTION("module implementing core IP set support");
3949 +static int __init ip_set_init(void)
3954 + get_random_bytes(&ip_set_hash_random, 4);
3956 + ip_set_max = max_sets;
3957 + ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3958 + if (!ip_set_list) {
3959 + printk(KERN_ERR "Unable to create ip_set_list\n");
3962 + memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3964 + ip_set_bindings_hash_size = hash_size;
3965 + ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3966 + if (!ip_set_hash) {
3967 + printk(KERN_ERR "Unable to create ip_set_hash\n");
3968 + vfree(ip_set_list);
3971 + for (i = 0; i < ip_set_bindings_hash_size; i++)
3972 + INIT_LIST_HEAD(&ip_set_hash[i]);
3974 + INIT_LIST_HEAD(&set_type_list);
3976 + res = nf_register_sockopt(&so_set);
3978 + ip_set_printk("SO_SET registry failed: %d", res);
3979 + vfree(ip_set_list);
3980 + vfree(ip_set_hash);
3987 +static void __exit ip_set_fini(void)
3989 + /* There can't be any existing set or binding */
3990 + nf_unregister_sockopt(&so_set);
3991 + vfree(ip_set_list);
3992 + vfree(ip_set_hash);
3993 + DP("these are the famous last words");
3996 +EXPORT_SYMBOL(ip_set_register_set_type);
3997 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3999 +EXPORT_SYMBOL(ip_set_get_byname);
4000 +EXPORT_SYMBOL(ip_set_get_byindex);
4001 +EXPORT_SYMBOL(ip_set_put_byindex);
4002 +EXPORT_SYMBOL(ip_set_id);
4003 +EXPORT_SYMBOL(__ip_set_get_byname);
4004 +EXPORT_SYMBOL(__ip_set_put_byindex);
4006 +EXPORT_SYMBOL(ip_set_addip_kernel);
4007 +EXPORT_SYMBOL(ip_set_delip_kernel);
4008 +EXPORT_SYMBOL(ip_set_testip_kernel);
4010 +module_init(ip_set_init);
4011 +module_exit(ip_set_fini);
4013 +++ b/net/ipv4/netfilter/ip_set_iphash.c
4015 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4017 + * This program is free software; you can redistribute it and/or modify
4018 + * it under the terms of the GNU General Public License version 2 as
4019 + * published by the Free Software Foundation.
4022 +/* Kernel module implementing an ip hash set */
4024 +#include <linux/module.h>
4025 +#include <linux/moduleparam.h>
4026 +#include <linux/ip.h>
4027 +#include <linux/skbuff.h>
4028 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4029 +#include <linux/errno.h>
4030 +#include <asm/uaccess.h>
4031 +#include <asm/bitops.h>
4032 +#include <linux/spinlock.h>
4033 +#include <linux/random.h>
4035 +#include <net/ip.h>
4037 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
4039 +static int limit = MAX_RANGE;
4041 +static inline __u32
4042 +iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4044 + struct ip_set_iphash *map = set->data;
4047 + ip_set_ip_t *elem;
4049 + *hash_ip = ip & map->netmask;
4050 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
4051 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
4053 + for (i = 0; i < map->probes; i++) {
4054 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4055 + DP("hash key: %u", id);
4056 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4057 + if (*elem == *hash_ip)
4059 + /* No shortcut - there can be deleted entries. */
4065 +iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4067 + return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
4070 +#define KADT_CONDITION
4073 +KADT(iphash, test, ipaddr)
4076 +__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
4080 + ip_set_ip_t *elem, *slot = NULL;
4082 + for (i = 0; i < map->probes; i++) {
4083 + probe = jhash_ip(map, i, *ip) % map->hashsize;
4084 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4087 + if (!(slot || *elem))
4089 + /* There can be deleted entries, must check all slots */
4096 + /* Trigger rehashing */
4101 +iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4103 + struct ip_set_iphash *map = set->data;
4105 + if (!ip || map->elements >= limit)
4108 + *hash_ip = ip & map->netmask;
4110 + return __iphash_add(map, hash_ip);
4114 +KADT(iphash, add, ipaddr)
4117 +__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
4119 + tmp->netmask = map->netmask;
4122 +HASH_RETRY(iphash, ip_set_ip_t)
4125 +iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4127 + struct ip_set_iphash *map = set->data;
4128 + ip_set_ip_t id, *elem;
4133 + id = iphash_id(set, hash_ip, ip);
4134 + if (id == UINT_MAX)
4137 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4145 +KADT(iphash, del, ipaddr)
4148 +__iphash_create(const struct ip_set_req_iphash_create *req,
4149 + struct ip_set_iphash *map)
4151 + map->netmask = req->netmask;
4156 +HASH_CREATE(iphash, ip_set_ip_t)
4157 +HASH_DESTROY(iphash)
4159 +HASH_FLUSH(iphash, ip_set_ip_t)
4162 +__iphash_list_header(const struct ip_set_iphash *map,
4163 + struct ip_set_req_iphash_create *header)
4165 + header->netmask = map->netmask;
4168 +HASH_LIST_HEADER(iphash)
4169 +HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
4170 +HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
4172 +IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
4174 +MODULE_LICENSE("GPL");
4175 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4176 +MODULE_DESCRIPTION("iphash type of IP sets");
4177 +module_param(limit, int, 0600);
4178 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4180 +REGISTER_MODULE(iphash)
4182 +++ b/net/ipv4/netfilter/ip_set_ipmap.c
4184 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
4185 + * Patrick Schaaf <bof@bof.de>
4186 + * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4188 + * This program is free software; you can redistribute it and/or modify
4189 + * it under the terms of the GNU General Public License version 2 as
4190 + * published by the Free Software Foundation.
4193 +/* Kernel module implementing an IP set type: the single bitmap type */
4195 +#include <linux/module.h>
4196 +#include <linux/ip.h>
4197 +#include <linux/skbuff.h>
4198 +#include <linux/errno.h>
4199 +#include <asm/uaccess.h>
4200 +#include <asm/bitops.h>
4201 +#include <linux/spinlock.h>
4203 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
4205 +static inline ip_set_ip_t
4206 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
4208 + return (ip - map->first_ip)/map->hosts;
4212 +ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4214 + const struct ip_set_ipmap *map = set->data;
4216 + if (ip < map->first_ip || ip > map->last_ip)
4219 + *hash_ip = ip & map->netmask;
4220 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
4221 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
4222 + return !!test_bit(ip_to_id(map, *hash_ip), map->members);
4225 +#define KADT_CONDITION
4228 +KADT(ipmap, test, ipaddr)
4231 +ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4233 + struct ip_set_ipmap *map = set->data;
4235 + if (ip < map->first_ip || ip > map->last_ip)
4238 + *hash_ip = ip & map->netmask;
4239 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
4240 + if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
4247 +KADT(ipmap, add, ipaddr)
4250 +ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4252 + struct ip_set_ipmap *map = set->data;
4254 + if (ip < map->first_ip || ip > map->last_ip)
4257 + *hash_ip = ip & map->netmask;
4258 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
4259 + if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
4266 +KADT(ipmap, del, ipaddr)
4269 +__ipmap_create(const struct ip_set_req_ipmap_create *req,
4270 + struct ip_set_ipmap *map)
4272 + map->netmask = req->netmask;
4274 + if (req->netmask == 0xFFFFFFFF) {
4276 + map->sizeid = map->last_ip - map->first_ip + 1;
4278 + unsigned int mask_bits, netmask_bits;
4281 + map->first_ip &= map->netmask; /* Should we better bark? */
4283 + mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
4284 + netmask_bits = mask_to_bits(map->netmask);
4286 + if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
4287 + || netmask_bits <= mask_bits)
4290 + DP("mask_bits %u, netmask_bits %u",
4291 + mask_bits, netmask_bits);
4292 + map->hosts = 2 << (32 - netmask_bits - 1);
4293 + map->sizeid = 2 << (netmask_bits - mask_bits - 1);
4295 + if (map->sizeid > MAX_RANGE + 1) {
4296 + ip_set_printk("range too big, %d elements (max %d)",
4297 + map->sizeid, MAX_RANGE+1);
4300 + DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
4301 + return bitmap_bytes(0, map->sizeid - 1);
4304 +BITMAP_CREATE(ipmap)
4305 +BITMAP_DESTROY(ipmap)
4306 +BITMAP_FLUSH(ipmap)
4309 +__ipmap_list_header(const struct ip_set_ipmap *map,
4310 + struct ip_set_req_ipmap_create *header)
4312 + header->netmask = map->netmask;
4315 +BITMAP_LIST_HEADER(ipmap)
4316 +BITMAP_LIST_MEMBERS_SIZE(ipmap)
4317 +BITMAP_LIST_MEMBERS(ipmap)
4319 +IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
4321 +MODULE_LICENSE("GPL");
4322 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4323 +MODULE_DESCRIPTION("ipmap type of IP sets");
4325 +REGISTER_MODULE(ipmap)
4327 +++ b/net/ipv4/netfilter/ip_set_ipporthash.c
4329 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4331 + * This program is free software; you can redistribute it and/or modify
4332 + * it under the terms of the GNU General Public License version 2 as
4333 + * published by the Free Software Foundation.
4336 +/* Kernel module implementing an ip+port hash set */
4338 +#include <linux/module.h>
4339 +#include <linux/moduleparam.h>
4340 +#include <linux/ip.h>
4341 +#include <linux/tcp.h>
4342 +#include <linux/udp.h>
4343 +#include <linux/skbuff.h>
4344 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4345 +#include <linux/errno.h>
4346 +#include <asm/uaccess.h>
4347 +#include <asm/bitops.h>
4348 +#include <linux/spinlock.h>
4349 +#include <linux/random.h>
4351 +#include <net/ip.h>
4353 +#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
4354 +#include <linux/netfilter_ipv4/ip_set_getport.h>
4356 +static int limit = MAX_RANGE;
4358 +static inline __u32
4359 +ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
4360 + ip_set_ip_t ip, ip_set_ip_t port)
4362 + struct ip_set_ipporthash *map = set->data;
4365 + ip_set_ip_t *elem;
4367 + *hash_ip = pack_ip_port(map, ip, port);
4369 + DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4370 + set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4374 + for (i = 0; i < map->probes; i++) {
4375 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4376 + DP("hash key: %u", id);
4377 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4378 + if (*elem == *hash_ip)
4380 + /* No shortcut - there can be deleted entries. */
4386 +ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
4387 + ip_set_ip_t ip, ip_set_ip_t port)
4389 + struct ip_set_ipporthash *map = set->data;
4391 + if (ip < map->first_ip || ip > map->last_ip)
4394 + return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
4397 +#define KADT_CONDITION \
4398 + ip_set_ip_t port; \
4400 + if (flags[index+1] == 0) \
4403 + port = get_port(skb, flags[index+1]); \
4405 + if (port == INVALID_PORT) \
4408 +UADT(ipporthash, test, req->port)
4409 +KADT(ipporthash, test, ipaddr, port)
4412 +__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
4416 + ip_set_ip_t *elem, *slot = NULL;
4418 + for (i = 0; i < map->probes; i++) {
4419 + probe = jhash_ip(map, i, *ip) % map->hashsize;
4420 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4423 + if (!(slot || *elem))
4425 + /* There can be deleted entries, must check all slots */
4432 + /* Trigger rehashing */
4437 +ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
4438 + ip_set_ip_t ip, ip_set_ip_t port)
4440 + struct ip_set_ipporthash *map = set->data;
4441 + if (map->elements > limit)
4443 + if (ip < map->first_ip || ip > map->last_ip)
4446 + *hash_ip = pack_ip_port(map, ip, port);
4451 + return __ipporthash_add(map, hash_ip);
4454 +UADT(ipporthash, add, req->port)
4455 +KADT(ipporthash, add, ipaddr, port)
4458 +__ipporthash_retry(struct ip_set_ipporthash *tmp,
4459 + struct ip_set_ipporthash *map)
4461 + tmp->first_ip = map->first_ip;
4462 + tmp->last_ip = map->last_ip;
4465 +HASH_RETRY(ipporthash, ip_set_ip_t)
4468 +ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
4469 + ip_set_ip_t ip, ip_set_ip_t port)
4471 + struct ip_set_ipporthash *map = set->data;
4473 + ip_set_ip_t *elem;
4475 + if (ip < map->first_ip || ip > map->last_ip)
4478 + id = ipporthash_id(set, hash_ip, ip, port);
4480 + if (id == UINT_MAX)
4483 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4490 +UADT(ipporthash, del, req->port)
4491 +KADT(ipporthash, del, ipaddr, port)
4494 +__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
4495 + struct ip_set_ipporthash *map)
4497 + if (req->to - req->from > MAX_RANGE) {
4498 + ip_set_printk("range too big, %d elements (max %d)",
4499 + req->to - req->from + 1, MAX_RANGE+1);
4502 + map->first_ip = req->from;
4503 + map->last_ip = req->to;
4507 +HASH_CREATE(ipporthash, ip_set_ip_t)
4508 +HASH_DESTROY(ipporthash)
4509 +HASH_FLUSH(ipporthash, ip_set_ip_t)
4512 +__ipporthash_list_header(const struct ip_set_ipporthash *map,
4513 + struct ip_set_req_ipporthash_create *header)
4515 + header->from = map->first_ip;
4516 + header->to = map->last_ip;
4519 +HASH_LIST_HEADER(ipporthash)
4520 +HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
4521 +HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
4523 +IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
4525 +MODULE_LICENSE("GPL");
4526 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4527 +MODULE_DESCRIPTION("ipporthash type of IP sets");
4528 +module_param(limit, int, 0600);
4529 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4531 +REGISTER_MODULE(ipporthash)
4533 +++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
4535 +/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4537 + * This program is free software; you can redistribute it and/or modify
4538 + * it under the terms of the GNU General Public License version 2 as
4539 + * published by the Free Software Foundation.
4542 +/* Kernel module implementing an ip+port+ip hash set */
4544 +#include <linux/module.h>
4545 +#include <linux/moduleparam.h>
4546 +#include <linux/ip.h>
4547 +#include <linux/tcp.h>
4548 +#include <linux/udp.h>
4549 +#include <linux/skbuff.h>
4550 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4551 +#include <linux/errno.h>
4552 +#include <asm/uaccess.h>
4553 +#include <asm/bitops.h>
4554 +#include <linux/spinlock.h>
4555 +#include <linux/random.h>
4557 +#include <net/ip.h>
4559 +#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
4560 +#include <linux/netfilter_ipv4/ip_set_getport.h>
4562 +static int limit = MAX_RANGE;
4564 +#define jhash_ip2(map, i, ipport, ip1) \
4565 + jhash_2words(ipport, ip1, *(map->initval + i))
4567 +static inline __u32
4568 +ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
4569 + ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4571 + struct ip_set_ipportiphash *map = set->data;
4574 + struct ipportip *elem;
4576 + *hash_ip = pack_ip_port(map, ip, port);
4577 + DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4578 + set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4579 + if (!(*hash_ip || ip1))
4582 + for (i = 0; i < map->probes; i++) {
4583 + id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
4584 + DP("hash key: %u", id);
4585 + elem = HARRAY_ELEM(map->members, struct ipportip *, id);
4586 + if (elem->ip == *hash_ip && elem->ip1 == ip1)
4588 + /* No shortcut - there can be deleted entries. */
4594 +ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
4595 + ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4597 + struct ip_set_ipportiphash *map = set->data;
4599 + if (ip < map->first_ip || ip > map->last_ip)
4602 + return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
4605 +#define KADT_CONDITION \
4606 + ip_set_ip_t port, ip1; \
4608 + if (flags[index+2] == 0) \
4611 + port = get_port(skb, flags[index+1]); \
4612 + ip1 = ipaddr(skb, flags[index+2]); \
4614 + if (port == INVALID_PORT) \
4617 +UADT(ipportiphash, test, req->port, req->ip1)
4618 +KADT(ipportiphash, test, ipaddr, port, ip1)
4621 +__ipportip_add(struct ip_set_ipportiphash *map,
4622 + ip_set_ip_t hash_ip, ip_set_ip_t ip1)
4626 + struct ipportip *elem, *slot = NULL;
4628 + for (i = 0; i < map->probes; i++) {
4629 + probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
4630 + elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
4631 + if (elem->ip == hash_ip && elem->ip1 == ip1)
4633 + if (!(slot || elem->ip || elem->ip1))
4635 + /* There can be deleted entries, must check all slots */
4638 + slot->ip = hash_ip;
4643 + /* Trigger rehashing */
4648 +__ipportiphash_add(struct ip_set_ipportiphash *map,
4649 + struct ipportip *elem)
4651 + return __ipportip_add(map, elem->ip, elem->ip1);
4655 +ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
4656 + ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4658 + struct ip_set_ipportiphash *map = set->data;
4660 + if (map->elements > limit)
4662 + if (ip < map->first_ip || ip > map->last_ip)
4665 + *hash_ip = pack_ip_port(map, ip, port);
4666 + if (!(*hash_ip || ip1))
4669 + return __ipportip_add(map, *hash_ip, ip1);
4672 +UADT(ipportiphash, add, req->port, req->ip1)
4673 +KADT(ipportiphash, add, ipaddr, port, ip1)
4676 +__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
4677 + struct ip_set_ipportiphash *map)
4679 + tmp->first_ip = map->first_ip;
4680 + tmp->last_ip = map->last_ip;
4683 +HASH_RETRY2(ipportiphash, struct ipportip)
4686 +ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
4687 + ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4689 + struct ip_set_ipportiphash *map = set->data;
4691 + struct ipportip *elem;
4693 + if (ip < map->first_ip || ip > map->last_ip)
4696 + id = ipportiphash_id(set, hash_ip, ip, port, ip1);
4698 + if (id == UINT_MAX)
4701 + elem = HARRAY_ELEM(map->members, struct ipportip *, id);
4702 + elem->ip = elem->ip1 = 0;
4708 +UADT(ipportiphash, del, req->port, req->ip1)
4709 +KADT(ipportiphash, del, ipaddr, port, ip1)
4712 +__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
4713 + struct ip_set_ipportiphash *map)
4715 + if (req->to - req->from > MAX_RANGE) {
4716 + ip_set_printk("range too big, %d elements (max %d)",
4717 + req->to - req->from + 1, MAX_RANGE+1);
4720 + map->first_ip = req->from;
4721 + map->last_ip = req->to;
4725 +HASH_CREATE(ipportiphash, struct ipportip)
4726 +HASH_DESTROY(ipportiphash)
4727 +HASH_FLUSH(ipportiphash, struct ipportip)
4730 +__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
4731 + struct ip_set_req_ipportiphash_create *header)
4733 + header->from = map->first_ip;
4734 + header->to = map->last_ip;
4737 +HASH_LIST_HEADER(ipportiphash)
4738 +HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
4739 +HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
4741 +IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
4742 + | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
4744 +MODULE_LICENSE("GPL");
4745 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4746 +MODULE_DESCRIPTION("ipportiphash type of IP sets");
4747 +module_param(limit, int, 0600);
4748 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4750 +REGISTER_MODULE(ipportiphash)
4752 +++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
4754 +/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4756 + * This program is free software; you can redistribute it and/or modify
4757 + * it under the terms of the GNU General Public License version 2 as
4758 + * published by the Free Software Foundation.
4761 +/* Kernel module implementing an ip+port+net hash set */
4763 +#include <linux/module.h>
4764 +#include <linux/moduleparam.h>
4765 +#include <linux/ip.h>
4766 +#include <linux/tcp.h>
4767 +#include <linux/udp.h>
4768 +#include <linux/skbuff.h>
4769 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4770 +#include <linux/errno.h>
4771 +#include <asm/uaccess.h>
4772 +#include <asm/bitops.h>
4773 +#include <linux/spinlock.h>
4774 +#include <linux/random.h>
4776 +#include <net/ip.h>
4778 +#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
4779 +#include <linux/netfilter_ipv4/ip_set_getport.h>
4781 +static int limit = MAX_RANGE;
4783 +#define jhash_ip2(map, i, ipport, ip1) \
4784 + jhash_2words(ipport, ip1, *(map->initval + i))
4786 +static inline __u32
4787 +ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
4788 + ip_set_ip_t ip, ip_set_ip_t port,
4789 + ip_set_ip_t ip1, uint8_t cidr)
4791 + struct ip_set_ipportnethash *map = set->data;
4794 + struct ipportip *elem;
4796 + *hash_ip = pack_ip_port(map, ip, port);
4797 + DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4798 + set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4799 + ip1 = pack_ip_cidr(ip1, cidr);
4800 + if (!(*hash_ip || ip1))
4803 + for (i = 0; i < map->probes; i++) {
4804 + id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
4805 + DP("hash key: %u", id);
4806 + elem = HARRAY_ELEM(map->members, struct ipportip *, id);
4807 + if (elem->ip == *hash_ip && elem->ip1 == ip1)
4809 + /* No shortcut - there can be deleted entries. */
4814 +static inline __u32
4815 +ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
4816 + ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4818 + struct ip_set_ipportnethash *map = set->data;
4819 + __u32 id = UINT_MAX;
4822 + for (i = 0; i < 30 && map->cidr[i]; i++) {
4823 + id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
4825 + if (id != UINT_MAX)
4832 +ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
4833 + ip_set_ip_t ip, ip_set_ip_t port,
4834 + ip_set_ip_t ip1, uint8_t cidr)
4836 + struct ip_set_ipportnethash *map = set->data;
4838 + if (ip < map->first_ip || ip > map->last_ip)
4841 + return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
4842 + cidr) != UINT_MAX);
4846 +ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
4847 + ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4849 + struct ip_set_ipportnethash *map = set->data;
4851 + if (ip < map->first_ip || ip > map->last_ip)
4854 + return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
4858 +ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
4859 + ip_set_ip_t *hash_ip)
4861 + const struct ip_set_req_ipportnethash *req = data;
4863 + if (req->cidr <= 0 || req->cidr > 32)
4865 + return (req->cidr == 32
4866 + ? ipportnethash_test(set, hash_ip, req->ip, req->port,
4868 + : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
4869 + req->ip1, req->cidr));
4872 +#define KADT_CONDITION \
4873 + ip_set_ip_t port, ip1; \
4875 + if (flags[index+2] == 0) \
4878 + port = get_port(skb, flags[index+1]); \
4879 + ip1 = ipaddr(skb, flags[index+2]); \
4881 + if (port == INVALID_PORT) \
4884 +KADT(ipportnethash, test, ipaddr, port, ip1)
4887 +__ipportnet_add(struct ip_set_ipportnethash *map,
4888 + ip_set_ip_t hash_ip, ip_set_ip_t ip1)
4892 + struct ipportip *elem, *slot = NULL;
4894 + for (i = 0; i < map->probes; i++) {
4895 + probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
4896 + elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
4897 + if (elem->ip == hash_ip && elem->ip1 == ip1)
4899 + if (!(slot || elem->ip || elem->ip1))
4901 + /* There can be deleted entries, must check all slots */
4904 + slot->ip = hash_ip;
4909 + /* Trigger rehashing */
4914 +__ipportnethash_add(struct ip_set_ipportnethash *map,
4915 + struct ipportip *elem)
4917 + return __ipportnet_add(map, elem->ip, elem->ip1);
4921 +ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
4922 + ip_set_ip_t ip, ip_set_ip_t port,
4923 + ip_set_ip_t ip1, uint8_t cidr)
4925 + struct ip_set_ipportnethash *map = set->data;
4929 + if (map->elements > limit)
4931 + if (ip < map->first_ip || ip > map->last_ip)
4933 + if (cidr <= 0 || cidr >= 32)
4935 + if (map->nets[cidr-1] == UINT16_MAX)
4938 + *hash_ip = pack_ip_port(map, ip, port);
4939 + ip1 = pack_ip_cidr(ip1, cidr);
4940 + if (!(*hash_ip || ip1))
4943 + ret =__ipportnet_add(map, *hash_ip, ip1);
4945 + if (!map->nets[cidr-1]++)
4946 + add_cidr_size(map->cidr, cidr);
4952 +#undef KADT_CONDITION
4953 +#define KADT_CONDITION \
4954 + struct ip_set_ipportnethash *map = set->data; \
4955 + uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
4956 + ip_set_ip_t port, ip1; \
4958 + if (flags[index+2] == 0) \
4961 + port = get_port(skb, flags[index+1]); \
4962 + ip1 = ipaddr(skb, flags[index+2]); \
4964 + if (port == INVALID_PORT) \
4967 +UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
4968 +KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
4971 +__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
4972 + struct ip_set_ipportnethash *map)
4974 + tmp->first_ip = map->first_ip;
4975 + tmp->last_ip = map->last_ip;
4976 + memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
4977 + memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
4980 +HASH_RETRY2(ipportnethash, struct ipportip)
4983 +ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
4984 + ip_set_ip_t ip, ip_set_ip_t port,
4985 + ip_set_ip_t ip1, uint8_t cidr)
4987 + struct ip_set_ipportnethash *map = set->data;
4989 + struct ipportip *elem;
4991 + if (ip < map->first_ip || ip > map->last_ip)
4995 + if (cidr <= 0 || cidr >= 32)
4998 + id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
5000 + if (id == UINT_MAX)
5003 + elem = HARRAY_ELEM(map->members, struct ipportip *, id);
5004 + elem->ip = elem->ip1 = 0;
5006 + if (!map->nets[cidr-1]--)
5007 + del_cidr_size(map->cidr, cidr);
5012 +UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
5013 +KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
5016 +__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
5017 + struct ip_set_ipportnethash *map)
5019 + if (req->to - req->from > MAX_RANGE) {
5020 + ip_set_printk("range too big, %d elements (max %d)",
5021 + req->to - req->from + 1, MAX_RANGE+1);
5024 + map->first_ip = req->from;
5025 + map->last_ip = req->to;
5026 + memset(map->cidr, 0, sizeof(map->cidr));
5027 + memset(map->nets, 0, sizeof(map->nets));
5031 +HASH_CREATE(ipportnethash, struct ipportip)
5032 +HASH_DESTROY(ipportnethash)
5033 +HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
5036 +__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
5037 + struct ip_set_req_ipportnethash_create *header)
5039 + header->from = map->first_ip;
5040 + header->to = map->last_ip;
5043 +HASH_LIST_HEADER(ipportnethash)
5045 +HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
5046 +HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
5048 +IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
5049 + | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
5051 +MODULE_LICENSE("GPL");
5052 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5053 +MODULE_DESCRIPTION("ipportnethash type of IP sets");
5054 +module_param(limit, int, 0600);
5055 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5057 +REGISTER_MODULE(ipportnethash)
5059 +++ b/net/ipv4/netfilter/ip_set_iptree.c
5061 +/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5063 + * This program is free software; you can redistribute it and/or modify
5064 + * it under the terms of the GNU General Public License version 2 as
5065 + * published by the Free Software Foundation.
5068 +/* Kernel module implementing an IP set type: the iptree type */
5070 +#include <linux/module.h>
5071 +#include <linux/moduleparam.h>
5072 +#include <linux/ip.h>
5073 +#include <linux/skbuff.h>
5074 +#include <linux/slab.h>
5075 +#include <linux/delay.h>
5076 +#include <linux/errno.h>
5077 +#include <asm/uaccess.h>
5078 +#include <asm/bitops.h>
5079 +#include <linux/spinlock.h>
5080 +#include <linux/timer.h>
5082 +#include <linux/netfilter_ipv4/ip_set.h>
5083 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
5084 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
5086 +static int limit = MAX_RANGE;
5088 +/* Garbage collection interval in seconds: */
5089 +#define IPTREE_GC_TIME 5*60
5090 +/* Sleep so many milliseconds before trying again
5091 + * to delete the gc timer when destroying/flushing a set */
5092 +#define IPTREE_DESTROY_SLEEP 100
5094 +static __KMEM_CACHE_T__ *branch_cachep;
5095 +static __KMEM_CACHE_T__ *leaf_cachep;
5098 +#if defined(__LITTLE_ENDIAN)
5099 +#define ABCD(a,b,c,d,addrp) do { \
5100 + a = ((unsigned char *)addrp)[3]; \
5101 + b = ((unsigned char *)addrp)[2]; \
5102 + c = ((unsigned char *)addrp)[1]; \
5103 + d = ((unsigned char *)addrp)[0]; \
5105 +#elif defined(__BIG_ENDIAN)
5106 +#define ABCD(a,b,c,d,addrp) do { \
5107 + a = ((unsigned char *)addrp)[0]; \
5108 + b = ((unsigned char *)addrp)[1]; \
5109 + c = ((unsigned char *)addrp)[2]; \
5110 + d = ((unsigned char *)addrp)[3]; \
5113 +#error "Please fix asm/byteorder.h"
5114 +#endif /* __LITTLE_ENDIAN */
5116 +#define TESTIP_WALK(map, elem, branch) do { \
5117 + if ((map)->tree[elem]) { \
5118 + branch = (map)->tree[elem]; \
5124 +iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5126 + struct ip_set_iptree *map = set->data;
5127 + struct ip_set_iptreeb *btree;
5128 + struct ip_set_iptreec *ctree;
5129 + struct ip_set_iptreed *dtree;
5130 + unsigned char a,b,c,d;
5136 + ABCD(a, b, c, d, hash_ip);
5137 + DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
5138 + TESTIP_WALK(map, a, btree);
5139 + TESTIP_WALK(btree, b, ctree);
5140 + TESTIP_WALK(ctree, c, dtree);
5141 + DP("%lu %lu", dtree->expires[d], jiffies);
5142 + return dtree->expires[d]
5144 + || time_after(dtree->expires[d], jiffies));
5147 +#define KADT_CONDITION
5150 +KADT(iptree, test, ipaddr)
5152 +#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
5153 + if ((map)->tree[elem]) { \
5154 + DP("found %u", elem); \
5155 + branch = (map)->tree[elem]; \
5157 + branch = (type *) \
5158 + kmem_cache_alloc(cachep, GFP_ATOMIC); \
5159 + if (branch == NULL) \
5161 + memset(branch, 0, sizeof(*branch)); \
5162 + (map)->tree[elem] = branch; \
5163 + DP("alloc %u", elem); \
5168 +iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
5169 + ip_set_ip_t ip, unsigned int timeout)
5171 + struct ip_set_iptree *map = set->data;
5172 + struct ip_set_iptreeb *btree;
5173 + struct ip_set_iptreec *ctree;
5174 + struct ip_set_iptreed *dtree;
5175 + unsigned char a,b,c,d;
5178 + if (!ip || map->elements >= limit)
5179 + /* We could call the garbage collector
5180 + * but it's probably overkill */
5184 + ABCD(a, b, c, d, hash_ip);
5185 + DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
5186 + ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
5187 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
5188 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
5189 + if (dtree->expires[d]
5190 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5192 + if (map->timeout && timeout == 0)
5193 + timeout = map->timeout;
5194 + dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
5195 + /* Rare case: the computed expiry is exactly 0 - bump to 1 so the entry stays valid */
5196 + if (dtree->expires[d] == 0)
5197 + dtree->expires[d] = 1;
5198 + DP("%u %lu", d, dtree->expires[d]);
5204 +UADT(iptree, add, req->timeout)
5205 +KADT(iptree, add, ipaddr, 0)
5207 +#define DELIP_WALK(map, elem, branch) do { \
5208 + if ((map)->tree[elem]) { \
5209 + branch = (map)->tree[elem]; \
5215 +iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5217 + struct ip_set_iptree *map = set->data;
5218 + struct ip_set_iptreeb *btree;
5219 + struct ip_set_iptreec *ctree;
5220 + struct ip_set_iptreed *dtree;
5221 + unsigned char a,b,c,d;
5227 + ABCD(a, b, c, d, hash_ip);
5228 + DELIP_WALK(map, a, btree);
5229 + DELIP_WALK(btree, b, ctree);
5230 + DELIP_WALK(ctree, c, dtree);
5232 + if (dtree->expires[d]) {
5233 + dtree->expires[d] = 0;
5241 +KADT(iptree, del, ipaddr)
5243 +#define LOOP_WALK_BEGIN(map, i, branch) \
5244 + for (i = 0; i < 256; i++) { \
5245 + if (!(map)->tree[i]) \
5247 + branch = (map)->tree[i]
5249 +#define LOOP_WALK_END }
5252 +ip_tree_gc(unsigned long ul_set)
5254 + struct ip_set *set = (struct ip_set *) ul_set;
5255 + struct ip_set_iptree *map = set->data;
5256 + struct ip_set_iptreeb *btree;
5257 + struct ip_set_iptreec *ctree;
5258 + struct ip_set_iptreed *dtree;
5259 + unsigned int a,b,c,d;
5260 + unsigned char i,j,k;
5263 + DP("gc: %s", set->name);
5264 + write_lock_bh(&set->lock);
5265 + LOOP_WALK_BEGIN(map, a, btree);
5266 + LOOP_WALK_BEGIN(btree, b, ctree);
5267 + LOOP_WALK_BEGIN(ctree, c, dtree);
5268 + for (d = 0; d < 256; d++) {
5269 + if (dtree->expires[d]) {
5270 + DP("gc: %u %u %u %u: expires %lu jiffies %lu",
5272 + dtree->expires[d], jiffies);
5274 + && time_before(dtree->expires[d], jiffies)) {
5275 + dtree->expires[d] = 0;
5282 + DP("gc: %s: leaf %u %u %u empty",
5283 + set->name, a, b, c);
5284 + kmem_cache_free(leaf_cachep, dtree);
5285 + ctree->tree[c] = NULL;
5287 + DP("gc: %s: leaf %u %u %u not empty",
5288 + set->name, a, b, c);
5294 + DP("gc: %s: branch %u %u empty",
5296 + kmem_cache_free(branch_cachep, ctree);
5297 + btree->tree[b] = NULL;
5299 + DP("gc: %s: branch %u %u not empty",
5306 + DP("gc: %s: branch %u empty",
5308 + kmem_cache_free(branch_cachep, btree);
5309 + map->tree[a] = NULL;
5311 + DP("gc: %s: branch %u not empty",
5316 + write_unlock_bh(&set->lock);
5318 + map->gc.expires = jiffies + map->gc_interval * HZ;
5319 + add_timer(&map->gc);
5323 +init_gc_timer(struct ip_set *set)
5325 + struct ip_set_iptree *map = set->data;
5327 + /* Even if there is no timeout for the entries,
5328 + * we still have to call gc because delete
5329 + * does not clean up empty branches */
5330 + map->gc_interval = IPTREE_GC_TIME;
5331 + init_timer(&map->gc);
5332 + map->gc.data = (unsigned long) set;
5333 + map->gc.function = ip_tree_gc;
5334 + map->gc.expires = jiffies + map->gc_interval * HZ;
5335 + add_timer(&map->gc);
5339 +iptree_create(struct ip_set *set, const void *data, u_int32_t size)
5341 + const struct ip_set_req_iptree_create *req = data;
5342 + struct ip_set_iptree *map;
5344 + if (size != sizeof(struct ip_set_req_iptree_create)) {
5345 + ip_set_printk("data length wrong (want %zu, have %lu)",
5346 + sizeof(struct ip_set_req_iptree_create),
5347 + (unsigned long)size);
5351 + map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
5353 + DP("out of memory for %zu bytes",
5354 + sizeof(struct ip_set_iptree));
5357 + memset(map, 0, sizeof(*map));
5358 + map->timeout = req->timeout;
5359 + map->elements = 0;
5362 + init_gc_timer(set);
5368 +__flush(struct ip_set_iptree *map)
5370 + struct ip_set_iptreeb *btree;
5371 + struct ip_set_iptreec *ctree;
5372 + struct ip_set_iptreed *dtree;
5373 + unsigned int a,b,c;
5375 + LOOP_WALK_BEGIN(map, a, btree);
5376 + LOOP_WALK_BEGIN(btree, b, ctree);
5377 + LOOP_WALK_BEGIN(ctree, c, dtree);
5378 + kmem_cache_free(leaf_cachep, dtree);
5380 + kmem_cache_free(branch_cachep, ctree);
5382 + kmem_cache_free(branch_cachep, btree);
5384 + map->elements = 0;
5388 +iptree_destroy(struct ip_set *set)
5390 + struct ip_set_iptree *map = set->data;
5392 + /* gc might be running */
5393 + while (!del_timer(&map->gc))
5394 + msleep(IPTREE_DESTROY_SLEEP);
5401 +iptree_flush(struct ip_set *set)
5403 + struct ip_set_iptree *map = set->data;
5404 + unsigned int timeout = map->timeout;
5406 + /* gc might be running */
5407 + while (!del_timer(&map->gc))
5408 + msleep(IPTREE_DESTROY_SLEEP);
5410 + memset(map, 0, sizeof(*map));
5411 + map->timeout = timeout;
5413 + init_gc_timer(set);
5417 +iptree_list_header(const struct ip_set *set, void *data)
5419 + const struct ip_set_iptree *map = set->data;
5420 + struct ip_set_req_iptree_create *header = data;
5422 + header->timeout = map->timeout;
5426 +iptree_list_members_size(const struct ip_set *set)
5428 + const struct ip_set_iptree *map = set->data;
5429 + struct ip_set_iptreeb *btree;
5430 + struct ip_set_iptreec *ctree;
5431 + struct ip_set_iptreed *dtree;
5432 + unsigned int a,b,c,d;
5433 + unsigned int count = 0;
5435 + LOOP_WALK_BEGIN(map, a, btree);
5436 + LOOP_WALK_BEGIN(btree, b, ctree);
5437 + LOOP_WALK_BEGIN(ctree, c, dtree);
5438 + for (d = 0; d < 256; d++) {
5439 + if (dtree->expires[d]
5440 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5447 + DP("members %u", count);
5448 + return (count * sizeof(struct ip_set_req_iptree));
5452 +iptree_list_members(const struct ip_set *set, void *data)
5454 + const struct ip_set_iptree *map = set->data;
5455 + struct ip_set_iptreeb *btree;
5456 + struct ip_set_iptreec *ctree;
5457 + struct ip_set_iptreed *dtree;
5458 + unsigned int a,b,c,d;
5459 + size_t offset = 0;
5460 + struct ip_set_req_iptree *entry;
5462 + LOOP_WALK_BEGIN(map, a, btree);
5463 + LOOP_WALK_BEGIN(btree, b, ctree);
5464 + LOOP_WALK_BEGIN(ctree, c, dtree);
5465 + for (d = 0; d < 256; d++) {
5466 + if (dtree->expires[d]
5467 + && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
5468 + entry = data + offset;
5469 + entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
5470 + entry->timeout = !map->timeout ? 0
5471 + : (dtree->expires[d] - jiffies)/HZ;
5472 + offset += sizeof(struct ip_set_req_iptree);
5480 +IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
5482 +MODULE_LICENSE("GPL");
5483 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5484 +MODULE_DESCRIPTION("iptree type of IP sets");
5485 +module_param(limit, int, 0600);
5486 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5488 +static int __init ip_set_iptree_init(void)
5492 + branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
5493 + sizeof(struct ip_set_iptreeb));
5494 + if (!branch_cachep) {
5495 + printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
5499 + leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
5500 + sizeof(struct ip_set_iptreed));
5501 + if (!leaf_cachep) {
5502 + printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
5506 + ret = ip_set_register_set_type(&ip_set_iptree);
5510 + kmem_cache_destroy(leaf_cachep);
5512 + kmem_cache_destroy(branch_cachep);
5517 +static void __exit ip_set_iptree_fini(void)
5519 + /* FIXME: possible race with ip_set_create() */
5520 + ip_set_unregister_set_type(&ip_set_iptree);
5521 + kmem_cache_destroy(leaf_cachep);
5522 + kmem_cache_destroy(branch_cachep);
5525 +module_init(ip_set_iptree_init);
5526 +module_exit(ip_set_iptree_fini);
5528 +++ b/net/ipv4/netfilter/ip_set_iptreemap.c
5530 +/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
5532 + * This program is free software; you can redistribute it and/or modify it
5533 + * under the terms of the GNU General Public License version 2 as published by
5534 + * the Free Software Foundation.
5537 +/* This module implements the iptreemap ipset type. It uses bitmaps to
5538 + * represent every single IPv4 address as a bit. The bitmaps are managed in a
5539 + * tree structure, where the first three octets of an address are used as an
5540 + * index to find the bitmap and the last octet is used as the bit number.
5543 +#include <linux/kernel.h>
5544 +#include <linux/module.h>
5545 +#include <linux/ip.h>
5546 +#include <linux/skbuff.h>
5547 +#include <linux/slab.h>
5548 +#include <linux/delay.h>
5549 +#include <linux/errno.h>
5550 +#include <asm/uaccess.h>
5551 +#include <asm/bitops.h>
5552 +#include <linux/spinlock.h>
5553 +#include <linux/timer.h>
5555 +#include <linux/netfilter_ipv4/ip_set.h>
5556 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
5557 +#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
5559 +#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
5560 +#define IPTREEMAP_DESTROY_SLEEP (100)
5562 +static __KMEM_CACHE_T__ *cachep_b;
5563 +static __KMEM_CACHE_T__ *cachep_c;
5564 +static __KMEM_CACHE_T__ *cachep_d;
5566 +static struct ip_set_iptreemap_d *fullbitmap_d;
5567 +static struct ip_set_iptreemap_c *fullbitmap_c;
5568 +static struct ip_set_iptreemap_b *fullbitmap_b;
5570 +#if defined(__LITTLE_ENDIAN)
5571 +#define ABCD(a, b, c, d, addr) \
5573 + a = ((unsigned char *)addr)[3]; \
5574 + b = ((unsigned char *)addr)[2]; \
5575 + c = ((unsigned char *)addr)[1]; \
5576 + d = ((unsigned char *)addr)[0]; \
5578 +#elif defined(__BIG_ENDIAN)
5579 +#define ABCD(a,b,c,d,addrp) do { \
5580 + a = ((unsigned char *)addrp)[0]; \
5581 + b = ((unsigned char *)addrp)[1]; \
5582 + c = ((unsigned char *)addrp)[2]; \
5583 + d = ((unsigned char *)addrp)[3]; \
5586 +#error "Please fix asm/byteorder.h"
5587 +#endif /* __LITTLE_ENDIAN */
5589 +#define TESTIP_WALK(map, elem, branch, full) \
5591 + branch = (map)->tree[elem]; \
5594 + else if (branch == full) \
5598 +#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
5600 + branch = (map)->tree[elem]; \
5602 + branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
5605 + memset(branch, 0, sizeof(*branch)); \
5606 + (map)->tree[elem] = branch; \
5607 + } else if (branch == full) { \
5612 +#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
5613 + for (a = a1; a <= a2; a++) { \
5614 + branch = (map)->tree[a]; \
5615 + if (branch != full) { \
5616 + if ((a > a1 && a < a2) || (hint)) { \
5619 + (map)->tree[a] = full; \
5621 + } else if (!branch) { \
5622 + branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
5625 + memset(branch, 0, sizeof(*branch)); \
5626 + (map)->tree[a] = branch; \
5629 +#define ADDIP_RANGE_LOOP_END() \
5633 +#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
5635 + branch = (map)->tree[elem]; \
5638 + } else if (branch == full) { \
5639 + branch = kmem_cache_alloc(cachep, flags); \
5642 + memcpy(branch, full, sizeof(*full)); \
5643 + (map)->tree[elem] = branch; \
5647 +#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
5648 + for (a = a1; a <= a2; a++) { \
5649 + branch = (map)->tree[a]; \
5651 + if ((a > a1 && a < a2) || (hint)) { \
5652 + if (branch != full) \
5654 + (map)->tree[a] = NULL; \
5656 + } else if (branch == full) { \
5657 + branch = kmem_cache_alloc(cachep, flags); \
5660 + memcpy(branch, full, sizeof(*branch)); \
5661 + (map)->tree[a] = branch; \
5664 +#define DELIP_RANGE_LOOP_END() \
5668 +#define LOOP_WALK_BEGIN(map, i, branch) \
5669 + for (i = 0; i < 256; i++) { \
5670 + branch = (map)->tree[i]; \
5671 + if (likely(!branch)) \
5674 +#define LOOP_WALK_END() \
5677 +#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
5679 + for (i = 0; i < 256; i++) { \
5680 + branch = (map)->tree[i]; \
5681 + if (likely(!branch)) \
5684 + if (branch == full) { \
5689 +#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
5690 + if (-256 == count) { \
5691 + kmem_cache_free(cachep, branch); \
5692 + (map)->tree[i] = NULL; \
5693 + } else if (256 == count) { \
5694 + kmem_cache_free(cachep, branch); \
5695 + (map)->tree[i] = full; \
5699 +#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
5700 + for (i = 0; i < 256; i++) { \
5701 + if (!(map)->tree[i]) { \
5708 + branch = (map)->tree[i];
5710 +#define LOOP_WALK_END_COUNT() \
5713 +#define GETVALUE1(a, a1, b1, r) \
5714 + (a == a1 ? b1 : r)
5716 +#define GETVALUE2(a, b, a1, b1, c1, r) \
5717 + (a == a1 && b == b1 ? c1 : r)
5719 +#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
5720 + (a == a1 && b == b1 && c == c1 ? d1 : r)
5722 +#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
5724 + GETVALUE1(a, a1, b1, 0) == 0 \
5725 + && GETVALUE1(a, a2, b2, 255) == 255 \
5732 +#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
5734 + GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
5735 + && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
5740 +#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
5742 + GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
5743 + && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
5748 +free_d(struct ip_set_iptreemap_d *map)
5750 + kmem_cache_free(cachep_d, map);
5754 +free_c(struct ip_set_iptreemap_c *map)
5756 + struct ip_set_iptreemap_d *dtree;
5759 + LOOP_WALK_BEGIN(map, i, dtree) {
5760 + if (dtree != fullbitmap_d)
5762 + } LOOP_WALK_END();
5764 + kmem_cache_free(cachep_c, map);
5768 +free_b(struct ip_set_iptreemap_b *map)
5770 + struct ip_set_iptreemap_c *ctree;
5773 + LOOP_WALK_BEGIN(map, i, ctree) {
5774 + if (ctree != fullbitmap_c)
5776 + } LOOP_WALK_END();
5778 + kmem_cache_free(cachep_b, map);
5782 +iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5784 + struct ip_set_iptreemap *map = set->data;
5785 + struct ip_set_iptreemap_b *btree;
5786 + struct ip_set_iptreemap_c *ctree;
5787 + struct ip_set_iptreemap_d *dtree;
5788 + unsigned char a, b, c, d;
5792 + ABCD(a, b, c, d, hash_ip);
5794 + TESTIP_WALK(map, a, btree, fullbitmap_b);
5795 + TESTIP_WALK(btree, b, ctree, fullbitmap_c);
5796 + TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
5798 + return !!test_bit(d, (void *) dtree->bitmap);
5801 +#define KADT_CONDITION
5803 +UADT(iptreemap, test)
5804 +KADT(iptreemap, test, ipaddr)
5807 +__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5809 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5810 + struct ip_set_iptreemap_b *btree;
5811 + struct ip_set_iptreemap_c *ctree;
5812 + struct ip_set_iptreemap_d *dtree;
5813 + unsigned char a, b, c, d;
5817 + ABCD(a, b, c, d, hash_ip);
5819 + ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
5820 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
5821 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
5823 + if (__test_and_set_bit(d, (void *) dtree->bitmap))
5826 + __set_bit(b, (void *) btree->dirty);
5832 +iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
5833 + ip_set_ip_t start, ip_set_ip_t end)
5835 + struct ip_set_iptreemap *map = set->data;
5836 + struct ip_set_iptreemap_b *btree;
5837 + struct ip_set_iptreemap_c *ctree;
5838 + struct ip_set_iptreemap_d *dtree;
5839 + unsigned int a, b, c, d;
5840 + unsigned char a1, b1, c1, d1;
5841 + unsigned char a2, b2, c2, d2;
5844 + return __addip_single(set, hash_ip, start);
5848 + ABCD(a1, b1, c1, d1, &start);
5849 + ABCD(a2, b2, c2, d2, &end);
5851 + /* This is sooo ugly... */
5852 + ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
5853 + ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
5854 + ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
5855 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5856 + __set_bit(d, (void *) dtree->bitmap);
5857 + __set_bit(b, (void *) btree->dirty);
5858 + } ADDIP_RANGE_LOOP_END();
5859 + } ADDIP_RANGE_LOOP_END();
5860 + } ADDIP_RANGE_LOOP_END();
5865 +UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
5866 +KADT(iptreemap, add, ipaddr, ip)
5869 +__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
5870 + ip_set_ip_t ip, gfp_t flags)
5872 + struct ip_set_iptreemap *map = set->data;
5873 + struct ip_set_iptreemap_b *btree;
5874 + struct ip_set_iptreemap_c *ctree;
5875 + struct ip_set_iptreemap_d *dtree;
5876 + unsigned char a,b,c,d;
5880 + ABCD(a, b, c, d, hash_ip);
5882 + DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
5883 + DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
5884 + DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
5886 + if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
5889 + __set_bit(b, (void *) btree->dirty);
5895 +iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
5896 + ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
5898 + struct ip_set_iptreemap *map = set->data;
5899 + struct ip_set_iptreemap_b *btree;
5900 + struct ip_set_iptreemap_c *ctree;
5901 + struct ip_set_iptreemap_d *dtree;
5902 + unsigned int a, b, c, d;
5903 + unsigned char a1, b1, c1, d1;
5904 + unsigned char a2, b2, c2, d2;
5907 + return __delip_single(set, hash_ip, start, flags);
5911 + ABCD(a1, b1, c1, d1, &start);
5912 + ABCD(a2, b2, c2, d2, &end);
5914 + /* This is sooo ugly... */
5915 + DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
5916 + DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
5917 + DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
5918 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5919 + __clear_bit(d, (void *) dtree->bitmap);
5920 + __set_bit(b, (void *) btree->dirty);
5921 + } DELIP_RANGE_LOOP_END();
5922 + } DELIP_RANGE_LOOP_END();
5923 + } DELIP_RANGE_LOOP_END();
5928 +UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
5929 +KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
5931 +/* Check the status of the bitmap
5932 + * -1 == all bits cleared
5933 + * 1 == all bits set
5934 + * 0 == anything else
5937 +bitmap_status(struct ip_set_iptreemap_d *dtree)
5939 + unsigned char first = dtree->bitmap[0];
5942 + for (a = 1; a < 32; a++)
5943 + if (dtree->bitmap[a] != first)
5946 + return (first == 0 ? -1 : (first == 255 ? 1 : 0));
5950 +gc(unsigned long addr)
5952 + struct ip_set *set = (struct ip_set *) addr;
5953 + struct ip_set_iptreemap *map = set->data;
5954 + struct ip_set_iptreemap_b *btree;
5955 + struct ip_set_iptreemap_c *ctree;
5956 + struct ip_set_iptreemap_d *dtree;
5957 + unsigned int a, b, c;
5960 + write_lock_bh(&set->lock);
5962 + LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
5963 + LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
5964 + if (!__test_and_clear_bit(b, (void *) btree->dirty))
5966 + LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
5967 + switch (bitmap_status(dtree)) {
5969 + kmem_cache_free(cachep_d, dtree);
5970 + ctree->tree[c] = NULL;
5974 + kmem_cache_free(cachep_d, dtree);
5975 + ctree->tree[c] = fullbitmap_d;
5979 + } LOOP_WALK_END();
5980 + } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
5981 + } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
5983 + write_unlock_bh(&set->lock);
5985 + map->gc.expires = jiffies + map->gc_interval * HZ;
5986 + add_timer(&map->gc);
5990 +init_gc_timer(struct ip_set *set)
5992 + struct ip_set_iptreemap *map = set->data;
5994 + init_timer(&map->gc);
5995 + map->gc.data = (unsigned long) set;
5996 + map->gc.function = gc;
5997 + map->gc.expires = jiffies + map->gc_interval * HZ;
5998 + add_timer(&map->gc);
6002 +iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
6004 + const struct ip_set_req_iptreemap_create *req = data;
6005 + struct ip_set_iptreemap *map;
6007 + map = kzalloc(sizeof(*map), GFP_KERNEL);
6011 + map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
6014 + init_gc_timer(set);
6020 +__flush(struct ip_set_iptreemap *map)
6022 + struct ip_set_iptreemap_b *btree;
6025 + LOOP_WALK_BEGIN(map, a, btree);
6026 + if (btree != fullbitmap_b)
6032 +iptreemap_destroy(struct ip_set *set)
6034 + struct ip_set_iptreemap *map = set->data;
6036 + while (!del_timer(&map->gc))
6037 + msleep(IPTREEMAP_DESTROY_SLEEP);
6046 +iptreemap_flush(struct ip_set *set)
6048 + struct ip_set_iptreemap *map = set->data;
6050 + while (!del_timer(&map->gc))
6051 + msleep(IPTREEMAP_DESTROY_SLEEP);
6055 + memset(map, 0, sizeof(*map));
6057 + init_gc_timer(set);
6061 +iptreemap_list_header(const struct ip_set *set, void *data)
6063 + struct ip_set_iptreemap *map = set->data;
6064 + struct ip_set_req_iptreemap_create *header = data;
6066 + header->gc_interval = map->gc_interval;
6070 +iptreemap_list_members_size(const struct ip_set *set)
6072 + struct ip_set_iptreemap *map = set->data;
6073 + struct ip_set_iptreemap_b *btree;
6074 + struct ip_set_iptreemap_c *ctree;
6075 + struct ip_set_iptreemap_d *dtree;
6076 + unsigned int a, b, c, d, inrange = 0, count = 0;
6078 + LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
6079 + LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
6080 + LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
6081 + for (d = 0; d < 256; d++) {
6082 + if (test_bit(d, (void *) dtree->bitmap)) {
6084 + } else if (inrange) {
6089 + } LOOP_WALK_END_COUNT();
6090 + } LOOP_WALK_END_COUNT();
6091 + } LOOP_WALK_END_COUNT();
6096 + return (count * sizeof(struct ip_set_req_iptreemap));
6099 +static inline u_int32_t
6100 +add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
6102 + struct ip_set_req_iptreemap *entry = data + offset;
6104 + entry->ip = start;
6107 + return sizeof(*entry);
6111 +iptreemap_list_members(const struct ip_set *set, void *data)
6113 + struct ip_set_iptreemap *map = set->data;
6114 + struct ip_set_iptreemap_b *btree;
6115 + struct ip_set_iptreemap_c *ctree;
6116 + struct ip_set_iptreemap_d *dtree;
6117 + unsigned int a, b, c, d, inrange = 0;
6118 + size_t offset = 0;
6119 + ip_set_ip_t start = 0, end = 0, ip;
6121 + LOOP_WALK_BEGIN(map, a, btree) {
6122 + LOOP_WALK_BEGIN(btree, b, ctree) {
6123 + LOOP_WALK_BEGIN(ctree, c, dtree) {
6124 + for (d = 0; d < 256; d++) {
6125 + if (test_bit(d, (void *) dtree->bitmap)) {
6126 + ip = ((a << 24) | (b << 16) | (c << 8) | d);
6130 + } else if (end < ip - 1) {
6131 + offset += add_member(data, offset, start, end);
6135 + } else if (inrange) {
6136 + offset += add_member(data, offset, start, end);
6140 + } LOOP_WALK_END();
6141 + } LOOP_WALK_END();
6142 + } LOOP_WALK_END();
6145 + add_member(data, offset, start, end);
6148 +IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
6150 +MODULE_LICENSE("GPL");
6151 +MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
6152 +MODULE_DESCRIPTION("iptreemap type of IP sets");
6154 +static int __init ip_set_iptreemap_init(void)
6156 + int ret = -ENOMEM;
6159 + cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
6160 + sizeof(struct ip_set_iptreemap_b));
6162 + ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
6166 + cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
6167 + sizeof(struct ip_set_iptreemap_c));
6169 + ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
6173 + cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
6174 + sizeof(struct ip_set_iptreemap_d));
6176 + ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
6180 + fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
6181 + if (!fullbitmap_d)
6184 + fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
6185 + if (!fullbitmap_c)
6188 + fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
6189 + if (!fullbitmap_b)
6192 + ret = ip_set_register_set_type(&ip_set_iptreemap);
6196 + /* Now init our global bitmaps */
6197 + memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
6199 + for (a = 0; a < 256; a++)
6200 + fullbitmap_c->tree[a] = fullbitmap_d;
6202 + for (a = 0; a < 256; a++)
6203 + fullbitmap_b->tree[a] = fullbitmap_c;
6204 + memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
6209 + kmem_cache_free(cachep_b, fullbitmap_b);
6211 + kmem_cache_free(cachep_c, fullbitmap_c);
6213 + kmem_cache_free(cachep_d, fullbitmap_d);
6215 + kmem_cache_destroy(cachep_d);
6217 + kmem_cache_destroy(cachep_c);
6219 + kmem_cache_destroy(cachep_b);
6225 +static void __exit ip_set_iptreemap_fini(void)
6227 + ip_set_unregister_set_type(&ip_set_iptreemap);
6228 + kmem_cache_free(cachep_d, fullbitmap_d);
6229 + kmem_cache_free(cachep_c, fullbitmap_c);
6230 + kmem_cache_free(cachep_b, fullbitmap_b);
6231 + kmem_cache_destroy(cachep_d);
6232 + kmem_cache_destroy(cachep_c);
6233 + kmem_cache_destroy(cachep_b);
6236 +module_init(ip_set_iptreemap_init);
6237 +module_exit(ip_set_iptreemap_fini);
6239 +++ b/net/ipv4/netfilter/ip_set_macipmap.c
6241 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
6242 + * Patrick Schaaf <bof@bof.de>
6243 + * Martin Josefsson <gandalf@wlug.westbo.se>
6244 + * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6246 + * This program is free software; you can redistribute it and/or modify
6247 + * it under the terms of the GNU General Public License version 2 as
6248 + * published by the Free Software Foundation.
6251 +/* Kernel module implementing an IP set type: the macipmap type */
6253 +#include <linux/module.h>
6254 +#include <linux/ip.h>
6255 +#include <linux/skbuff.h>
6256 +#include <linux/errno.h>
6257 +#include <asm/uaccess.h>
6258 +#include <asm/bitops.h>
6259 +#include <linux/spinlock.h>
6260 +#include <linux/if_ether.h>
6262 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
6265 +macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
6266 + ip_set_ip_t *hash_ip)
6268 + const struct ip_set_macipmap *map = set->data;
6269 + const struct ip_set_macip *table = map->members;
6270 + const struct ip_set_req_macipmap *req = data;
6272 + if (req->ip < map->first_ip || req->ip > map->last_ip)
6275 + *hash_ip = req->ip;
6276 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6277 + set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
6278 + if (table[req->ip - map->first_ip].match) {
6279 + return (memcmp(req->ethernet,
6280 + &table[req->ip - map->first_ip].ethernet,
6283 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6288 +macipmap_ktest(struct ip_set *set,
6289 + const struct sk_buff *skb,
6290 + ip_set_ip_t *hash_ip,
6291 + const u_int32_t *flags,
6292 + unsigned char index)
6294 + const struct ip_set_macipmap *map = set->data;
6295 + const struct ip_set_macip *table = map->members;
6298 + ip = ipaddr(skb, flags[index]);
6300 + if (ip < map->first_ip || ip > map->last_ip)
6304 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6305 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
6306 + if (table[ip - map->first_ip].match) {
6307 + /* Is mac pointer valid?
6308 + * If so, compare... */
6309 + return (skb_mac_header(skb) >= skb->head
6310 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
6311 + && (memcmp(eth_hdr(skb)->h_source,
6312 + &table[ip - map->first_ip].ethernet,
6315 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6319 +/* returns 0 on success */
6321 +macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
6322 + ip_set_ip_t ip, const unsigned char *ethernet)
6324 + struct ip_set_macipmap *map = set->data;
6325 + struct ip_set_macip *table = map->members;
6327 + if (ip < map->first_ip || ip > map->last_ip)
6329 + if (table[ip - map->first_ip].match)
6333 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6334 + memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
6335 + table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
6339 +#define KADT_CONDITION \
6340 + if (!(skb_mac_header(skb) >= skb->head \
6341 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
6344 +UADT(macipmap, add, req->ethernet)
6345 +KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
6348 +macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
6350 + struct ip_set_macipmap *map = set->data;
6351 + struct ip_set_macip *table = map->members;
6353 + if (ip < map->first_ip || ip > map->last_ip)
6355 + if (!table[ip - map->first_ip].match)
6359 + table[ip - map->first_ip].match = 0;
6360 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6364 +#undef KADT_CONDITION
6365 +#define KADT_CONDITION
6367 +UADT(macipmap, del)
6368 +KADT(macipmap, del, ipaddr)
6371 +__macipmap_create(const struct ip_set_req_macipmap_create *req,
6372 + struct ip_set_macipmap *map)
6374 + if (req->to - req->from > MAX_RANGE) {
6375 + ip_set_printk("range too big, %d elements (max %d)",
6376 + req->to - req->from + 1, MAX_RANGE+1);
6379 + map->flags = req->flags;
6380 + return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
6383 +BITMAP_CREATE(macipmap)
6384 +BITMAP_DESTROY(macipmap)
6385 +BITMAP_FLUSH(macipmap)
6388 +__macipmap_list_header(const struct ip_set_macipmap *map,
6389 + struct ip_set_req_macipmap_create *header)
6391 + header->flags = map->flags;
6394 +BITMAP_LIST_HEADER(macipmap)
6395 +BITMAP_LIST_MEMBERS_SIZE(macipmap)
6396 +BITMAP_LIST_MEMBERS(macipmap)
6398 +IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
6400 +MODULE_LICENSE("GPL");
6401 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6402 +MODULE_DESCRIPTION("macipmap type of IP sets");
6404 +REGISTER_MODULE(macipmap)
6406 +++ b/net/ipv4/netfilter/ip_set_nethash.c
6408 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6410 + * This program is free software; you can redistribute it and/or modify
6411 + * it under the terms of the GNU General Public License version 2 as
6412 + * published by the Free Software Foundation.
6415 +/* Kernel module implementing a cidr nethash set */
6417 +#include <linux/module.h>
6418 +#include <linux/moduleparam.h>
6419 +#include <linux/ip.h>
6420 +#include <linux/skbuff.h>
6421 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
6422 +#include <linux/errno.h>
6423 +#include <asm/uaccess.h>
6424 +#include <asm/bitops.h>
6425 +#include <linux/spinlock.h>
6426 +#include <linux/random.h>
6428 +#include <net/ip.h>
6430 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
6432 +static int limit = MAX_RANGE;
6434 +static inline __u32
6435 +nethash_id_cidr(const struct ip_set_nethash *map,
6436 + ip_set_ip_t *hash_ip,
6442 + ip_set_ip_t *elem;
6444 + *hash_ip = pack_ip_cidr(ip, cidr);
6448 + for (i = 0; i < map->probes; i++) {
6449 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
6450 + DP("hash key: %u", id);
6451 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6452 + if (*elem == *hash_ip)
6454 + /* No shortcut - there can be deleted entries. */
6459 +static inline __u32
6460 +nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
6462 + const struct ip_set_nethash *map = set->data;
6463 + __u32 id = UINT_MAX;
6466 + for (i = 0; i < 30 && map->cidr[i]; i++) {
6467 + id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
6468 + if (id != UINT_MAX)
6475 +nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
6476 + ip_set_ip_t ip, uint8_t cidr)
6478 + const struct ip_set_nethash *map = set->data;
6480 + return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
6484 +nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
6486 + return (nethash_id(set, hash_ip, ip) != UINT_MAX);
6490 +nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
6491 + ip_set_ip_t *hash_ip)
6493 + const struct ip_set_req_nethash *req = data;
6495 + if (req->cidr <= 0 || req->cidr > 32)
6497 + return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
6498 + : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
6501 +#define KADT_CONDITION
6503 +KADT(nethash, test, ipaddr)
6506 +__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
6510 + ip_set_ip_t *elem, *slot = NULL;
6512 + for (i = 0; i < map->probes; i++) {
6513 + probe = jhash_ip(map, i, *ip) % map->hashsize;
6514 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
6517 + if (!(slot || *elem))
6519 + /* There can be deleted entries, must check all slots */
6526 + /* Trigger rehashing */
6531 +nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
6532 + ip_set_ip_t ip, uint8_t cidr)
6534 + struct ip_set_nethash *map = set->data;
6537 + if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
6539 + if (cidr <= 0 || cidr >= 32)
6542 + *hash_ip = pack_ip_cidr(ip, cidr);
6543 + DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
6547 + ret = __nethash_add(map, hash_ip);
6549 + if (!map->nets[cidr-1]++)
6550 + add_cidr_size(map->cidr, cidr);
6557 +#undef KADT_CONDITION
6558 +#define KADT_CONDITION \
6559 + struct ip_set_nethash *map = set->data; \
6560 + uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
6562 +UADT(nethash, add, req->cidr)
6563 +KADT(nethash, add, ipaddr, cidr)
6566 +__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
6568 + memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
6569 + memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
6572 +HASH_RETRY(nethash, ip_set_ip_t)
6575 +nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
6576 + ip_set_ip_t ip, uint8_t cidr)
6578 + struct ip_set_nethash *map = set->data;
6579 + ip_set_ip_t id, *elem;
6581 + if (cidr <= 0 || cidr >= 32)
6584 + id = nethash_id_cidr(map, hash_ip, ip, cidr);
6585 + if (id == UINT_MAX)
6588 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6591 + if (!map->nets[cidr-1]--)
6592 + del_cidr_size(map->cidr, cidr);
6596 +UADT(nethash, del, req->cidr)
6597 +KADT(nethash, del, ipaddr, cidr)
6600 +__nethash_create(const struct ip_set_req_nethash_create *req,
6601 + struct ip_set_nethash *map)
6603 + memset(map->cidr, 0, sizeof(map->cidr));
6604 + memset(map->nets, 0, sizeof(map->nets));
6609 +HASH_CREATE(nethash, ip_set_ip_t)
6610 +HASH_DESTROY(nethash)
6612 +HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
6615 +__nethash_list_header(const struct ip_set_nethash *map,
6616 + struct ip_set_req_nethash_create *header)
6620 +HASH_LIST_HEADER(nethash)
6621 +HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
6622 +HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
6624 +IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
6626 +MODULE_LICENSE("GPL");
6627 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6628 +MODULE_DESCRIPTION("nethash type of IP sets");
6629 +module_param(limit, int, 0600);
6630 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
6632 +REGISTER_MODULE(nethash)
6634 +++ b/net/ipv4/netfilter/ip_set_portmap.c
6636 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6638 + * This program is free software; you can redistribute it and/or modify
6639 + * it under the terms of the GNU General Public License version 2 as
6640 + * published by the Free Software Foundation.
6643 +/* Kernel module implementing a port set type as a bitmap */
6645 +#include <linux/module.h>
6646 +#include <linux/ip.h>
6647 +#include <linux/tcp.h>
6648 +#include <linux/udp.h>
6649 +#include <linux/skbuff.h>
6650 +#include <linux/errno.h>
6651 +#include <asm/uaccess.h>
6652 +#include <asm/bitops.h>
6653 +#include <linux/spinlock.h>
6655 +#include <net/ip.h>
6657 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
6658 +#include <linux/netfilter_ipv4/ip_set_getport.h>
6661 +portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
6664 + const struct ip_set_portmap *map = set->data;
6666 + if (port < map->first_ip || port > map->last_ip)
6669 + *hash_port = port;
6670 + DP("set: %s, port:%u, %u", set->name, port, *hash_port);
6671 + return !!test_bit(port - map->first_ip, map->members);
6674 +#define KADT_CONDITION \
6675 + if (ip == INVALID_PORT) \
6678 +UADT(portmap, test)
6679 +KADT(portmap, test, get_port)
6682 +portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
6684 + struct ip_set_portmap *map = set->data;
6686 + if (port < map->first_ip || port > map->last_ip)
6688 + if (test_and_set_bit(port - map->first_ip, map->members))
6691 + *hash_port = port;
6692 + DP("port %u", port);
6697 +KADT(portmap, add, get_port)
6700 +portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
6702 + struct ip_set_portmap *map = set->data;
6704 + if (port < map->first_ip || port > map->last_ip)
6706 + if (!test_and_clear_bit(port - map->first_ip, map->members))
6709 + *hash_port = port;
6710 + DP("port %u", port);
6715 +KADT(portmap, del, get_port)
6718 +__portmap_create(const struct ip_set_req_portmap_create *req,
6719 + struct ip_set_portmap *map)
6721 + if (req->to - req->from > MAX_RANGE) {
6722 + ip_set_printk("range too big, %d elements (max %d)",
6723 + req->to - req->from + 1, MAX_RANGE+1);
6726 + return bitmap_bytes(req->from, req->to);
6729 +BITMAP_CREATE(portmap)
6730 +BITMAP_DESTROY(portmap)
6731 +BITMAP_FLUSH(portmap)
6734 +__portmap_list_header(const struct ip_set_portmap *map,
6735 + struct ip_set_req_portmap_create *header)
6739 +BITMAP_LIST_HEADER(portmap)
6740 +BITMAP_LIST_MEMBERS_SIZE(portmap)
6741 +BITMAP_LIST_MEMBERS(portmap)
6743 +IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
6745 +MODULE_LICENSE("GPL");
6746 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6747 +MODULE_DESCRIPTION("portmap type of IP sets");
6749 +REGISTER_MODULE(portmap)
6751 +++ b/net/ipv4/netfilter/ip_set_setlist.c
6753 +/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6755 + * This program is free software; you can redistribute it and/or modify
6756 + * it under the terms of the GNU General Public License version 2 as
6757 + * published by the Free Software Foundation.
6760 +/* Kernel module implementing an IP set type: the setlist type */
6762 +#include <linux/module.h>
6763 +#include <linux/ip.h>
6764 +#include <linux/skbuff.h>
6765 +#include <linux/errno.h>
6767 +#include <linux/netfilter_ipv4/ip_set.h>
6768 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
6769 +#include <linux/netfilter_ipv4/ip_set_setlist.h>
6772 + * before ==> index, ref
6773 + * after ==> ref, index
6777 +next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
6779 + return i < map->size && map->index[i] == index;
6783 +setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
6784 + ip_set_ip_t *hash_ip)
6786 + const struct ip_set_setlist *map = set->data;
6787 + const struct ip_set_req_setlist *req = data;
6788 + ip_set_id_t index, ref = IP_SET_INVALID_ID;
6792 + if (req->before && req->ref[0] == '\0')
6795 + index = __ip_set_get_byname(req->name, &s);
6796 + if (index == IP_SET_INVALID_ID)
6798 + if (req->ref[0] != '\0') {
6799 + ref = __ip_set_get_byname(req->ref, &s);
6800 + if (ref == IP_SET_INVALID_ID)
6803 + for (i = 0; i < map->size
6804 + && map->index[i] != IP_SET_INVALID_ID; i++) {
6805 + if (req->before && map->index[i] == index) {
6806 + res = next_index_eq(map, i + 1, ref);
6808 + } else if (!req->before) {
6809 + if ((ref == IP_SET_INVALID_ID
6810 + && map->index[i] == index)
6811 + || (map->index[i] == ref
6812 + && next_index_eq(map, i + 1, index))) {
6818 + if (ref != IP_SET_INVALID_ID)
6819 + __ip_set_put_byindex(ref);
6821 + __ip_set_put_byindex(index);
6826 +setlist_ktest(struct ip_set *set,
6827 + const struct sk_buff *skb,
6828 + ip_set_ip_t *hash_ip,
6829 + const u_int32_t *flags,
6830 + unsigned char index)
6832 + struct ip_set_setlist *map = set->data;
6835 + for (i = 0; i < map->size
6836 + && map->index[i] != IP_SET_INVALID_ID
6838 + res = ip_set_testip_kernel(map->index[i], skb, flags);
6843 +insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
6848 + DP("i: %u, last %u\n", i, map->index[map->size - 1]);
6849 + if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
6852 + for (j = i; j < map->size
6853 + && index != IP_SET_INVALID_ID; j++) {
6854 + tmp = map->index[j];
6855 + map->index[j] = index;
6862 +setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
6863 + ip_set_ip_t *hash_ip)
6865 + struct ip_set_setlist *map = set->data;
6866 + const struct ip_set_req_setlist *req = data;
6867 + ip_set_id_t index, ref = IP_SET_INVALID_ID;
6868 + int i, res = -ERANGE;
6871 + if (req->before && req->ref[0] == '\0')
6874 + index = __ip_set_get_byname(req->name, &s);
6875 + if (index == IP_SET_INVALID_ID)
6877 + /* "Loop detection" */
6878 + if (strcmp(s->type->typename, "setlist") == 0)
6881 + if (req->ref[0] != '\0') {
6882 + ref = __ip_set_get_byname(req->ref, &s);
6883 + if (ref == IP_SET_INVALID_ID) {
6888 + for (i = 0; i < map->size; i++) {
6889 + if (map->index[i] != ref)
6892 + res = insert_setlist(map, i, index);
6894 + res = insert_setlist(map,
6895 + ref == IP_SET_INVALID_ID ? i : i + 1,
6899 + if (ref != IP_SET_INVALID_ID)
6900 + __ip_set_put_byindex(ref);
6901 + /* In case of success, we keep the reference to the set */
6904 + __ip_set_put_byindex(index);
6909 +setlist_kadd(struct ip_set *set,
6910 + const struct sk_buff *skb,
6911 + ip_set_ip_t *hash_ip,
6912 + const u_int32_t *flags,
6913 + unsigned char index)
6915 + struct ip_set_setlist *map = set->data;
6916 + int i, res = -EINVAL;
6918 + for (i = 0; i < map->size
6919 + && map->index[i] != IP_SET_INVALID_ID
6921 + res = ip_set_addip_kernel(map->index[i], skb, flags);
6926 +unshift_setlist(struct ip_set_setlist *map, int i)
6930 + for (j = i; j < map->size - 1; j++)
6931 + map->index[j] = map->index[j+1];
6932 + map->index[map->size-1] = IP_SET_INVALID_ID;
6937 +setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
6938 + ip_set_ip_t *hash_ip)
6940 + struct ip_set_setlist *map = set->data;
6941 + const struct ip_set_req_setlist *req = data;
6942 + ip_set_id_t index, ref = IP_SET_INVALID_ID;
6943 + int i, res = -EEXIST;
6946 + if (req->before && req->ref[0] == '\0')
6949 + index = __ip_set_get_byname(req->name, &s);
6950 + if (index == IP_SET_INVALID_ID)
6952 + if (req->ref[0] != '\0') {
6953 + ref = __ip_set_get_byname(req->ref, &s);
6954 + if (ref == IP_SET_INVALID_ID)
6957 + for (i = 0; i < map->size
6958 + && map->index[i] != IP_SET_INVALID_ID; i++) {
6959 + if (req->before) {
6960 + if (map->index[i] == index
6961 + && next_index_eq(map, i + 1, ref)) {
6962 + res = unshift_setlist(map, i);
6965 + } else if (ref == IP_SET_INVALID_ID) {
6966 + if (map->index[i] == index) {
6967 + res = unshift_setlist(map, i);
6970 + } else if (map->index[i] == ref
6971 + && next_index_eq(map, i + 1, index)) {
6972 + res = unshift_setlist(map, i + 1);
6976 + if (ref != IP_SET_INVALID_ID)
6977 + __ip_set_put_byindex(ref);
6979 + __ip_set_put_byindex(index);
6980 + /* In case of success, release the reference to the set */
6982 + __ip_set_put_byindex(index);
6987 +setlist_kdel(struct ip_set *set,
6988 + const struct sk_buff *skb,
6989 + ip_set_ip_t *hash_ip,
6990 + const u_int32_t *flags,
6991 + unsigned char index)
6993 + struct ip_set_setlist *map = set->data;
6994 + int i, res = -EINVAL;
6996 + for (i = 0; i < map->size
6997 + && map->index[i] != IP_SET_INVALID_ID
6999 + res = ip_set_delip_kernel(map->index[i], skb, flags);
7004 +setlist_create(struct ip_set *set, const void *data, u_int32_t size)
7006 + struct ip_set_setlist *map;
7007 + const struct ip_set_req_setlist_create *req = data;
7010 + map = kmalloc(sizeof(struct ip_set_setlist) +
7011 + req->size * sizeof(ip_set_id_t), GFP_KERNEL);
7014 + map->size = req->size;
7015 + for (i = 0; i < map->size; i++)
7016 + map->index[i] = IP_SET_INVALID_ID;
7023 +setlist_destroy(struct ip_set *set)
7025 + struct ip_set_setlist *map = set->data;
7028 + for (i = 0; i < map->size
7029 + && map->index[i] != IP_SET_INVALID_ID; i++)
7030 + __ip_set_put_byindex(map->index[i]);
7037 +setlist_flush(struct ip_set *set)
7039 + struct ip_set_setlist *map = set->data;
7042 + for (i = 0; i < map->size
7043 + && map->index[i] != IP_SET_INVALID_ID; i++) {
7044 + __ip_set_put_byindex(map->index[i]);
7045 + map->index[i] = IP_SET_INVALID_ID;
7050 +setlist_list_header(const struct ip_set *set, void *data)
7052 + const struct ip_set_setlist *map = set->data;
7053 + struct ip_set_req_setlist_create *header = data;
7055 + header->size = map->size;
7059 +setlist_list_members_size(const struct ip_set *set)
7061 + const struct ip_set_setlist *map = set->data;
7063 + return map->size * sizeof(ip_set_id_t);
7067 +setlist_list_members(const struct ip_set *set, void *data)
7069 + struct ip_set_setlist *map = set->data;
7072 + for (i = 0; i < map->size; i++)
7073 + *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
7076 +IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
7078 +MODULE_LICENSE("GPL");
7079 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7080 +MODULE_DESCRIPTION("setlist type of IP sets");
7082 +REGISTER_MODULE(setlist)
7084 +++ b/net/ipv4/netfilter/ipt_set.c
7086 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7087 + * Patrick Schaaf <bof@bof.de>
7088 + * Martin Josefsson <gandalf@wlug.westbo.se>
7089 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7091 + * This program is free software; you can redistribute it and/or modify
7092 + * it under the terms of the GNU General Public License version 2 as
7093 + * published by the Free Software Foundation.
7096 +/* Kernel module to match an IP set. */
7098 +#include <linux/module.h>
7099 +#include <linux/ip.h>
7100 +#include <linux/skbuff.h>
7101 +#include <linux/version.h>
7103 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7104 +#include <linux/netfilter_ipv4/ip_tables.h>
7105 +#define xt_register_match ipt_register_match
7106 +#define xt_unregister_match ipt_unregister_match
7107 +#define xt_match ipt_match
7109 +#include <linux/netfilter/x_tables.h>
7111 +#include <linux/netfilter_ipv4/ip_set.h>
7112 +#include <linux/netfilter_ipv4/ipt_set.h>
7115 +match_set(const struct ipt_set_info *info,
7116 + const struct sk_buff *skb,
7119 + if (ip_set_testip_kernel(info->index, skb, info->flags))
7124 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
7126 +match(const struct sk_buff *skb,
7127 + const struct net_device *in,
7128 + const struct net_device *out,
7129 + const void *matchinfo,
7132 + u_int16_t datalen,
7134 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7136 +match(const struct sk_buff *skb,
7137 + const struct net_device *in,
7138 + const struct net_device *out,
7139 + const void *matchinfo,
7142 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7144 +match(const struct sk_buff *skb,
7145 + const struct net_device *in,
7146 + const struct net_device *out,
7147 + const void *matchinfo,
7149 + unsigned int protoff,
7151 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
7153 +match(const struct sk_buff *skb,
7154 + const struct net_device *in,
7155 + const struct net_device *out,
7156 + const struct xt_match *match,
7157 + const void *matchinfo,
7159 + unsigned int protoff,
7161 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7163 +match(const struct sk_buff *skb,
7164 + const struct net_device *in,
7165 + const struct net_device *out,
7166 + const struct xt_match *match,
7167 + const void *matchinfo,
7169 + unsigned int protoff,
7171 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7173 +match(const struct sk_buff *skb,
7174 + const struct xt_match_param *par)
7177 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7178 + const struct ipt_set_info_match *info = matchinfo;
7180 + const struct ipt_set_info_match *info = par->matchinfo;
7183 + return match_set(&info->match_set,
7185 + info->match_set.flags[0] & IPSET_MATCH_INV);
7188 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7190 +checkentry(const char *tablename,
7191 + const struct ipt_ip *ip,
7193 + unsigned int matchsize,
7194 + unsigned int hook_mask)
7195 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7197 +checkentry(const char *tablename,
7200 + unsigned int matchsize,
7201 + unsigned int hook_mask)
7202 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7204 +checkentry(const char *tablename,
7206 + const struct xt_match *match,
7208 + unsigned int matchsize,
7209 + unsigned int hook_mask)
7210 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
7212 +checkentry(const char *tablename,
7214 + const struct xt_match *match,
7216 + unsigned int hook_mask)
7217 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7219 +checkentry(const char *tablename,
7221 + const struct xt_match *match,
7223 + unsigned int hook_mask)
7224 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7226 +checkentry(const struct xt_mtchk_param *par)
7229 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7230 + struct ipt_set_info_match *info = matchinfo;
7232 + struct ipt_set_info_match *info = par->matchinfo;
7234 + ip_set_id_t index;
7236 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7237 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7238 + ip_set_printk("invalid matchsize %d", matchsize);
7243 + index = ip_set_get_byindex(info->match_set.index);
7245 + if (index == IP_SET_INVALID_ID) {
7246 + ip_set_printk("Cannot find set indentified by id %u to match",
7247 + info->match_set.index);
7248 + return 0; /* error */
7250 + if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7251 + ip_set_printk("That's nasty!");
7252 + return 0; /* error */
7258 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7259 +static void destroy(void *matchinfo,
7260 + unsigned int matchsize)
7261 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7262 +static void destroy(const struct xt_match *match,
7264 + unsigned int matchsize)
7265 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7266 +static void destroy(const struct xt_match *match,
7268 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7269 +static void destroy(const struct xt_mtdtor_param *par)
7272 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7273 + struct ipt_set_info_match *info = matchinfo;
7275 + struct ipt_set_info_match *info = par->matchinfo;
7279 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7280 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7281 + ip_set_printk("invalid matchsize %d", matchsize);
7285 + ip_set_put_byindex(info->match_set.index);
7288 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7289 +static struct xt_match set_match = {
7292 + .checkentry = &checkentry,
7293 + .destroy = &destroy,
7296 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
7297 +static struct xt_match set_match = {
7299 + .family = AF_INET,
7301 + .matchsize = sizeof(struct ipt_set_info_match),
7302 + .checkentry = &checkentry,
7303 + .destroy = &destroy,
7308 +MODULE_LICENSE("GPL");
7309 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7310 +MODULE_DESCRIPTION("iptables IP set match module");
7312 +static int __init ipt_ipset_init(void)
7314 + return xt_register_match(&set_match);
7317 +static void __exit ipt_ipset_fini(void)
7319 + xt_unregister_match(&set_match);
7322 +module_init(ipt_ipset_init);
7323 +module_exit(ipt_ipset_fini);
7325 +++ b/net/ipv4/netfilter/ipt_SET.c
7327 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7328 + * Patrick Schaaf <bof@bof.de>
7329 + * Martin Josefsson <gandalf@wlug.westbo.se>
7330 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7332 + * This program is free software; you can redistribute it and/or modify
7333 + * it under the terms of the GNU General Public License version 2 as
7334 + * published by the Free Software Foundation.
7337 +/* ipt_SET.c - netfilter target to manipulate IP sets */
7339 +#include <linux/module.h>
7340 +#include <linux/ip.h>
7341 +#include <linux/skbuff.h>
7342 +#include <linux/version.h>
7344 +#include <linux/netfilter_ipv4.h>
7345 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7346 +#include <linux/netfilter_ipv4/ip_tables.h>
7347 +#define xt_register_target ipt_register_target
7348 +#define xt_unregister_target ipt_unregister_target
7349 +#define xt_target ipt_target
7350 +#define XT_CONTINUE IPT_CONTINUE
7352 +#include <linux/netfilter/x_tables.h>
7354 +#include <linux/netfilter_ipv4/ipt_set.h>
7356 +static unsigned int
7357 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
7358 +target(struct sk_buff **pskb,
7359 + unsigned int hooknum,
7360 + const struct net_device *in,
7361 + const struct net_device *out,
7362 + const void *targinfo,
7364 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7365 +target(struct sk_buff **pskb,
7366 + const struct net_device *in,
7367 + const struct net_device *out,
7368 + unsigned int hooknum,
7369 + const void *targinfo,
7371 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7372 +target(struct sk_buff **pskb,
7373 + const struct net_device *in,
7374 + const struct net_device *out,
7375 + unsigned int hooknum,
7376 + const struct xt_target *target,
7377 + const void *targinfo,
7379 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
7380 +target(struct sk_buff **pskb,
7381 + const struct net_device *in,
7382 + const struct net_device *out,
7383 + unsigned int hooknum,
7384 + const struct xt_target *target,
7385 + const void *targinfo)
7386 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7387 +target(struct sk_buff *skb,
7388 + const struct net_device *in,
7389 + const struct net_device *out,
7390 + unsigned int hooknum,
7391 + const struct xt_target *target,
7392 + const void *targinfo)
7393 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7394 +target(struct sk_buff *skb,
7395 + const struct xt_target_param *par)
7398 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7399 + const struct ipt_set_info_target *info = targinfo;
7401 + const struct ipt_set_info_target *info = par->targinfo;
7403 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
7404 + struct sk_buff *skb = *pskb;
7408 + if (info->add_set.index != IP_SET_INVALID_ID)
7409 + ip_set_addip_kernel(info->add_set.index,
7411 + info->add_set.flags);
7412 + if (info->del_set.index != IP_SET_INVALID_ID)
7413 + ip_set_delip_kernel(info->del_set.index,
7415 + info->del_set.flags);
7417 + return XT_CONTINUE;
7420 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7422 +checkentry(const char *tablename,
7423 + const struct ipt_entry *e,
7425 + unsigned int targinfosize,
7426 + unsigned int hook_mask)
7427 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7429 +checkentry(const char *tablename,
7432 + unsigned int targinfosize,
7433 + unsigned int hook_mask)
7434 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7436 +checkentry(const char *tablename,
7438 + const struct xt_target *target,
7440 + unsigned int targinfosize,
7441 + unsigned int hook_mask)
7442 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
7444 +checkentry(const char *tablename,
7446 + const struct xt_target *target,
7448 + unsigned int hook_mask)
7449 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7451 +checkentry(const char *tablename,
7453 + const struct xt_target *target,
7455 + unsigned int hook_mask)
7456 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7458 +checkentry(const struct xt_tgchk_param *par)
7461 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7462 + const struct ipt_set_info_target *info = targinfo;
7464 + const struct ipt_set_info_target *info = par->targinfo;
7466 + ip_set_id_t index;
7468 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7469 + if (targinfosize != IPT_ALIGN(sizeof(*info))) {
7470 + DP("bad target info size %u", targinfosize);
7475 + if (info->add_set.index != IP_SET_INVALID_ID) {
7476 + index = ip_set_get_byindex(info->add_set.index);
7477 + if (index == IP_SET_INVALID_ID) {
7478 + ip_set_printk("cannot find add_set index %u as target",
7479 + info->add_set.index);
7480 + return 0; /* error */
7484 + if (info->del_set.index != IP_SET_INVALID_ID) {
7485 + index = ip_set_get_byindex(info->del_set.index);
7486 + if (index == IP_SET_INVALID_ID) {
7487 + ip_set_printk("cannot find del_set index %u as target",
7488 + info->del_set.index);
7489 + return 0; /* error */
7492 + if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
7493 + || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7494 + ip_set_printk("That's nasty!");
7495 + return 0; /* error */
7501 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7502 +static void destroy(void *targetinfo,
7503 + unsigned int targetsize)
7504 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7505 +static void destroy(const struct xt_target *target,
7507 + unsigned int targetsize)
7508 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7509 +static void destroy(const struct xt_target *target,
7511 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7512 +static void destroy(const struct xt_tgdtor_param *par)
7515 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7516 + const struct ipt_set_info_target *info = targetinfo;
7518 + const struct ipt_set_info_target *info = par->targinfo;
7521 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7522 + if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
7523 + ip_set_printk("invalid targetsize %d", targetsize);
7527 + if (info->add_set.index != IP_SET_INVALID_ID)
7528 + ip_set_put_byindex(info->add_set.index);
7529 + if (info->del_set.index != IP_SET_INVALID_ID)
7530 + ip_set_put_byindex(info->del_set.index);
7533 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7534 +static struct xt_target SET_target = {
7537 + .checkentry = checkentry,
7538 + .destroy = destroy,
7541 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
7542 +static struct xt_target SET_target = {
7544 + .family = AF_INET,
7546 + .targetsize = sizeof(struct ipt_set_info_target),
7547 + .checkentry = checkentry,
7548 + .destroy = destroy,
7553 +MODULE_LICENSE("GPL");
7554 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7555 +MODULE_DESCRIPTION("iptables IP set target module");
7557 +static int __init ipt_SET_init(void)
7559 + return xt_register_target(&SET_target);
7562 +static void __exit ipt_SET_fini(void)
7564 + xt_unregister_target(&SET_target);
7567 +module_init(ipt_SET_init);
7568 +module_exit(ipt_SET_fini);
7569 --- a/net/ipv4/netfilter/Kconfig
7570 +++ b/net/ipv4/netfilter/Kconfig
7571 @@ -406,5 +406,146 @@ config IP_NF_ARP_MANGLE
7572 Allows altering the ARP packet payload: source and destination
7573 hardware and network addresses.
7576 + tristate "IP set support"
7577 + depends on INET && NETFILTER
7579 + This option adds IP set support to the kernel.
7580 + In order to define and use sets, you need the userspace utility
7583 + To compile it as a module, choose M here. If unsure, say N.
7585 +config IP_NF_SET_MAX
7586 + int "Maximum number of IP sets"
7589 + depends on IP_NF_SET
7591 + You can define here default value of the maximum number
7592 + of IP sets for the kernel.
7594 + The value can be overridden by the 'max_sets' module
7595 + parameter of the 'ip_set' module.
7597 +config IP_NF_SET_HASHSIZE
7598 + int "Hash size for bindings of IP sets"
7600 + depends on IP_NF_SET
7602 + You can define here default value of the hash size for
7603 + bindings of IP sets.
7605 + The value can be overridden by the 'hash_size' module
7606 + parameter of the 'ip_set' module.
7608 +config IP_NF_SET_IPMAP
7609 + tristate "ipmap set support"
7610 + depends on IP_NF_SET
7612 + This option adds the ipmap set type support.
7614 + To compile it as a module, choose M here. If unsure, say N.
7616 +config IP_NF_SET_MACIPMAP
7617 + tristate "macipmap set support"
7618 + depends on IP_NF_SET
7620 + This option adds the macipmap set type support.
7622 + To compile it as a module, choose M here. If unsure, say N.
7624 +config IP_NF_SET_PORTMAP
7625 + tristate "portmap set support"
7626 + depends on IP_NF_SET
7628 + This option adds the portmap set type support.
7630 + To compile it as a module, choose M here. If unsure, say N.
7632 +config IP_NF_SET_IPHASH
7633 + tristate "iphash set support"
7634 + depends on IP_NF_SET
7636 + This option adds the iphash set type support.
7638 + To compile it as a module, choose M here. If unsure, say N.
7640 +config IP_NF_SET_NETHASH
7641 + tristate "nethash set support"
7642 + depends on IP_NF_SET
7644 + This option adds the nethash set type support.
7646 + To compile it as a module, choose M here. If unsure, say N.
7648 +config IP_NF_SET_IPPORTHASH
7649 + tristate "ipporthash set support"
7650 + depends on IP_NF_SET
7652 + This option adds the ipporthash set type support.
7654 + To compile it as a module, choose M here. If unsure, say N.
7656 +config IP_NF_SET_IPPORTIPHASH
7657 + tristate "ipportiphash set support"
7658 + depends on IP_NF_SET
7660 + This option adds the ipportiphash set type support.
7662 + To compile it as a module, choose M here. If unsure, say N.
7664 +config IP_NF_SET_IPPORTNETHASH
7665 + tristate "ipportnethash set support"
7666 + depends on IP_NF_SET
7668 + This option adds the ipportnethash set type support.
7670 + To compile it as a module, choose M here. If unsure, say N.
7672 +config IP_NF_SET_IPTREE
7673 + tristate "iptree set support"
7674 + depends on IP_NF_SET
7676 + This option adds the iptree set type support.
7678 + To compile it as a module, choose M here. If unsure, say N.
7680 +config IP_NF_SET_IPTREEMAP
7681 + tristate "iptreemap set support"
7682 + depends on IP_NF_SET
7684 + This option adds the iptreemap set type support.
7686 + To compile it as a module, choose M here. If unsure, say N.
7688 +config IP_NF_SET_SETLIST
7689 + tristate "setlist set support"
7690 + depends on IP_NF_SET
7692 + This option adds the setlist set type support.
7694 + To compile it as a module, choose M here. If unsure, say N.
7696 +config IP_NF_MATCH_SET
7697 + tristate "set match support"
7698 + depends on IP_NF_SET
7700 + Set matching matches against given IP sets.
7701 + You need the ipset utility to create and set up the sets.
7703 + To compile it as a module, choose M here. If unsure, say N.
7705 +config IP_NF_TARGET_SET
7706 + tristate "SET target support"
7707 + depends on IP_NF_SET
7709 + The SET target makes it possible to add/delete entries
7711 + You need the ipset utility to create and set up the sets.
7713 + To compile it as a module, choose M here. If unsure, say N.
7718 --- a/net/ipv4/netfilter/Makefile
7719 +++ b/net/ipv4/netfilter/Makefile
7720 @@ -50,6 +50,7 @@ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
7721 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
7722 obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
7723 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
7724 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
7727 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
7728 @@ -61,6 +62,21 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
7729 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
7730 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
7731 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
7732 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
7735 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
7736 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
7737 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
7738 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
7739 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
7740 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
7741 +obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
7742 +obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
7743 +obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
7744 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
7745 +obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
7746 +obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
7748 # generic ARP tables
7749 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o