Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (25 commits)
  [XFRM]: Fix OOPSes in xfrm_audit_log().
  [TCP]: cleanup of htcp (resend)
  [TCP]: Use read mostly for CUBIC parameters.
  [NETFILTER]: nf_conntrack_tcp: make sysctl variables static
  [NETFILTER]: ip6t_mh: drop piggyback payload packet on MH packets
  [NETFILTER]: Fix whitespace errors
  [NETFILTER]: Kconfig: improve dependency handling
  [NETFILTER]: xt_mac/xt_CLASSIFY: use IPv6 hook names for IPv6 registration
  [NETFILTER]: nf_conntrack: change nf_conntrack_l[34]proto_unregister to void
  [NETFILTER]: nf_conntrack: properly use RCU for nf_conntrack_destroyed callback
  [NETFILTER]: ip_conntrack: properly use RCU for ip_conntrack_destroyed callback
  [NETFILTER]: nf_conntrack: fix invalid conntrack statistics RCU assumption
  [NETFILTER]: ip_conntrack: fix invalid conntrack statistics RCU assumption
  [NETFILTER]: nf_conntrack: properly use RCU API for nf_ct_protos/nf_ct_l3protos arrays
  [NETFILTER]: ip_conntrack: properly use RCU API for ip_ct_protos array
  [NETFILTER]: nf_nat: properly use RCU API for nf_nat_protos array
  [NETFILTER]: ip_nat: properly use RCU API for ip_nat_protos array
  [NETFILTER]: nf_log: minor cleanups
  [NETFILTER]: nf_log: switch logger registration/unregistration to mutex
  [NETFILTER]: nf_log: make nf_log_unregister_pf return void
  ...
commit 89697f1d71
Author: Linus Torvalds
Date:   2007-02-12 15:34:17 -08:00
71 changed files with 768 additions and 709 deletions
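Most of the netfilter commits in this merge move protocol tables and callback pointers from plain assignment (previously guarded only by preempt_disable() or a memory barrier) onto the RCU API. As a rough sketch of the pattern the hunks below apply — every identifier here is invented for illustration, not taken from the kernel:

	/* Writers publish with rcu_assign_pointer(); readers look up and use
	 * the entry entirely inside an RCU read-side critical section. */
	#include <linux/rcupdate.h>
	#include <linux/skbuff.h>
	#include <linux/types.h>

	#define MY_MAX_PROTO 256			/* illustrative table size */

	struct my_proto {
		int (*handler)(struct sk_buff *skb);
	};

	static struct my_proto *my_protos[MY_MAX_PROTO];

	static void my_proto_publish(u_int8_t num, struct my_proto *p)
	{
		/* writers are serialized elsewhere, e.g. under a lock or mutex */
		rcu_assign_pointer(my_protos[num], p);
	}

	static int my_proto_handle(u_int8_t num, struct sk_buff *skb)
	{
		struct my_proto *p;
		int ret;

		rcu_read_lock();
		p = rcu_dereference(my_protos[num]);	/* pairs with the publish above */
		/* the kernel code installs a generic fallback entry, so a slot
		 * is never NULL; this sketch assumes the same */
		ret = p->handler(skb);			/* entry used before unlock */
		rcu_read_unlock();
		return ret;
	}

The *_find_get() helpers in the diffs additionally take a module reference with try_module_get() before dropping rcu_read_lock(), so the entry remains usable after the read-side section ends.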


@ -172,8 +172,8 @@ struct nf_logger {
/* Function to register/unregister log function. */
int nf_log_register(int pf, struct nf_logger *logger);
int nf_log_unregister_pf(int pf);
void nf_log_unregister_logger(struct nf_logger *logger);
void nf_log_unregister(struct nf_logger *logger);
void nf_log_unregister_pf(int pf);
/* Calls the registered backend logging function */
void nf_log_packet(int pf,


@ -301,6 +301,12 @@ extern unsigned int ip_conntrack_htable_size;
extern int ip_conntrack_checksum;
#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)
#define CONNTRACK_STAT_INC_ATOMIC(count) \
do { \
local_bh_disable(); \
__get_cpu_var(ip_conntrack_stat).count++; \
local_bh_enable(); \
} while (0)
#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
#include <linux/notifier.h>


@ -257,6 +257,12 @@ extern int nf_conntrack_max;
DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
#define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)
#define NF_CT_STAT_INC_ATOMIC(count) \
do { \
local_bh_disable(); \
__get_cpu_var(nf_conntrack_stat).count++; \
local_bh_enable(); \
} while (0)
/* no helper, no nat */
#define NF_CT_F_BASIC 0
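The two stat hunks above add an *_ATOMIC variant of the per-CPU counter macro: the plain form relies on the calling context already pinning the CPU (the usual softirq packet path), while the atomic form wraps the increment in local_bh_disable()/local_bh_enable() so it is also safe where that assumption does not hold. A hedged usage sketch, with invented function names and counters chosen only for illustration:

	static void on_packet_path(void)
	{
		/* softirq context: bottom halves already off, cheap increment */
		NF_CT_STAT_INC(found);
	}

	static void outside_packet_path(void)
	{
		/* BHs not guaranteed off here, use the self-protecting variant */
		NF_CT_STAT_INC_ATOMIC(early_drop);
	}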


@ -89,7 +89,7 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
/* Protocol registration. */
extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
extern int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
extern struct nf_conntrack_l3proto *
nf_ct_l3proto_find_get(u_int16_t l3proto);
@ -106,7 +106,7 @@ __nf_ct_l3proto_find(u_int16_t l3proto)
{
if (unlikely(l3proto >= AF_MAX))
return &nf_conntrack_l3proto_generic;
return nf_ct_l3protos[l3proto];
return rcu_dereference(nf_ct_l3protos[l3proto]);
}
#endif /*_NF_CONNTRACK_L3PROTO_H*/


@ -109,7 +109,7 @@ extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
/* Protocol registration. */
extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto);
extern int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);
extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);
/* Generic netlink helpers */
extern int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,


@ -208,7 +208,7 @@ static int __init ebt_log_init(void)
static void __exit ebt_log_fini(void)
{
nf_log_unregister_logger(&ebt_log_logger);
nf_log_unregister(&ebt_log_logger);
ebt_unregister_watcher(&log);
}


@ -323,7 +323,7 @@ static void __exit ebt_ulog_fini(void)
ebt_ulog_buff_t *ub;
int i;
nf_log_unregister_logger(&ebt_ulog_logger);
nf_log_unregister(&ebt_ulog_logger);
ebt_unregister_watcher(&ulog);
for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
ub = &ulog_buffers[i];


@ -226,7 +226,7 @@ config IP_NF_QUEUE
config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
depends on NETFILTER_XTABLES
select NETFILTER_XTABLES
help
iptables is a general, extensible packet identification framework.
The packet filtering and full NAT (masquerading, port forwarding,
@ -606,7 +606,9 @@ config IP_NF_TARGET_TTL
config IP_NF_TARGET_CLUSTERIP
tristate "CLUSTERIP target support (EXPERIMENTAL)"
depends on IP_NF_MANGLE && EXPERIMENTAL
depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK
select NF_CONNTRACK_MARK if NF_CONNTRACK_IPV4
help
The CLUSTERIP target allows you to build load-balancing clusters of
network servers without having a dedicated load-balancing
@ -629,7 +631,7 @@ config IP_NF_RAW
# ARP tables
config IP_NF_ARPTABLES
tristate "ARP tables support"
depends on NETFILTER_XTABLES
select NETFILTER_XTABLES
help
arptables is a general, extensible packet identification framework.
The ARP packet filtering and mangling (manipulation)subsystems


@ -303,6 +303,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
struct ip_conntrack_protocol *proto;
struct ip_conntrack_helper *helper;
typeof(ip_conntrack_destroyed) destroyed;
DEBUGP("destroy_conntrack(%p)\n", ct);
IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
@ -318,12 +319,16 @@ destroy_conntrack(struct nf_conntrack *nfct)
/* To make sure we don't get any weird locking issues here:
* destroy_conntrack() MUST NOT be called with a write lock
* to ip_conntrack_lock!!! -HW */
rcu_read_lock();
proto = __ip_conntrack_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
if (proto && proto->destroy)
proto->destroy(ct);
if (ip_conntrack_destroyed)
ip_conntrack_destroyed(ct);
destroyed = rcu_dereference(ip_conntrack_destroyed);
if (destroyed)
destroyed(ct);
rcu_read_unlock();
write_lock_bh(&ip_conntrack_lock);
/* Expectations will have been removed in clean_from_lists,
@ -536,7 +541,7 @@ static int early_drop(struct list_head *chain)
if (del_timer(&ct->timeout)) {
death_by_timeout((unsigned long)ct);
dropped = 1;
CONNTRACK_STAT_INC(early_drop);
CONNTRACK_STAT_INC_ATOMIC(early_drop);
}
ip_conntrack_put(ct);
return dropped;
@ -595,13 +600,13 @@ ip_conntrack_proto_find_get(u_int8_t protocol)
{
struct ip_conntrack_protocol *p;
preempt_disable();
rcu_read_lock();
p = __ip_conntrack_proto_find(protocol);
if (p) {
if (!try_module_get(p->me))
p = &ip_conntrack_generic_protocol;
}
preempt_enable();
rcu_read_unlock();
return p;
}
@ -802,7 +807,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
/* Previously seen (loopback or untracked)? Ignore. */
if ((*pskb)->nfct) {
CONNTRACK_STAT_INC(ignore);
CONNTRACK_STAT_INC_ATOMIC(ignore);
return NF_ACCEPT;
}
@ -830,6 +835,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
}
#endif
/* rcu_read_lock()ed by nf_hook_slow */
proto = __ip_conntrack_proto_find((*pskb)->nh.iph->protocol);
/* It may be an special packet, error, unclean...
@ -837,20 +843,20 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
* core what to do with the packet. */
if (proto->error != NULL
&& (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
CONNTRACK_STAT_INC(error);
CONNTRACK_STAT_INC(invalid);
CONNTRACK_STAT_INC_ATOMIC(error);
CONNTRACK_STAT_INC_ATOMIC(invalid);
return -ret;
}
if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo))) {
/* Not valid part of a connection */
CONNTRACK_STAT_INC(invalid);
CONNTRACK_STAT_INC_ATOMIC(invalid);
return NF_ACCEPT;
}
if (IS_ERR(ct)) {
/* Too stressed to deal. */
CONNTRACK_STAT_INC(drop);
CONNTRACK_STAT_INC_ATOMIC(drop);
return NF_DROP;
}
@ -862,7 +868,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
* the netfilter core what to do*/
nf_conntrack_put((*pskb)->nfct);
(*pskb)->nfct = NULL;
CONNTRACK_STAT_INC(invalid);
CONNTRACK_STAT_INC_ATOMIC(invalid);
return -ret;
}
@ -875,8 +881,15 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
int invert_tuplepr(struct ip_conntrack_tuple *inverse,
const struct ip_conntrack_tuple *orig)
{
return ip_ct_invert_tuple(inverse, orig,
__ip_conntrack_proto_find(orig->dst.protonum));
struct ip_conntrack_protocol *proto;
int ret;
rcu_read_lock();
proto = __ip_conntrack_proto_find(orig->dst.protonum);
ret = ip_ct_invert_tuple(inverse, orig, proto);
rcu_read_unlock();
return ret;
}
/* Would two expected things clash? */
@ -1354,7 +1367,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
supposed to kill the mall. */
void ip_conntrack_cleanup(void)
{
ip_ct_attach = NULL;
rcu_assign_pointer(ip_ct_attach, NULL);
/* This makes sure all current packets have passed through
netfilter framework. Roll on, two-stage module
@ -1507,15 +1520,15 @@ int __init ip_conntrack_init(void)
/* Don't NEED lock here, but good form anyway. */
write_lock_bh(&ip_conntrack_lock);
for (i = 0; i < MAX_IP_CT_PROTO; i++)
ip_ct_protos[i] = &ip_conntrack_generic_protocol;
rcu_assign_pointer(ip_ct_protos[i], &ip_conntrack_generic_protocol);
/* Sew in builtin protocols. */
ip_ct_protos[IPPROTO_TCP] = &ip_conntrack_protocol_tcp;
ip_ct_protos[IPPROTO_UDP] = &ip_conntrack_protocol_udp;
ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
rcu_assign_pointer(ip_ct_protos[IPPROTO_TCP], &ip_conntrack_protocol_tcp);
rcu_assign_pointer(ip_ct_protos[IPPROTO_UDP], &ip_conntrack_protocol_udp);
rcu_assign_pointer(ip_ct_protos[IPPROTO_ICMP], &ip_conntrack_protocol_icmp);
write_unlock_bh(&ip_conntrack_lock);
/* For use by ipt_REJECT */
ip_ct_attach = ip_conntrack_attach;
rcu_assign_pointer(ip_ct_attach, ip_conntrack_attach);
/* Set up fake conntrack:
- to never be deleted, not in any hashes */


@ -796,7 +796,7 @@ int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
ret = -EBUSY;
goto out;
}
ip_ct_protos[proto->proto] = proto;
rcu_assign_pointer(ip_ct_protos[proto->proto], proto);
out:
write_unlock_bh(&ip_conntrack_lock);
return ret;
@ -805,11 +805,10 @@ int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
{
write_lock_bh(&ip_conntrack_lock);
ip_ct_protos[proto->proto] = &ip_conntrack_generic_protocol;
rcu_assign_pointer(ip_ct_protos[proto->proto],
&ip_conntrack_generic_protocol);
write_unlock_bh(&ip_conntrack_lock);
/* Somebody could be still looking at the proto in bh. */
synchronize_net();
synchronize_rcu();
/* Remove all contrack entries for this protocol */
ip_ct_iterate_cleanup(kill_proto, &proto->proto);


@ -50,7 +50,7 @@ static struct ip_nat_protocol *ip_nat_protos[MAX_IP_NAT_PROTO];
static inline struct ip_nat_protocol *
__ip_nat_proto_find(u_int8_t protonum)
{
return ip_nat_protos[protonum];
return rcu_dereference(ip_nat_protos[protonum]);
}
struct ip_nat_protocol *
@ -58,13 +58,11 @@ ip_nat_proto_find_get(u_int8_t protonum)
{
struct ip_nat_protocol *p;
/* we need to disable preemption to make sure 'p' doesn't get
* removed until we've grabbed the reference */
preempt_disable();
rcu_read_lock();
p = __ip_nat_proto_find(protonum);
if (!try_module_get(p->me))
p = &ip_nat_unknown_protocol;
preempt_enable();
rcu_read_unlock();
return p;
}
@ -120,8 +118,8 @@ static int
in_range(const struct ip_conntrack_tuple *tuple,
const struct ip_nat_range *range)
{
struct ip_nat_protocol *proto =
__ip_nat_proto_find(tuple->dst.protonum);
struct ip_nat_protocol *proto;
int ret = 0;
/* If we are supposed to map IPs, then we must be in the
range specified, otherwise let this drag us onto a new src IP. */
@ -131,12 +129,15 @@ in_range(const struct ip_conntrack_tuple *tuple,
return 0;
}
rcu_read_lock();
proto = __ip_nat_proto_find(tuple->dst.protonum);
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
|| proto->in_range(tuple, IP_NAT_MANIP_SRC,
&range->min, &range->max))
return 1;
ret = 1;
rcu_read_unlock();
return 0;
return ret;
}
static inline int
@ -260,27 +261,25 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
/* 3) The per-protocol part of the manip is made to map into
the range to make a unique tuple. */
proto = ip_nat_proto_find_get(orig_tuple->dst.protonum);
rcu_read_lock();
proto = __ip_nat_proto_find(orig_tuple->dst.protonum);
/* Change protocol info to have some randomization */
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
proto->unique_tuple(tuple, range, maniptype, conntrack);
ip_nat_proto_put(proto);
return;
goto out;
}
/* Only bother mapping if it's not already in range and unique */
if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
|| proto->in_range(tuple, maniptype, &range->min, &range->max))
&& !ip_nat_used_tuple(tuple, conntrack)) {
ip_nat_proto_put(proto);
return;
}
&& !ip_nat_used_tuple(tuple, conntrack))
goto out;
/* Last change: get protocol to try to obtain unique tuple. */
proto->unique_tuple(tuple, range, maniptype, conntrack);
ip_nat_proto_put(proto);
out:
rcu_read_unlock();
}
unsigned int
@ -360,12 +359,11 @@ manip_pkt(u_int16_t proto,
iph = (void *)(*pskb)->data + iphdroff;
/* Manipulate protcol part. */
p = ip_nat_proto_find_get(proto);
if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
ip_nat_proto_put(p);
/* rcu_read_lock()ed by nf_hook_slow */
p = __ip_nat_proto_find(proto);
if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
return 0;
}
ip_nat_proto_put(p);
iph = (void *)(*pskb)->data + iphdroff;
@ -422,6 +420,7 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
struct icmphdr icmp;
struct iphdr ip;
} *inside;
struct ip_conntrack_protocol *proto;
struct ip_conntrack_tuple inner, target;
int hdrlen = (*pskb)->nh.iph->ihl * 4;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@ -457,10 +456,11 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n",
*pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
/* rcu_read_lock()ed by nf_hook_slow */
proto = __ip_conntrack_proto_find(inside->ip.protocol);
if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 +
sizeof(struct icmphdr) + inside->ip.ihl*4,
&inner,
__ip_conntrack_proto_find(inside->ip.protocol)))
&inner, proto))
return 0;
/* Change inner back to look like incoming packet. We do the
@ -515,7 +515,7 @@ int ip_nat_protocol_register(struct ip_nat_protocol *proto)
ret = -EBUSY;
goto out;
}
ip_nat_protos[proto->protonum] = proto;
rcu_assign_pointer(ip_nat_protos[proto->protonum], proto);
out:
write_unlock_bh(&ip_nat_lock);
return ret;
@ -526,11 +526,10 @@ EXPORT_SYMBOL(ip_nat_protocol_register);
void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
{
write_lock_bh(&ip_nat_lock);
ip_nat_protos[proto->protonum] = &ip_nat_unknown_protocol;
rcu_assign_pointer(ip_nat_protos[proto->protonum],
&ip_nat_unknown_protocol);
write_unlock_bh(&ip_nat_lock);
/* Someone could be still looking at the proto in a bh. */
synchronize_net();
synchronize_rcu();
}
EXPORT_SYMBOL(ip_nat_protocol_unregister);
@ -594,10 +593,10 @@ static int __init ip_nat_init(void)
/* Sew in builtin protocols. */
write_lock_bh(&ip_nat_lock);
for (i = 0; i < MAX_IP_NAT_PROTO; i++)
ip_nat_protos[i] = &ip_nat_unknown_protocol;
ip_nat_protos[IPPROTO_TCP] = &ip_nat_protocol_tcp;
ip_nat_protos[IPPROTO_UDP] = &ip_nat_protocol_udp;
ip_nat_protos[IPPROTO_ICMP] = &ip_nat_protocol_icmp;
rcu_assign_pointer(ip_nat_protos[i], &ip_nat_unknown_protocol);
rcu_assign_pointer(ip_nat_protos[IPPROTO_TCP], &ip_nat_protocol_tcp);
rcu_assign_pointer(ip_nat_protos[IPPROTO_UDP], &ip_nat_protocol_udp);
rcu_assign_pointer(ip_nat_protos[IPPROTO_ICMP], &ip_nat_protocol_icmp);
write_unlock_bh(&ip_nat_lock);
for (i = 0; i < ip_nat_htable_size; i++) {
@ -605,8 +604,8 @@ static int __init ip_nat_init(void)
}
/* FIXME: Man, this is a hack. <SIGH> */
IP_NF_ASSERT(ip_conntrack_destroyed == NULL);
ip_conntrack_destroyed = &ip_nat_cleanup_conntrack;
IP_NF_ASSERT(rcu_dereference(ip_conntrack_destroyed) == NULL);
rcu_assign_pointer(ip_conntrack_destroyed, ip_nat_cleanup_conntrack);
/* Initialize fake conntrack so that NAT will skip it */
ip_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
@ -624,7 +623,8 @@ static int clean_nat(struct ip_conntrack *i, void *data)
static void __exit ip_nat_cleanup(void)
{
ip_ct_iterate_cleanup(&clean_nat, NULL);
ip_conntrack_destroyed = NULL;
rcu_assign_pointer(ip_conntrack_destroyed, NULL);
synchronize_rcu();
vfree(bysource);
}


@ -489,7 +489,7 @@ static int __init ipt_log_init(void)
static void __exit ipt_log_fini(void)
{
nf_log_unregister_logger(&ipt_log_logger);
nf_log_unregister(&ipt_log_logger);
xt_unregister_target(&ipt_log_reg);
}


@ -419,7 +419,7 @@ static void __exit ipt_ulog_fini(void)
DEBUGP("ipt_ULOG: cleanup_module\n");
if (nflog)
nf_log_unregister_logger(&ipt_ulog_logger);
nf_log_unregister(&ipt_ulog_logger);
xt_unregister_target(&ipt_ulog_reg);
sock_release(nflognl->sk_socket);


@ -170,7 +170,9 @@ icmp_error_message(struct sk_buff *skb,
return -NF_ACCEPT;
}
/* rcu_read_lock()ed by nf_hook_slow */
innerproto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp);
/* Are they talking about one of our connections? */
if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,


@ -53,7 +53,7 @@ static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];
static inline struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
return nf_nat_protos[protonum];
return rcu_dereference(nf_nat_protos[protonum]);
}
struct nf_nat_protocol *
@ -61,13 +61,11 @@ nf_nat_proto_find_get(u_int8_t protonum)
{
struct nf_nat_protocol *p;
/* we need to disable preemption to make sure 'p' doesn't get
* removed until we've grabbed the reference */
preempt_disable();
rcu_read_lock();
p = __nf_nat_proto_find(protonum);
if (!try_module_get(p->me))
p = &nf_nat_unknown_protocol;
preempt_enable();
rcu_read_unlock();
return p;
}
@ -126,8 +124,8 @@ in_range(const struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range)
{
struct nf_nat_protocol *proto;
int ret = 0;
proto = __nf_nat_proto_find(tuple->dst.protonum);
/* If we are supposed to map IPs, then we must be in the
range specified, otherwise let this drag us onto a new src IP. */
if (range->flags & IP_NAT_RANGE_MAP_IPS) {
@ -136,12 +134,15 @@ in_range(const struct nf_conntrack_tuple *tuple,
return 0;
}
rcu_read_lock();
proto = __nf_nat_proto_find(tuple->dst.protonum);
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
proto->in_range(tuple, IP_NAT_MANIP_SRC,
&range->min, &range->max))
return 1;
ret = 1;
rcu_read_unlock();
return 0;
return ret;
}
static inline int
@ -268,27 +269,25 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
/* 3) The per-protocol part of the manip is made to map into
the range to make a unique tuple. */
proto = nf_nat_proto_find_get(orig_tuple->dst.protonum);
rcu_read_lock();
proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
/* Change protocol info to have some randomization */
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
proto->unique_tuple(tuple, range, maniptype, ct);
nf_nat_proto_put(proto);
return;
goto out;
}
/* Only bother mapping if it's not already in range and unique */
if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
!nf_nat_used_tuple(tuple, ct)) {
nf_nat_proto_put(proto);
return;
}
!nf_nat_used_tuple(tuple, ct))
goto out;
/* Last change: get protocol to try to obtain unique tuple. */
proto->unique_tuple(tuple, range, maniptype, ct);
nf_nat_proto_put(proto);
out:
rcu_read_unlock();
}
unsigned int
@ -369,12 +368,11 @@ manip_pkt(u_int16_t proto,
iph = (void *)(*pskb)->data + iphdroff;
/* Manipulate protcol part. */
p = nf_nat_proto_find_get(proto);
if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
nf_nat_proto_put(p);
/* rcu_read_lock()ed by nf_hook_slow */
p = __nf_nat_proto_find(proto);
if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
return 0;
}
nf_nat_proto_put(p);
iph = (void *)(*pskb)->data + iphdroff;
@ -431,6 +429,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
struct icmphdr icmp;
struct iphdr ip;
} *inside;
struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple inner, target;
int hdrlen = (*pskb)->nh.iph->ihl * 4;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@ -466,16 +465,16 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n",
*pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
/* rcu_read_lock()ed by nf_hook_slow */
l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
if (!nf_ct_get_tuple(*pskb,
(*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
(*pskb)->nh.iph->ihl*4 +
sizeof(struct icmphdr) + inside->ip.ihl*4,
(u_int16_t)AF_INET,
inside->ip.protocol,
&inner,
l3proto,
__nf_ct_l4proto_find((u_int16_t)PF_INET,
inside->ip.protocol)))
&inner, l3proto, l4proto))
return 0;
/* Change inner back to look like incoming packet. We do the
@ -529,7 +528,7 @@ int nf_nat_protocol_register(struct nf_nat_protocol *proto)
ret = -EBUSY;
goto out;
}
nf_nat_protos[proto->protonum] = proto;
rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
out:
write_unlock_bh(&nf_nat_lock);
return ret;
@ -540,11 +539,10 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
{
write_lock_bh(&nf_nat_lock);
nf_nat_protos[proto->protonum] = &nf_nat_unknown_protocol;
rcu_assign_pointer(nf_nat_protos[proto->protonum],
&nf_nat_unknown_protocol);
write_unlock_bh(&nf_nat_lock);
/* Someone could be still looking at the proto in a bh. */
synchronize_net();
synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
@ -608,10 +606,10 @@ static int __init nf_nat_init(void)
/* Sew in builtin protocols. */
write_lock_bh(&nf_nat_lock);
for (i = 0; i < MAX_IP_NAT_PROTO; i++)
nf_nat_protos[i] = &nf_nat_unknown_protocol;
nf_nat_protos[IPPROTO_TCP] = &nf_nat_protocol_tcp;
nf_nat_protos[IPPROTO_UDP] = &nf_nat_protocol_udp;
nf_nat_protos[IPPROTO_ICMP] = &nf_nat_protocol_icmp;
rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
write_unlock_bh(&nf_nat_lock);
for (i = 0; i < nf_nat_htable_size; i++) {
@ -619,8 +617,8 @@ static int __init nf_nat_init(void)
}
/* FIXME: Man, this is a hack. <SIGH> */
NF_CT_ASSERT(nf_conntrack_destroyed == NULL);
nf_conntrack_destroyed = &nf_nat_cleanup_conntrack;
NF_CT_ASSERT(rcu_dereference(nf_conntrack_destroyed) == NULL);
rcu_assign_pointer(nf_conntrack_destroyed, nf_nat_cleanup_conntrack);
/* Initialize fake conntrack so that NAT will skip it */
nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
@ -644,7 +642,8 @@ static int clean_nat(struct nf_conn *i, void *data)
static void __exit nf_nat_cleanup(void)
{
nf_ct_iterate_cleanup(&clean_nat, NULL);
nf_conntrack_destroyed = NULL;
rcu_assign_pointer(nf_conntrack_destroyed, NULL);
synchronize_rcu();
vfree(bysource);
nf_ct_l3proto_put(l3proto);
}


@ -26,16 +26,16 @@
*/
#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
static int fast_convergence = 1;
static int max_increment = 16;
static int beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh = 100;
static int bic_scale = 41;
static int tcp_friendliness = 1;
static int fast_convergence __read_mostly = 1;
static int max_increment __read_mostly = 16;
static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly = 100;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;
static u32 cube_rtt_scale;
static u32 beta_scale;
static u64 cube_factor;
static u32 cube_rtt_scale __read_mostly;
static u32 beta_scale __read_mostly;
static u64 cube_factor __read_mostly;
/* Note parameters that are used for precomputing scale factors are read-only */
module_param(fast_convergence, int, 0644);


@ -10,22 +10,23 @@
#include <linux/module.h>
#include <net/tcp.h>
#define ALPHA_BASE (1<<7) /* 1.0 with shift << 7 */
#define BETA_MIN (1<<6) /* 0.5 with shift << 7 */
#define ALPHA_BASE (1<<7) /* 1.0 with shift << 7 */
#define BETA_MIN (1<<6) /* 0.5 with shift << 7 */
#define BETA_MAX 102 /* 0.8 with shift << 7 */
static int use_rtt_scaling = 1;
static int use_rtt_scaling __read_mostly = 1;
module_param(use_rtt_scaling, int, 0644);
MODULE_PARM_DESC(use_rtt_scaling, "turn on/off RTT scaling");
static int use_bandwidth_switch = 1;
static int use_bandwidth_switch __read_mostly = 1;
module_param(use_bandwidth_switch, int, 0644);
MODULE_PARM_DESC(use_bandwidth_switch, "turn on/off bandwidth switcher");
struct htcp {
u32 alpha; /* Fixed point arith, << 7 */
u8 beta; /* Fixed point arith, << 7 */
u8 modeswitch; /* Delay modeswitch until we had at least one congestion event */
u8 modeswitch; /* Delay modeswitch
until we had at least one congestion event */
u16 pkts_acked;
u32 packetcount;
u32 minRTT;
@ -44,14 +45,14 @@ struct htcp {
u32 lasttime;
};
static inline u32 htcp_cong_time(struct htcp *ca)
static inline u32 htcp_cong_time(const struct htcp *ca)
{
return jiffies - ca->last_cong;
}
static inline u32 htcp_ccount(struct htcp *ca)
static inline u32 htcp_ccount(const struct htcp *ca)
{
return htcp_cong_time(ca)/ca->minRTT;
return htcp_cong_time(ca) / ca->minRTT;
}
static inline void htcp_reset(struct htcp *ca)
@ -67,10 +68,12 @@ static u32 htcp_cwnd_undo(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk);
ca->last_cong = ca->undo_last_cong;
ca->maxRTT = ca->undo_maxRTT;
ca->old_maxB = ca->undo_old_maxB;
return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta);
return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
}
static inline void measure_rtt(struct sock *sk)
@ -78,17 +81,19 @@ static inline void measure_rtt(struct sock *sk)
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk);
u32 srtt = tp->srtt>>3;
u32 srtt = tp->srtt >> 3;
/* keep track of minimum RTT seen so far, minRTT is zero at first */
if (ca->minRTT > srtt || !ca->minRTT)
ca->minRTT = srtt;
/* max RTT */
if (icsk->icsk_ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && htcp_ccount(ca) > 3) {
if (icsk->icsk_ca_state == TCP_CA_Open
&& tp->snd_ssthresh < 0xFFFF && htcp_ccount(ca) > 3) {
if (ca->maxRTT < ca->minRTT)
ca->maxRTT = ca->minRTT;
if (ca->maxRTT < srtt && srtt <= ca->maxRTT+msecs_to_jiffies(20))
if (ca->maxRTT < srtt
&& srtt <= ca->maxRTT + msecs_to_jiffies(20))
ca->maxRTT = srtt;
}
}
@ -116,15 +121,16 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
ca->packetcount += pkts_acked;
if (ca->packetcount >= tp->snd_cwnd - (ca->alpha>>7? : 1)
&& now - ca->lasttime >= ca->minRTT
&& ca->minRTT > 0) {
__u32 cur_Bi = ca->packetcount*HZ/(now - ca->lasttime);
if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1)
&& now - ca->lasttime >= ca->minRTT
&& ca->minRTT > 0) {
__u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
if (htcp_ccount(ca) <= 3) {
/* just after backoff */
ca->minB = ca->maxB = ca->Bi = cur_Bi;
} else {
ca->Bi = (3*ca->Bi + cur_Bi)/4;
ca->Bi = (3 * ca->Bi + cur_Bi) / 4;
if (ca->Bi > ca->maxB)
ca->maxB = ca->Bi;
if (ca->minB > ca->maxB)
@ -142,7 +148,7 @@ static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
u32 old_maxB = ca->old_maxB;
ca->old_maxB = ca->maxB;
if (!between(5*maxB, 4*old_maxB, 6*old_maxB)) {
if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
ca->beta = BETA_MIN;
ca->modeswitch = 0;
return;
@ -150,7 +156,7 @@ static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
}
if (ca->modeswitch && minRTT > msecs_to_jiffies(10) && maxRTT) {
ca->beta = (minRTT<<7)/maxRTT;
ca->beta = (minRTT << 7) / maxRTT;
if (ca->beta < BETA_MIN)
ca->beta = BETA_MIN;
else if (ca->beta > BETA_MAX)
@ -169,23 +175,26 @@ static inline void htcp_alpha_update(struct htcp *ca)
if (diff > HZ) {
diff -= HZ;
factor = 1+ ( 10*diff + ((diff/2)*(diff/2)/HZ) )/HZ;
factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / HZ)) / HZ;
}
if (use_rtt_scaling && minRTT) {
u32 scale = (HZ<<3)/(10*minRTT);
scale = min(max(scale, 1U<<2), 10U<<3); /* clamping ratio to interval [0.5,10]<<3 */
factor = (factor<<3)/scale;
u32 scale = (HZ << 3) / (10 * minRTT);
/* clamping ratio to interval [0.5,10]<<3 */
scale = min(max(scale, 1U << 2), 10U << 3);
factor = (factor << 3) / scale;
if (!factor)
factor = 1;
}
ca->alpha = 2*factor*((1<<7)-ca->beta);
ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
if (!ca->alpha)
ca->alpha = ALPHA_BASE;
}
/* After we have the rtt data to calculate beta, we'd still prefer to wait one
/*
* After we have the rtt data to calculate beta, we'd still prefer to wait one
* rtt before we adjust our beta to ensure we are working from a consistent
* data.
*
@ -202,15 +211,16 @@ static void htcp_param_update(struct sock *sk)
htcp_beta_update(ca, minRTT, maxRTT);
htcp_alpha_update(ca);
/* add slowly fading memory for maxRTT to accommodate routing changes etc */
/* add slowly fading memory for maxRTT to accommodate routing changes */
if (minRTT > 0 && maxRTT > minRTT)
ca->maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
ca->maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}
static u32 htcp_recalc_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct htcp *ca = inet_csk_ca(sk);
htcp_param_update(sk);
return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
}
@ -227,7 +237,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp);
else {
measure_rtt(sk);
/* In dangerous area, increase slowly.


@ -42,7 +42,8 @@ config IP6_NF_QUEUE
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6 && EXPERIMENTAL && NETFILTER_XTABLES
depends on INET && IPV6 && EXPERIMENTAL
select NETFILTER_XTABLES
help
ip6tables is a general, extensible packet identification framework.
Currently only the packet filtering and packet mangling subsystem


@ -501,7 +501,7 @@ static int __init ip6t_log_init(void)
static void __exit ip6t_log_fini(void)
{
nf_log_unregister_logger(&ip6t_logger);
nf_log_unregister(&ip6t_logger);
xt_unregister_target(&ip6t_log_reg);
}


@ -66,6 +66,13 @@ match(const struct sk_buff *skb,
return 0;
}
if (mh->ip6mh_proto != IPPROTO_NONE) {
duprintf("Dropping invalid MH Payload Proto: %u\n",
mh->ip6mh_proto);
*hotdrop = 1;
return 0;
}
return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type,
!!(mhinfo->invflags & IP6T_MH_INV_TYPE));
}


@ -154,8 +154,8 @@ ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
*/
if ((protoff < 0) || (protoff > (*pskb)->len)) {
DEBUGP("ip6_conntrack_core: can't find proto in pkt\n");
NF_CT_STAT_INC(error);
NF_CT_STAT_INC(invalid);
NF_CT_STAT_INC_ATOMIC(error);
NF_CT_STAT_INC_ATOMIC(invalid);
return -NF_ACCEPT;
}


@ -182,6 +182,7 @@ icmpv6_error_message(struct sk_buff *skb,
return -NF_ACCEPT;
}
/* rcu_read_lock()ed by nf_hook_slow */
inproto = __nf_ct_l4proto_find(PF_INET6, inprotonum);
/* Are they talking about one of our connections? */


@ -2297,16 +2297,17 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
&sel, tmp.security, 1);
security_xfrm_policy_free(&tmp);
xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL);
if (xp == NULL)
return -ENOENT;
err = 0;
err = security_xfrm_policy_delete(xp);
if ((err = security_xfrm_policy_delete(xp)))
xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
if (err)
goto out;
c.seq = hdr->sadb_msg_seq;
c.pid = hdr->sadb_msg_pid;
c.event = XFRM_MSG_DELPOLICY;


@ -302,7 +302,9 @@ config NETFILTER_XT_TARGET_CONNMARK
tristate '"CONNMARK" target support'
depends on NETFILTER_XTABLES
depends on IP_NF_MANGLE || IP6_NF_MANGLE
depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
depends on IP_NF_CONNTRACK || NF_CONNTRACK
select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK
select NF_CONNTRACK_MARK if NF_CONNTRACK
help
This option adds a `CONNMARK' target, which allows one to manipulate
the connection mark value. Similar to the MARK target, but
@ -434,7 +436,9 @@ config NETFILTER_XT_MATCH_COMMENT
config NETFILTER_XT_MATCH_CONNBYTES
tristate '"connbytes" per-connection counter match support'
depends on NETFILTER_XTABLES
depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK)
depends on IP_NF_CONNTRACK || NF_CONNTRACK
select IP_NF_CT_ACCT if IP_NF_CONNTRACK
select NF_CT_ACCT if NF_CONNTRACK
help
This option adds a `connbytes' match, which allows you to match the
number of bytes and/or packets for each direction within a connection.
@ -445,7 +449,9 @@ config NETFILTER_XT_MATCH_CONNBYTES
config NETFILTER_XT_MATCH_CONNMARK
tristate '"connmark" connection mark match support'
depends on NETFILTER_XTABLES
depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
depends on IP_NF_CONNTRACK || NF_CONNTRACK
select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK
select NF_CONNTRACK_MARK if NF_CONNTRACK
help
This option adds a `connmark' match, which allows you to match the
connection mark value previously set for the session by `CONNMARK'.


@ -22,29 +22,34 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <net/sock.h>
#include "nf_internals.h"
static DEFINE_SPINLOCK(afinfo_lock);
static DEFINE_MUTEX(afinfo_mutex);
struct nf_afinfo *nf_afinfo[NPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
int nf_register_afinfo(struct nf_afinfo *afinfo)
{
spin_lock(&afinfo_lock);
int err;
err = mutex_lock_interruptible(&afinfo_mutex);
if (err < 0)
return err;
rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
spin_unlock(&afinfo_lock);
mutex_unlock(&afinfo_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);
void nf_unregister_afinfo(struct nf_afinfo *afinfo)
{
spin_lock(&afinfo_lock);
mutex_lock(&afinfo_mutex);
rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
spin_unlock(&afinfo_lock);
mutex_unlock(&afinfo_mutex);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
@ -56,30 +61,31 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
* packets come back: if the hook is gone, the packet is discarded. */
struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);
static DEFINE_SPINLOCK(nf_hook_lock);
static DEFINE_MUTEX(nf_hook_mutex);
int nf_register_hook(struct nf_hook_ops *reg)
{
struct list_head *i;
int err;
spin_lock_bh(&nf_hook_lock);
err = mutex_lock_interruptible(&nf_hook_mutex);
if (err < 0)
return err;
list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
if (reg->priority < ((struct nf_hook_ops *)i)->priority)
break;
}
list_add_rcu(&reg->list, i->prev);
spin_unlock_bh(&nf_hook_lock);
synchronize_net();
mutex_unlock(&nf_hook_mutex);
return 0;
}
EXPORT_SYMBOL(nf_register_hook);
void nf_unregister_hook(struct nf_hook_ops *reg)
{
spin_lock_bh(&nf_hook_lock);
mutex_lock(&nf_hook_mutex);
list_del_rcu(&reg->list);
spin_unlock_bh(&nf_hook_lock);
mutex_unlock(&nf_hook_mutex);
synchronize_net();
}
@ -135,14 +141,14 @@ unsigned int nf_iterate(struct list_head *head,
continue;
/* Optimization: we don't need to hold module
reference here, since function can't sleep. --RR */
reference here, since function can't sleep. --RR */
verdict = elem->hook(hook, skb, indev, outdev, okfn);
if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
if (unlikely((verdict & NF_VERDICT_MASK)
> NF_MAX_VERDICT)) {
NFDEBUG("Evil return from %p(%u).\n",
elem->hook, hook);
elem->hook, hook);
continue;
}
#endif
@ -248,9 +254,12 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
void (*attach)(struct sk_buff *, struct sk_buff *);
if (skb->nfct && (attach = ip_ct_attach) != NULL) {
mb(); /* Just to be sure: must be read before executing this */
attach(new, skb);
if (skb->nfct) {
rcu_read_lock();
attach = rcu_dereference(ip_ct_attach);
if (attach)
attach(new, skb);
rcu_read_unlock();
}
}
EXPORT_SYMBOL(nf_ct_attach);
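The core.c hunks above swap the registration spinlocks for mutexes — so registration may sleep and can be interrupted by a signal — while the packet-path readers stay lockless under RCU. A minimal sketch of that writer side, assuming invented names:

	#include <linux/mutex.h>
	#include <linux/rcupdate.h>

	#define MY_NPROTO 34				/* illustrative table size */

	struct my_afinfo {
		int family;
		/* ... */
	};

	static DEFINE_MUTEX(my_reg_mutex);		/* serializes writers only */
	static struct my_afinfo *my_afinfo_tbl[MY_NPROTO];

	int my_register_afinfo(struct my_afinfo *info)
	{
		int err;

		err = mutex_lock_interruptible(&my_reg_mutex);
		if (err < 0)
			return err;			/* interrupted by a signal */
		rcu_assign_pointer(my_afinfo_tbl[info->family], info);
		mutex_unlock(&my_reg_mutex);
		return 0;
	}

	void my_unregister_afinfo(struct my_afinfo *info)
	{
		mutex_lock(&my_reg_mutex);
		rcu_assign_pointer(my_afinfo_tbl[info->family], NULL);
		mutex_unlock(&my_reg_mutex);
		synchronize_rcu();	/* wait out readers still in rcu_read_lock() */
	}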


@ -318,6 +318,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
typeof(nf_conntrack_destroyed) destroyed;
DEBUGP("destroy_conntrack(%p)\n", ct);
NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
@ -332,16 +333,21 @@ destroy_conntrack(struct nf_conntrack *nfct)
/* To make sure we don't get any weird locking issues here:
* destroy_conntrack() MUST NOT be called with a write lock
* to nf_conntrack_lock!!! -HW */
rcu_read_lock();
l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
if (l3proto && l3proto->destroy)
l3proto->destroy(ct);
l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
if (l4proto && l4proto->destroy)
l4proto->destroy(ct);
if (nf_conntrack_destroyed)
nf_conntrack_destroyed(ct);
destroyed = rcu_dereference(nf_conntrack_destroyed);
if (destroyed)
destroyed(ct);
rcu_read_unlock();
write_lock_bh(&nf_conntrack_lock);
/* Expectations will have been removed in clean_from_lists,
@ -560,7 +566,7 @@ static int early_drop(struct list_head *chain)
if (del_timer(&ct->timeout)) {
death_by_timeout((unsigned long)ct);
dropped = 1;
NF_CT_STAT_INC(early_drop);
NF_CT_STAT_INC_ATOMIC(early_drop);
}
nf_ct_put(ct);
return dropped;
@ -647,9 +653,14 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl)
{
struct nf_conntrack_l3proto *l3proto;
struct nf_conn *ct;
rcu_read_lock();
l3proto = __nf_ct_l3proto_find(orig->src.l3num);
return __nf_conntrack_alloc(orig, repl, l3proto, 0);
ct = __nf_conntrack_alloc(orig, repl, l3proto, 0);
rcu_read_unlock();
return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
@ -813,11 +824,13 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
/* Previously seen (loopback or untracked)? Ignore. */
if ((*pskb)->nfct) {
NF_CT_STAT_INC(ignore);
NF_CT_STAT_INC_ATOMIC(ignore);
return NF_ACCEPT;
}
/* rcu_read_lock()ed by nf_hook_slow */
l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
DEBUGP("not prepared to track yet or error occured\n");
return -ret;
@ -830,8 +843,8 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
* core what to do with the packet. */
if (l4proto->error != NULL &&
(ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
NF_CT_STAT_INC(error);
NF_CT_STAT_INC(invalid);
NF_CT_STAT_INC_ATOMIC(error);
NF_CT_STAT_INC_ATOMIC(invalid);
return -ret;
}
@ -839,13 +852,13 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
&set_reply, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC(invalid);
NF_CT_STAT_INC_ATOMIC(invalid);
return NF_ACCEPT;
}
if (IS_ERR(ct)) {
/* Too stressed to deal. */
NF_CT_STAT_INC(drop);
NF_CT_STAT_INC_ATOMIC(drop);
return NF_DROP;
}
@ -858,7 +871,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
DEBUGP("nf_conntrack_in: Can't track with proto module\n");
nf_conntrack_put((*pskb)->nfct);
(*pskb)->nfct = NULL;
NF_CT_STAT_INC(invalid);
NF_CT_STAT_INC_ATOMIC(invalid);
return -ret;
}
@ -872,10 +885,15 @@ EXPORT_SYMBOL_GPL(nf_conntrack_in);
int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig)
{
return nf_ct_invert_tuple(inverse, orig,
__nf_ct_l3proto_find(orig->src.l3num),
__nf_ct_l4proto_find(orig->src.l3num,
orig->dst.protonum));
int ret;
rcu_read_lock();
ret = nf_ct_invert_tuple(inverse, orig,
__nf_ct_l3proto_find(orig->src.l3num),
__nf_ct_l4proto_find(orig->src.l3num,
orig->dst.protonum));
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
@ -1048,7 +1066,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
if (iter(ct, data))
goto found;
}
}
}
list_for_each_entry(h, &unconfirmed, list) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
@ -1105,7 +1123,7 @@ void nf_conntrack_cleanup(void)
{
int i;
ip_ct_attach = NULL;
rcu_assign_pointer(ip_ct_attach, NULL);
/* This makes sure all current packets have passed through
netfilter framework. Roll on, two-stage module
@ -1268,12 +1286,12 @@ int __init nf_conntrack_init(void)
/* Don't NEED lock here, but good form anyway. */
write_lock_bh(&nf_conntrack_lock);
for (i = 0; i < AF_MAX; i++)
for (i = 0; i < AF_MAX; i++)
nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic;
write_unlock_bh(&nf_conntrack_lock);
write_unlock_bh(&nf_conntrack_lock);
/* For use by REJECT target */
ip_ct_attach = __nf_conntrack_attach;
rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach);
/* Set up fake conntrack:
- to never be deleted, not in any hashes */


@ -130,7 +130,7 @@ void nf_ct_remove_expectations(struct nf_conn *ct)
if (i->master == ct && del_timer(&i->timeout)) {
nf_ct_unlink_expect(i);
nf_conntrack_expect_put(i);
}
}
}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);


@ -126,7 +126,7 @@ get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term)
}
static int try_number(const char *data, size_t dlen, u_int32_t array[],
int array_size, char sep, char term)
int array_size, char sep, char term)
{
u_int32_t i, len;
@ -413,8 +413,8 @@ static int help(struct sk_buff **pskb,
goto out_update_nl;
}
/* Initialize IP/IPv6 addr to expected address (it's not mentioned
in EPSV responses) */
/* Initialize IP/IPv6 addr to expected address (it's not mentioned
in EPSV responses) */
cmd.l3num = ct->tuplehash[dir].tuple.src.l3num;
memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
sizeof(cmd.u3.all));
@ -466,11 +466,11 @@ static int help(struct sk_buff **pskb,
memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
sizeof(cmd.u3.all))) {
/* Enrico Scholz's passive FTP to partially RNAT'd ftp
server: it really wants us to connect to a
different IP address. Simply don't record it for
NAT. */
server: it really wants us to connect to a
different IP address. Simply don't record it for
NAT. */
if (cmd.l3num == PF_INET) {
DEBUGP("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT " != " NIPQUAD_FMT "\n",
DEBUGP("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT " != " NIPQUAD_FMT "\n",
NIPQUAD(cmd.u3.ip),
NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip));
} else {


@ -49,7 +49,7 @@ MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
static int callforward_filter __read_mostly = 1;
module_param(callforward_filter, bool, 0600);
MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
"if both endpoints are on different sides "
"if both endpoints are on different sides "
"(determined by routing information)");
/* Hooks for NAT */
@ -300,7 +300,7 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
IPPROTO_UDP, NULL, &rtcp_port);
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
(nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
ct->status & IPS_NAT_MASK) {
@ -743,7 +743,7 @@ static int callforward_do_filter(union nf_conntrack_address *src,
rt2 = (struct rt6_info *)ip6_route_output(NULL, &fl2);
if (rt2) {
if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
sizeof(rt1->rt6i_gateway)) &&
sizeof(rt1->rt6i_gateway)) &&
rt1->u.dst.dev == rt2->u.dst.dev)
ret = 1;
dst_release(&rt2->u.dst);
@ -780,7 +780,7 @@ static int expect_callforwarding(struct sk_buff **pskb,
* we don't need to track the second call */
if (callforward_filter &&
callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
ct->tuplehash[!dir].tuple.src.l3num)) {
ct->tuplehash[!dir].tuple.src.l3num)) {
DEBUGP("nf_ct_q931: Call Forwarding not tracked\n");
return 0;
}
@ -840,7 +840,7 @@ static int process_setup(struct sk_buff **pskb, struct nf_conn *ct,
if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
(set_h225_addr) && ct->status && IPS_NAT_MASK &&
get_h225_addr(ct, *data, &setup->destCallSignalAddress,
&addr, &port) &&
&addr, &port) &&
memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
DEBUGP("nf_ct_q931: set destCallSignalAddress "
NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n",
@ -858,7 +858,7 @@ static int process_setup(struct sk_buff **pskb, struct nf_conn *ct,
if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
(set_h225_addr) && ct->status & IPS_NAT_MASK &&
get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
&addr, &port) &&
&addr, &port) &&
memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
DEBUGP("nf_ct_q931: set sourceCallSignalAddress "
NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n",
@ -1282,7 +1282,7 @@ static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct,
for (i = 0; i < count; i++) {
if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3,
sizeof(addr)) == 0 && port != 0)
sizeof(addr)) == 0 && port != 0)
break;
}
@ -1294,7 +1294,7 @@ static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct,
return -1;
nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
gkrouted_only ? /* only accept calls from GK? */
&ct->tuplehash[!dir].tuple.src.u3 :
&ct->tuplehash[!dir].tuple.src.u3 :
NULL,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_TCP, NULL, &port);
@ -1513,7 +1513,7 @@ static int process_arq(struct sk_buff **pskb, struct nf_conn *ct,
set_h225_addr = rcu_dereference(set_h225_addr_hook);
if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
get_h225_addr(ct, *data, &arq->destCallSignalAddress,
&addr, &port) &&
&addr, &port) &&
!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
port == info->sig_port[dir] &&
set_h225_addr && ct->status & IPS_NAT_MASK) {
@ -1526,7 +1526,7 @@ static int process_arq(struct sk_buff **pskb, struct nf_conn *ct,
if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
&addr, &port) &&
&addr, &port) &&
!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
set_h225_addr && ct->status & IPS_NAT_MASK) {
/* Calling ARQ */


@ -57,7 +57,7 @@ static const char *dccprotos[] = {
#if 0
#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \
__FILE__, __FUNCTION__ , ## args)
__FILE__, __FUNCTION__ , ## args)
#else
#define DEBUGP(format, args...)
#endif


@ -43,7 +43,7 @@ module_param(timeout, uint, 0400);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
static int help(struct sk_buff **pskb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
struct nf_conntrack_expect *exp;
struct iphdr *iph = (*pskb)->nh.iph;


@ -314,7 +314,7 @@ nfattr_failure:
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_conntrack_event(struct notifier_block *this,
unsigned long events, void *ptr)
unsigned long events, void *ptr)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
@ -383,16 +383,16 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
if (events & IPCT_PROTOINFO
&& ctnetlink_dump_protoinfo(skb, ct) < 0)
goto nfattr_failure;
goto nfattr_failure;
if ((events & IPCT_HELPER || nfct_help(ct))
&& ctnetlink_dump_helpinfo(skb, ct) < 0)
goto nfattr_failure;
goto nfattr_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK
if ((events & IPCT_MARK || ct->mark)
&& ctnetlink_dump_mark(skb, ct) < 0)
goto nfattr_failure;
goto nfattr_failure;
#endif
if (events & IPCT_COUNTER_FILLING &&
@ -450,7 +450,7 @@ restart:
cb->args[1] = 0;
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_seq,
IPCTNL_MSG_CT_NEW,
1, ct) < 0) {
nf_conntrack_get(&ct->ct_general);
@ -1120,7 +1120,7 @@ nfattr_failure:
static inline int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
const struct nf_conntrack_expect *exp)
{
struct nf_conn *master = exp->master;
__be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ);
@ -1279,7 +1279,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
u32 rlen;
if ((*errp = netlink_dump_start(ctnl, skb, nlh,
ctnetlink_exp_dump_table,
ctnetlink_exp_dump_table,
ctnetlink_done)) != 0)
return -EINVAL;
rlen = NLMSG_ALIGN(nlh->nlmsg_len);


@ -520,7 +520,7 @@ conntrack_pptp_help(struct sk_buff **pskb, unsigned int protoff,
tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph);
BUG_ON(!tcph);
nexthdr_off += tcph->doff * 4;
datalen = tcplen - tcph->doff * 4;
datalen = tcplen - tcph->doff * 4;
pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph);
if (!pptph) {


@ -66,7 +66,7 @@ __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto)
if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
return &nf_conntrack_l4proto_generic;
return nf_ct_protos[l3proto][l4proto];
return rcu_dereference(nf_ct_protos[l3proto][l4proto]);
}
EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
@ -77,11 +77,11 @@ nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto)
{
struct nf_conntrack_l4proto *p;
preempt_disable();
rcu_read_lock();
p = __nf_ct_l4proto_find(l3proto, l4proto);
if (!try_module_get(p->me))
p = &nf_conntrack_l4proto_generic;
preempt_enable();
rcu_read_unlock();
return p;
}
@ -98,11 +98,11 @@ nf_ct_l3proto_find_get(u_int16_t l3proto)
{
struct nf_conntrack_l3proto *p;
preempt_disable();
rcu_read_lock();
p = __nf_ct_l3proto_find(l3proto);
if (!try_module_get(p->me))
p = &nf_conntrack_l3proto_generic;
preempt_enable();
rcu_read_unlock();
return p;
}
@ -137,10 +137,8 @@ void nf_ct_l3proto_module_put(unsigned short l3proto)
{
struct nf_conntrack_l3proto *p;
preempt_disable();
/* rcu_read_lock not necessary since the caller holds a reference */
p = __nf_ct_l3proto_find(l3proto);
preempt_enable();
module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
@ -202,7 +200,7 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
ret = -EBUSY;
goto out_unlock;
}
nf_ct_l3protos[proto->l3proto] = proto;
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);
write_unlock_bh(&nf_conntrack_lock);
ret = nf_ct_l3proto_register_sysctl(proto);
@ -217,35 +215,21 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
if (proto->l3proto >= AF_MAX) {
ret = -EBUSY;
goto out;
}
BUG_ON(proto->l3proto >= AF_MAX);
write_lock_bh(&nf_conntrack_lock);
if (nf_ct_l3protos[proto->l3proto] != proto) {
write_unlock_bh(&nf_conntrack_lock);
ret = -EBUSY;
goto out;
}
nf_ct_l3protos[proto->l3proto] = &nf_conntrack_l3proto_generic;
BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
&nf_conntrack_l3proto_generic);
write_unlock_bh(&nf_conntrack_lock);
synchronize_rcu();
nf_ct_l3proto_unregister_sysctl(proto);
/* Somebody could be still looking at the proto in bh. */
synchronize_net();
/* Remove all contrack entries for this protocol */
nf_ct_iterate_cleanup(kill_l3proto, proto);
out:
return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
@ -356,7 +340,7 @@ retry:
goto retry;
}
nf_ct_protos[l4proto->l3proto][l4proto->l4proto] = l4proto;
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], l4proto);
write_unlock_bh(&nf_conntrack_lock);
ret = nf_ct_l4proto_register_sysctl(l4proto);
@ -371,40 +355,25 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
if (l4proto->l3proto >= PF_MAX) {
ret = -EBUSY;
goto out;
}
BUG_ON(l4proto->l3proto >= PF_MAX);
if (l4proto == &nf_conntrack_l4proto_generic) {
nf_ct_l4proto_unregister_sysctl(l4proto);
goto out;
return;
}
write_lock_bh(&nf_conntrack_lock);
if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
!= l4proto) {
write_unlock_bh(&nf_conntrack_lock);
ret = -EBUSY;
goto out;
}
nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
= &nf_conntrack_l4proto_generic;
BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
&nf_conntrack_l4proto_generic);
write_unlock_bh(&nf_conntrack_lock);
synchronize_rcu();
nf_ct_l4proto_unregister_sysctl(l4proto);
/* Somebody could be still looking at the proto in bh. */
synchronize_net();
/* Remove all contrack entries for this protocol */
nf_ct_iterate_cleanup(kill_l4proto, l4proto);
out:
return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);


@ -104,15 +104,15 @@ point. Please note the subtleties. -Kiran
NONE - Nothing so far.
COOKIE WAIT - We have seen an INIT chunk in the original direction, or also
an INIT_ACK chunk in the reply direction.
an INIT_ACK chunk in the reply direction.
COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction.
ESTABLISHED - We have seen a COOKIE_ACK in the reply direction.
SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction.
SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply directoin.
SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
to that of the SHUTDOWN chunk.
to that of the SHUTDOWN chunk.
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
the SHUTDOWN chunk. Connection is closed.
the SHUTDOWN chunk. Connection is closed.
*/
/* TODO
@ -407,7 +407,7 @@ static int sctp_packet(struct nf_conn *conntrack,
sctp_inithdr_t _inithdr, *ih;
ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
sizeof(_inithdr), &_inithdr);
sizeof(_inithdr), &_inithdr);
if (ih == NULL) {
write_unlock_bh(&sctp_lock);
return -1;
@ -481,7 +481,7 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
sctp_inithdr_t _inithdr, *ih;
ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
sizeof(_inithdr), &_inithdr);
sizeof(_inithdr), &_inithdr);
if (ih == NULL)
return 0;


@ -58,16 +58,16 @@ static DEFINE_RWLOCK(tcp_lock);
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
int nf_ct_tcp_be_liberal __read_mostly = 0;
static int nf_ct_tcp_be_liberal __read_mostly = 0;
/* If it is set to zero, we disable picking up already established
connections. */
int nf_ct_tcp_loose __read_mostly = 1;
static int nf_ct_tcp_loose __read_mostly = 1;
/* Max number of the retransmitted packets without receiving an (acceptable)
ACK from the destination. If this number is reached, a shorter timer
will be started. */
int nf_ct_tcp_max_retrans __read_mostly = 3;
static int nf_ct_tcp_max_retrans __read_mostly = 3;
/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
closely. They're more complex. --RR */
@ -291,7 +291,7 @@ static enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
}
}
};
static int tcp_pkt_to_tuple(const struct sk_buff *skb,
@ -361,7 +361,7 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph)
after the right or before the left edge) and thus receivers may ACK
segments after the right edge of the window.
td_maxend = max(sack + max(win,1)) seen in reply packets
td_maxend = max(sack + max(win,1)) seen in reply packets
td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
td_maxwin += seq + len - sender.td_maxend
if seq + len > sender.td_maxend
@ -457,7 +457,7 @@ static void tcp_options(const struct sk_buff *skb,
static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
struct tcphdr *tcph, __u32 *sack)
{
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
unsigned char *ptr;
int length = (tcph->doff*4) - sizeof(struct tcphdr);
__u32 tmp;
@@ -472,10 +472,10 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
/* Fast path for timestamp-only option */
if (length == TCPOLEN_TSTAMP_ALIGNED*4
&& *(__be32 *)ptr ==
__constant_htonl((TCPOPT_NOP << 24)
| (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8)
| TCPOLEN_TIMESTAMP))
__constant_htonl((TCPOPT_NOP << 24)
| (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8)
| TCPOLEN_TIMESTAMP))
return;
while (length > 0) {
@@ -497,13 +497,13 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
if (opcode == TCPOPT_SACK
&& opsize >= (TCPOLEN_SACK_BASE
+ TCPOLEN_SACK_PERBLOCK)
+ TCPOLEN_SACK_PERBLOCK)
&& !((opsize - TCPOLEN_SACK_BASE)
% TCPOLEN_SACK_PERBLOCK)) {
for (i = 0;
i < (opsize - TCPOLEN_SACK_BASE);
i += TCPOLEN_SACK_PERBLOCK) {
tmp = ntohl(*((__be32 *)(ptr+i)+1));
% TCPOLEN_SACK_PERBLOCK)) {
for (i = 0;
i < (opsize - TCPOLEN_SACK_BASE);
i += TCPOLEN_SACK_PERBLOCK) {
tmp = ntohl(*((__be32 *)(ptr+i)+1));
if (after(tmp, *sack))
*sack = tmp;
@@ -517,11 +517,11 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
static int tcp_in_window(struct ip_ct_tcp *state,
enum ip_conntrack_dir dir,
unsigned int index,
const struct sk_buff *skb,
enum ip_conntrack_dir dir,
unsigned int index,
const struct sk_buff *skb,
unsigned int dataoff,
struct tcphdr *tcph,
struct tcphdr *tcph,
int pf)
{
struct ip_ct_tcp_state *sender = &state->seen[dir];
@@ -580,7 +580,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
* We are in the middle of a connection,
* its history is lost for us.
* Let's try to use the data from the packet.
*/
*/
sender->td_end = end;
sender->td_maxwin = (win == 0 ? 1 : win);
sender->td_maxend = end + sender->td_maxwin;
@@ -644,14 +644,14 @@ static int tcp_in_window(struct ip_ct_tcp *state,
DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
before(seq, sender->td_maxend + 1),
after(end, sender->td_end - receiver->td_maxwin - 1),
before(sack, receiver->td_end + 1),
after(ack, receiver->td_end - MAXACKWINDOW(sender)));
before(sack, receiver->td_end + 1),
after(ack, receiver->td_end - MAXACKWINDOW(sender)));
if (before(seq, sender->td_maxend + 1) &&
after(end, sender->td_end - receiver->td_maxwin - 1) &&
before(sack, receiver->td_end + 1) &&
after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
/*
/*
* Take into account window scaling (RFC 1323).
*/
if (!tcph->syn)
@@ -712,7 +712,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
: "ACK is over the upper bound (ACKed data not seen yet)"
: "SEQ is under the lower bound (already ACKed data retransmitted)"
: "SEQ is over the upper bound (over the window of the receiver)");
}
}
DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
"receiver end=%u maxend=%u maxwin=%u\n",
@@ -804,7 +804,7 @@ static int tcp_error(struct sk_buff *skb,
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: short packet ");
return -NF_ACCEPT;
}
}
/* Not whole TCP header or malformed packet */
if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
@@ -876,7 +876,7 @@ static int tcp_packet(struct nf_conn *conntrack,
&& conntrack->proto.tcp.last_index == TCP_SYN_SET
&& conntrack->proto.tcp.last_dir != dir
&& ntohl(th->ack_seq) ==
conntrack->proto.tcp.last_end) {
conntrack->proto.tcp.last_end) {
/* This SYN/ACK acknowledges a SYN that we earlier
* ignored as invalid. This means that the client and
* the server are both in sync, while the firewall is
@@ -884,14 +884,14 @@ static int tcp_packet(struct nf_conn *conntrack,
* that the client cannot but retransmit its SYN and
* thus initiate a clean new session.
*/
write_unlock_bh(&tcp_lock);
write_unlock_bh(&tcp_lock);
if (LOG_INVALID(IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: killing out of sync session ");
if (del_timer(&conntrack->timeout))
conntrack->timeout.function((unsigned long)
conntrack);
return -NF_DROP;
if (del_timer(&conntrack->timeout))
conntrack->timeout.function((unsigned long)
conntrack);
return -NF_DROP;
}
conntrack->proto.tcp.last_index = index;
conntrack->proto.tcp.last_dir = dir;
@@ -921,13 +921,13 @@ static int tcp_packet(struct nf_conn *conntrack,
IP_CT_TCP_FLAG_CLOSE_INIT)
|| after(ntohl(th->seq),
conntrack->proto.tcp.seen[dir].td_end)) {
/* Attempt to reopen a closed connection.
* Delete this connection and look up again. */
write_unlock_bh(&tcp_lock);
if (del_timer(&conntrack->timeout))
conntrack->timeout.function((unsigned long)
conntrack);
return -NF_REPEAT;
/* Attempt to reopen a closed connection.
* Delete this connection and look up again. */
write_unlock_bh(&tcp_lock);
if (del_timer(&conntrack->timeout))
conntrack->timeout.function((unsigned long)
conntrack);
return -NF_REPEAT;
} else {
write_unlock_bh(&tcp_lock);
if (LOG_INVALID(IPPROTO_TCP))
@@ -938,9 +938,9 @@ static int tcp_packet(struct nf_conn *conntrack,
case TCP_CONNTRACK_CLOSE:
if (index == TCP_RST_SET
&& ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
&& conntrack->proto.tcp.last_index == TCP_SYN_SET)
|| (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
&& conntrack->proto.tcp.last_index == TCP_ACK_SET))
&& conntrack->proto.tcp.last_index == TCP_SYN_SET)
|| (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
&& conntrack->proto.tcp.last_index == TCP_ACK_SET))
&& ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
/* RST sent to invalid SYN or ACK we had let through
* at a) and c) above:
@@ -1140,7 +1140,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct)
if (!attr)
return 0;
nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr);
nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr);
if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp))
return -EINVAL;

View file

@@ -341,7 +341,7 @@ int ct_sip_get_info(struct nf_conn *ct,
continue;
}
aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen,
ct_sip_lnlen(dptr, limit),
ct_sip_lnlen(dptr, limit),
hnfo->case_sensitive);
if (!aux) {
DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str,
@@ -451,12 +451,12 @@ static int sip_help(struct sk_buff **pskb,
/* We'll drop only if there are parse problems. */
if (!parse_addr(ct, dptr + matchoff, NULL, &addr,
dptr + datalen)) {
dptr + datalen)) {
ret = NF_DROP;
goto out;
}
if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen,
POS_MEDIA) > 0) {
POS_MEDIA) > 0) {
port = simple_strtoul(dptr + matchoff, NULL, 10);
if (port < 1024) {

View file

@@ -472,7 +472,7 @@ static int __init nf_conntrack_standalone_init(void)
static void __exit nf_conntrack_standalone_fini(void)
{
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nf_ct_sysctl_header);
unregister_sysctl_table(nf_ct_sysctl_header);
#endif
#ifdef CONFIG_PROC_FS
remove_proc_entry("nf_conntrack", proc_net_stat);

View file

@@ -31,7 +31,7 @@ MODULE_PARM_DESC(ports, "Port numbers of TFTP servers");
#if 0
#define DEBUGP(format, args...) printk("%s:%s:" format, \
__FILE__, __FUNCTION__ , ## args)
__FILE__, __FUNCTION__ , ## args)
#else
#define DEBUGP(format, args...)
#endif

View file

@@ -14,62 +14,63 @@
#define NF_LOG_PREFIXLEN 128
static struct nf_logger *nf_logging[NPROTO]; /* = NULL */
static DEFINE_SPINLOCK(nf_log_lock);
static struct nf_logger *nf_loggers[NPROTO];
static DEFINE_MUTEX(nf_log_mutex);
/* return EBUSY if somebody else is registered, EEXIST if the same logger
 * is registered, 0 on success. */
int nf_log_register(int pf, struct nf_logger *logger)
{
int ret = -EBUSY;
int ret;
if (pf >= NPROTO)
return -EINVAL;
/* Any setup of logging members must be done before
* substituting pointer. */
spin_lock(&nf_log_lock);
if (!nf_logging[pf]) {
rcu_assign_pointer(nf_logging[pf], logger);
ret = 0;
} else if (nf_logging[pf] == logger)
ret = -EEXIST;
ret = mutex_lock_interruptible(&nf_log_mutex);
if (ret < 0)
return ret;
spin_unlock(&nf_log_lock);
if (!nf_loggers[pf])
rcu_assign_pointer(nf_loggers[pf], logger);
else if (nf_loggers[pf] == logger)
ret = -EEXIST;
else
ret = -EBUSY;
mutex_unlock(&nf_log_mutex);
return ret;
}
EXPORT_SYMBOL(nf_log_register);
int nf_log_unregister_pf(int pf)
void nf_log_unregister_pf(int pf)
{
if (pf >= NPROTO)
return -EINVAL;
spin_lock(&nf_log_lock);
nf_logging[pf] = NULL;
spin_unlock(&nf_log_lock);
return;
mutex_lock(&nf_log_mutex);
rcu_assign_pointer(nf_loggers[pf], NULL);
mutex_unlock(&nf_log_mutex);
/* Give time to concurrent readers. */
synchronize_net();
return 0;
synchronize_rcu();
}
EXPORT_SYMBOL(nf_log_unregister_pf);
void nf_log_unregister_logger(struct nf_logger *logger)
void nf_log_unregister(struct nf_logger *logger)
{
int i;
spin_lock(&nf_log_lock);
mutex_lock(&nf_log_mutex);
for (i = 0; i < NPROTO; i++) {
if (nf_logging[i] == logger)
nf_logging[i] = NULL;
if (nf_loggers[i] == logger)
rcu_assign_pointer(nf_loggers[i], NULL);
}
spin_unlock(&nf_log_lock);
mutex_unlock(&nf_log_mutex);
synchronize_net();
synchronize_rcu();
}
EXPORT_SYMBOL(nf_log_unregister_logger);
EXPORT_SYMBOL(nf_log_unregister);
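Taken together, a logging backend registers one struct nf_logger per family and drops it again on unload; a minimal sketch of the calling side, assuming the name/logfn/me fields and the callback signature from nf_log.h:

static void example_logfn(unsigned int pf, unsigned int hooknum,
			  const struct sk_buff *skb,
			  const struct net_device *in,
			  const struct net_device *out,
			  const struct nf_loginfo *li, const char *prefix)
{
	/* consume the packet, e.g. hand it to userspace */
}

static struct nf_logger example_logger = {
	.name	= "example_log",
	.logfn	= &example_logfn,
	.me	= THIS_MODULE,
};

static int __init example_log_init(void)
{
	int ret = nf_log_register(PF_INET, &example_logger);

	/* -EEXIST: we are already the logger for this family;
	 * -EBUSY: another backend got there first. */
	if (ret < 0 && ret != -EEXIST)
		return ret;
	return 0;
}

static void __exit example_log_fini(void)
{
	/* clears the logger for every family, then waits for readers */
	nf_log_unregister(&example_logger);
}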
void nf_log_packet(int pf,
unsigned int hooknum,
@@ -84,7 +85,7 @@ void nf_log_packet(int pf,
struct nf_logger *logger;
rcu_read_lock();
logger = rcu_dereference(nf_logging[pf]);
logger = rcu_dereference(nf_loggers[pf]);
if (logger) {
va_start(args, fmt);
vsnprintf(prefix, sizeof(prefix), fmt, args);
@@ -131,7 +132,7 @@ static int seq_show(struct seq_file *s, void *v)
loff_t *pos = v;
const struct nf_logger *logger;
logger = rcu_dereference(nf_logging[*pos]);
logger = rcu_dereference(nf_loggers[*pos]);
if (!logger)
return seq_printf(s, "%2lld NONE\n", *pos);

View file

@@ -227,8 +227,8 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
if (i == elem)
break;
}
break;
}
if (i == &nf_hooks[info->pf][info->hook]) {
/* The module which sent it to userspace is gone. */

View file

@@ -187,7 +187,7 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys,
/* implicit: if nlmsg_len == min_len, we return 0, and an empty
* (zeroed) cda[] array. The message is valid, but empty. */
return 0;
return 0;
}
int nfnetlink_has_listeners(unsigned int group)
@@ -357,7 +357,7 @@ static int __init nfnetlink_init(void)
printk("Netfilter messages via NETLINK v%s.\n", nfversion);
nfnl = netlink_kernel_create(NETLINK_NETFILTER, NFNLGRP_MAX,
nfnetlink_rcv, THIS_MODULE);
nfnetlink_rcv, THIS_MODULE);
if (!nfnl) {
printk(KERN_ERR "cannot initialize nfnetlink!\n");
return -1;

View file

@@ -1077,7 +1077,7 @@ cleanup_netlink_notifier:
static void __exit nfnetlink_log_fini(void)
{
nf_log_unregister_logger(&nfulnl_logger);
nf_log_unregister(&nfulnl_logger);
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_log", proc_net_netfilter);
#endif

View file

@@ -490,7 +490,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
struct nfqnl_msg_packet_hw phw;
int len = entskb->dev->hard_header_parse(entskb,
phw.hw_addr);
phw.hw_addr);
phw.hw_addrlen = htons(len);
NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
}
@@ -580,10 +580,10 @@ nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
goto err_out_free_nskb;
if (queue->queue_total >= queue->queue_maxlen) {
queue->queue_dropped++;
queue->queue_dropped++;
status = -ENOSPC;
if (net_ratelimit())
printk(KERN_WARNING "nf_queue: full at %d entries, "
printk(KERN_WARNING "nf_queue: full at %d entries, "
"dropping packets(s). Dropped: %d\n",
queue->queue_total, queue->queue_dropped);
goto err_out_free_nskb;
@@ -592,7 +592,7 @@ nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
/* nfnetlink_unicast will either free the nskb or add it to a socket */
status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
if (status < 0) {
queue->queue_user_dropped++;
queue->queue_user_dropped++;
goto err_out_unlock;
}
@@ -631,9 +631,9 @@ nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
struct sk_buff *newskb;
newskb = skb_copy_expand(e->skb,
skb_headroom(e->skb),
diff,
GFP_ATOMIC);
skb_headroom(e->skb),
diff,
GFP_ATOMIC);
if (newskb == NULL) {
printk(KERN_WARNING "nf_queue: OOM "
"in mangle, dropping packet\n");
@@ -835,7 +835,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (nfqa[NFQA_MARK-1])
entry->skb->mark = ntohl(*(__be32 *)
NFA_DATA(nfqa[NFQA_MARK-1]));
NFA_DATA(nfqa[NFQA_MARK-1]));
issue_verdict(entry, verdict);
instance_put(queue);

View file

@@ -305,7 +305,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target,
EXPORT_SYMBOL_GPL(xt_find_revision);
int xt_check_match(const struct xt_match *match, unsigned short family,
unsigned int size, const char *table, unsigned int hook_mask,
unsigned int size, const char *table, unsigned int hook_mask,
unsigned short proto, int inv_proto)
{
if (XT_ALIGN(match->matchsize) != size) {
@@ -377,7 +377,7 @@ int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
if (copy_to_user(cm, m, sizeof(*cm)) ||
put_user(msize, &cm->u.user.match_size))
return -EFAULT;
return -EFAULT;
if (match->compat_to_user) {
if (match->compat_to_user((void __user *)cm->data, m->data))
@@ -432,7 +432,7 @@ int xt_compat_target_offset(struct xt_target *target)
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
int *size)
int *size)
{
struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
@@ -467,7 +467,7 @@ int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
if (copy_to_user(ct, t, sizeof(*ct)) ||
put_user(tsize, &ct->u.user.target_size))
return -EFAULT;
return -EFAULT;
if (target->compat_to_user) {
if (target->compat_to_user((void __user *)ct->data, t->data))

View file

@@ -15,6 +15,8 @@
#include <linux/ip.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CLASSIFY.h>
@@ -46,7 +48,7 @@ static struct xt_target xt_classify_target[] = {
.table = "mangle",
.hooks = (1 << NF_IP_LOCAL_OUT) |
(1 << NF_IP_FORWARD) |
(1 << NF_IP_POST_ROUTING),
(1 << NF_IP_POST_ROUTING),
.me = THIS_MODULE,
},
{
@@ -55,9 +57,9 @@ static struct xt_target xt_classify_target[] = {
.target = target,
.targetsize = sizeof(struct xt_classify_target_info),
.table = "mangle",
.hooks = (1 << NF_IP_LOCAL_OUT) |
(1 << NF_IP_FORWARD) |
(1 << NF_IP_POST_ROUTING),
.hooks = (1 << NF_IP6_LOCAL_OUT) |
(1 << NF_IP6_FORWARD) |
(1 << NF_IP6_POST_ROUTING),
.me = THIS_MODULE,
},
};

View file

@@ -51,9 +51,9 @@ match(const struct sk_buff *skb,
if (ct == &ip_conntrack_untracked)
statebit = XT_CONNTRACK_STATE_UNTRACKED;
else if (ct)
statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
else
statebit = XT_CONNTRACK_STATE_INVALID;
statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
else
statebit = XT_CONNTRACK_STATE_INVALID;
if (sinfo->flags & XT_CONNTRACK_STATE) {
if (ct) {
@@ -77,7 +77,7 @@ match(const struct sk_buff *skb,
FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum !=
sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
XT_CONNTRACK_PROTO))
return 0;
return 0;
if (sinfo->flags & XT_CONNTRACK_ORIGSRC &&
FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip &
@@ -147,9 +147,9 @@ match(const struct sk_buff *skb,
if (ct == &nf_conntrack_untracked)
statebit = XT_CONNTRACK_STATE_UNTRACKED;
else if (ct)
statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
else
statebit = XT_CONNTRACK_STATE_INVALID;
statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
else
statebit = XT_CONNTRACK_STATE_INVALID;
if (sinfo->flags & XT_CONNTRACK_STATE) {
if (ct) {
@@ -171,41 +171,41 @@ match(const struct sk_buff *skb,
if (sinfo->flags & XT_CONNTRACK_PROTO &&
FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum !=
sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
XT_CONNTRACK_PROTO))
return 0;
return 0;
if (sinfo->flags & XT_CONNTRACK_ORIGSRC &&
FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip &
sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip,
XT_CONNTRACK_ORIGSRC))
return 0;
if (sinfo->flags & XT_CONNTRACK_ORIGDST &&
FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip &
sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip,
XT_CONNTRACK_ORIGDST))
return 0;
if (sinfo->flags & XT_CONNTRACK_REPLSRC &&
FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip &
sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) !=
sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) !=
sinfo->tuple[IP_CT_DIR_REPLY].src.ip,
XT_CONNTRACK_REPLSRC))
return 0;
if (sinfo->flags & XT_CONNTRACK_REPLDST &&
FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip &
sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) !=
sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) !=
sinfo->tuple[IP_CT_DIR_REPLY].dst.ip,
XT_CONNTRACK_REPLDST))
return 0;
if (sinfo->flags & XT_CONNTRACK_STATUS &&
FWINV((ct->status & sinfo->statusmask) == 0,
XT_CONNTRACK_STATUS))
XT_CONNTRACK_STATUS))
return 0;
if(sinfo->flags & XT_CONNTRACK_EXPIRES) {

View file

@@ -26,7 +26,7 @@ MODULE_DESCRIPTION("Match for DCCP protocol packets");
MODULE_ALIAS("ipt_dccp");
#define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
|| (!!((invflag) & (option)) ^ (cond)))
|| (!!((invflag) & (option)) ^ (cond)))
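Spelled out, DCCHECK() passes whenever the given check is not requested in the match flags, and otherwise XORs the port-range result with the corresponding invert bit:

/*
 * DCCHECK(cond, option, flags, invflags):
 *	!(flags & option)	-> 1 (check not requested, always matches)
 *	otherwise		-> cond ^ !!(invflags & option)
 */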
static unsigned char *dccp_optbuf;
static DEFINE_SPINLOCK(dccp_buflock);
@@ -111,11 +111,11 @@ match(const struct sk_buff *skb,
if (dh == NULL) {
*hotdrop = 1;
return 0;
}
}
return DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0])
&& (ntohs(dh->dccph_sport) <= info->spts[1])),
XT_DCCP_SRC_PORTS, info->flags, info->invflags)
XT_DCCP_SRC_PORTS, info->flags, info->invflags)
&& DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0])
&& (ntohs(dh->dccph_dport) <= info->dpts[1])),
XT_DCCP_DEST_PORTS, info->flags, info->invflags)

View file

@@ -208,7 +208,7 @@ static int htable_create(struct xt_hashlimit_info *minfo, int family)
spin_lock_init(&hinfo->lock);
hinfo->pde = create_proc_entry(minfo->name, 0,
family == AF_INET ? hashlimit_procdir4 :
hashlimit_procdir6);
hashlimit_procdir6);
if (!hinfo->pde) {
vfree(hinfo);
return -1;
@@ -240,7 +240,7 @@ static int select_gc(struct xt_hashlimit_htable *ht, struct dsthash_ent *he)
}
static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
int (*select)(struct xt_hashlimit_htable *ht,
int (*select)(struct xt_hashlimit_htable *ht,
struct dsthash_ent *he))
{
unsigned int i;
@@ -279,7 +279,7 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
/* remove proc entry */
remove_proc_entry(hinfo->pde->name,
hinfo->family == AF_INET ? hashlimit_procdir4 :
hashlimit_procdir6);
hashlimit_procdir6);
htable_selective_cleanup(hinfo, select_all);
vfree(hinfo);
}
@@ -483,7 +483,7 @@ hashlimit_match(const struct sk_buff *skb,
return 1;
}
spin_unlock_bh(&hinfo->lock);
spin_unlock_bh(&hinfo->lock);
/* default case: we're overlimit, thus don't match */
return 0;

View file

@@ -79,7 +79,7 @@ match(const struct sk_buff *skb,
ret ^= 1;
else
ret ^= !strncmp(ct->master->helper->name, info->name,
strlen(ct->master->helper->name));
strlen(ct->master->helper->name));
out_unlock:
read_unlock_bh(&ip_conntrack_lock);
return ret;
@@ -129,7 +129,7 @@ match(const struct sk_buff *skb,
ret ^= 1;
else
ret ^= !strncmp(master_help->helper->name, info->name,
strlen(master_help->helper->name));
strlen(master_help->helper->name));
out_unlock:
read_unlock_bh(&nf_conntrack_lock);
return ret;

View file

@@ -89,7 +89,7 @@ ipt_limit_match(const struct sk_buff *skb,
return 1;
}
spin_unlock_bh(&limit_lock);
spin_unlock_bh(&limit_lock);
return 0;
}

View file

@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_mac.h>
#include <linux/netfilter/x_tables.h>
@@ -59,9 +60,9 @@ static struct xt_match xt_mac_match[] = {
.family = AF_INET6,
.match = match,
.matchsize = sizeof(struct xt_mac_info),
.hooks = (1 << NF_IP_PRE_ROUTING) |
(1 << NF_IP_LOCAL_IN) |
(1 << NF_IP_FORWARD),
.hooks = (1 << NF_IP6_PRE_ROUTING) |
(1 << NF_IP6_LOCAL_IN) |
(1 << NF_IP6_FORWARD),
.me = THIS_MODULE,
},
};

View file

@@ -36,10 +36,10 @@ match(const struct sk_buff *skb,
static int
checkentry(const char *tablename,
const void *entry,
const void *entry,
const struct xt_match *match,
void *matchinfo,
unsigned int hook_mask)
void *matchinfo,
unsigned int hook_mask)
{
const struct xt_mark_info *minfo = matchinfo;

View file

@@ -91,7 +91,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
}
}
return minfo->invert;
return minfo->invert;
}
static int

View file

@@ -117,7 +117,7 @@ checkentry(const char *tablename,
(!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
(1 << NF_IP_POST_ROUTING))) {
(1 << NF_IP_POST_ROUTING))) {
printk(KERN_WARNING "physdev match: using --physdev-out in the "
"OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
"traffic is not supported anymore.\n");

View file

@@ -109,13 +109,13 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
}
static int match(const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct xt_match *match,
const void *matchinfo,
int offset,
unsigned int protoff,
int *hotdrop)
const struct net_device *in,
const struct net_device *out,
const struct xt_match *match,
const void *matchinfo,
int offset,
unsigned int protoff,
int *hotdrop)
{
const struct xt_policy_info *info = matchinfo;
int ret;
@@ -134,27 +134,27 @@ static int match(const struct sk_buff *skb,
}
static int checkentry(const char *tablename, const void *ip_void,
const struct xt_match *match,
void *matchinfo, unsigned int hook_mask)
const struct xt_match *match,
void *matchinfo, unsigned int hook_mask)
{
struct xt_policy_info *info = matchinfo;
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
printk(KERN_ERR "xt_policy: neither incoming nor "
"outgoing policy selected\n");
"outgoing policy selected\n");
return 0;
}
/* hook values are equal for IPv4 and IPv6 */
if (hook_mask & (1 << NF_IP_PRE_ROUTING | 1 << NF_IP_LOCAL_IN)
&& info->flags & XT_POLICY_MATCH_OUT) {
printk(KERN_ERR "xt_policy: output policy not valid in "
"PRE_ROUTING and INPUT\n");
"PRE_ROUTING and INPUT\n");
return 0;
}
if (hook_mask & (1 << NF_IP_POST_ROUTING | 1 << NF_IP_LOCAL_OUT)
&& info->flags & XT_POLICY_MATCH_IN) {
printk(KERN_ERR "xt_policy: input policy not valid in "
"POST_ROUTING and OUTPUT\n");
"POST_ROUTING and OUTPUT\n");
return 0;
}
if (info->len > XT_POLICY_MAX_ELEM) {

View file

@@ -30,8 +30,8 @@ match(const struct sk_buff *skb,
q->quota -= skb->len;
ret ^= 1;
} else {
/* we do not allow even small packets from now on */
q->quota = 0;
/* we do not allow even small packets from now on */
q->quota = 0;
}
spin_unlock_bh(&quota_lock);

View file

@@ -66,7 +66,7 @@ match_packet(const struct sk_buff *skb,
duprintf("Dropping invalid SCTP packet.\n");
*hotdrop = 1;
return 0;
}
}
duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n",
++i, offset, sch->type, htons(sch->length), sch->flags);
@@ -142,18 +142,18 @@ match(const struct sk_buff *skb,
duprintf("Dropping evil TCP offset=0 tinygram.\n");
*hotdrop = 1;
return 0;
}
}
duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
return SCCHECK(((ntohs(sh->source) >= info->spts[0])
&& (ntohs(sh->source) <= info->spts[1])),
XT_SCTP_SRC_PORTS, info->flags, info->invflags)
XT_SCTP_SRC_PORTS, info->flags, info->invflags)
&& SCCHECK(((ntohs(sh->dest) >= info->dpts[0])
&& (ntohs(sh->dest) <= info->dpts[1])),
XT_SCTP_DEST_PORTS, info->flags, info->invflags)
&& SCCHECK(match_packet(skb, protoff + sizeof (sctp_sctphdr_t),
info->chunkmap, info->chunk_match_type,
info->flag_info, info->flag_count,
info->flag_info, info->flag_count,
hotdrop),
XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
}

View file

@@ -55,7 +55,7 @@ static int checkentry(const char *tablename,
if (conf->from_offset > conf->to_offset)
return 0;
if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
return 0;
return 0;
if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
return 0;
ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,

View file

@@ -66,7 +66,7 @@ match(const struct sk_buff *skb,
mssval = (op[i+2] << 8) | op[i+3];
return (mssval >= info->mss_min &&
mssval <= info->mss_max) ^ info->invert;
mssval <= info->mss_max) ^ info->invert;
}
if (op[i] < 2)
i++;

View file

@@ -1997,9 +1997,14 @@ void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
if (audit_enabled == 0)
return;
BUG_ON((type == AUDIT_MAC_IPSEC_ADDSA ||
type == AUDIT_MAC_IPSEC_DELSA) && !x);
BUG_ON((type == AUDIT_MAC_IPSEC_ADDSPD ||
type == AUDIT_MAC_IPSEC_DELSPD) && !xp);
audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
if (audit_buf == NULL)
return;
return;
switch(type) {
case AUDIT_MAC_IPSEC_ADDSA:

View file

@@ -1273,10 +1273,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete);
security_xfrm_policy_free(&tmp);
}
if (delete)
xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL);
if (xp == NULL)
return -ENOENT;
@@ -1292,8 +1288,14 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
MSG_DONTWAIT);
}
} else {
if ((err = security_xfrm_policy_delete(xp)) != 0)
err = security_xfrm_policy_delete(xp);
xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
if (err != 0)
goto out;
c.data.byid = p->index;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;