/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNCOUNT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNCOUNT_LOCK_SLOTS	8U
#else
#define CONNCOUNT_LOCK_SLOTS	256U
#endif

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5
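/* Tracked connections are kept on per-key lists hung off rbtree nodes,
 * and the trees themselves are spread over CONNCOUNT_SLOTS hash buckets.
 * With lockdep enabled, fewer bucket locks are used (several buckets
 * share a lock) to keep lockdep's bookkeeping overhead down.  MAX_KEYLEN
 * is in u32 words, i.e. keys can be up to 20 bytes long.
 */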

/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
	bool				dead;
	struct rcu_head			rcu_head;
};

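/* One rbtree node exists per distinct key; it carries the list of
 * connections that share that key.
 */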
struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};

static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;

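/* Per-instance state: one tree per hash slot.  The pending_trees bitmap
 * marks slots that the gc worker still needs to process; gc_tree is the
 * slot it will look at next.
 */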
struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};

static u32 conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

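/* Append @tuple to @list.  Returns NF_CONNCOUNT_ADDED on success,
 * NF_CONNCOUNT_ERR on allocation failure or list-count overflow (callers
 * treat this as hotdrop), or NF_CONNCOUNT_SKIP if the list was emptied
 * and marked dead by a concurrent gc, in which case the caller has to
 * insert a fresh tree node.
 */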
enum nf_conncount_list_add
nf_conncount_add(struct nf_conncount_list *list,
		 const struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_tuple *conn;

	if (WARN_ON_ONCE(list->count > INT_MAX))
		return NF_CONNCOUNT_ERR;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return NF_CONNCOUNT_ERR;

	conn->tuple = *tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	conn->dead = false;
	spin_lock_bh(&list->list_lock);
	if (list->dead) {
		kmem_cache_free(conncount_conn_cachep, conn);
		spin_unlock_bh(&list->list_lock);
		return NF_CONNCOUNT_SKIP;
	}
	list_add_tail(&conn->node, &list->head);
	list->count++;
	spin_unlock_bh(&list->list_lock);
	return NF_CONNCOUNT_ADDED;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

static void __conn_free(struct rcu_head *h)
{
	struct nf_conncount_tuple *conn;

	conn = container_of(h, struct nf_conncount_tuple, rcu_head);
	kmem_cache_free(conncount_conn_cachep, conn);
}

static bool conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	bool free_entry = false;

	spin_lock_bh(&list->list_lock);

	if (conn->dead) {
		spin_unlock_bh(&list->list_lock);
		return free_entry;
	}

	list->count--;
	conn->dead = true;
	list_del_rcu(&conn->node);
	if (list->count == 0) {
		list->dead = true;
		free_entry = true;
	}

	spin_unlock_bh(&list->list_lock);
	call_rcu(&conn->rcu_head, __conn_free);
	return free_entry;
}

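/* Resolve @conn to a live conntrack entry.  On success a reference is
 * taken and the tuple hash is returned.  Returns ERR_PTR(-ENOENT) if the
 * stale entry was evicted (*free_entry says whether the whole list became
 * empty), or ERR_PTR(-EAGAIN) if the entry might still be unconfirmed.
 */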
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn, bool *free_entry)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	__s32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed.  In this case, nf_conntrack_find_get()
	 * returns no result.  Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (age < 0) /* age wrapped around; entry is old, evict it */
		age = INT_MAX;
	if (conn->cpu == cpu || age >= 2) {
		*free_entry = conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}

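/* Walk @list, evicting stale and already-closed entries along the way,
 * and clear *addit if @tuple is already present so the caller does not
 * add it a second time.
 */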
void nf_conncount_lookup(struct net *net,
			 struct nf_conncount_list *list,
			 const struct nf_conntrack_tuple *tuple,
			 const struct nf_conntrack_zone *zone,
			 bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collect = 0;
	bool free_entry = false;

	/* best effort only */
	*addit = tuple ? true : false;

	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_NODES)
			break;

		found = find_or_evict(net, list, conn, &free_entry);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (!tuple)
					continue;

				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					*addit = false;
			} else if (PTR_ERR(found) == -ENOENT)
				collect++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);

void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 0;
	list->dead = false;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);

/* Return true if the list is empty */
bool nf_conncount_gc_list(struct net *net,
			  struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool free_entry = false;
	bool ret = false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn, &free_entry);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT)  {
				if (free_entry)
					return true;
				collected++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			if (conn_free(list, conn))
				return true;
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_NODES)
			return false;
	}

	spin_lock_bh(&list->list_lock);
	if (!list->count) {
		list->dead = true;
		ret = true;
	}
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);

static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}

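/* Unlink empty nodes from @root.  Taking the per-list lock serializes
 * against a concurrent conn_free(); the actual freeing is deferred via
 * RCU so that lockless readers walking the tree in count_tree() stay
 * safe.
 */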
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		rb_erase(&rbconn->node, root);
		call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		spin_unlock(&rbconn->list.list_lock);
	}
}

static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}

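/* Slow path: take the bucket lock and re-walk the tree, since another
 * cpu may have inserted the node after our lockless lookup failed.
 * Empty lists found along the way are garbage collected; if the walk
 * hits too many of them, the async gc worker is scheduled for this tree.
 */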
static unsigned int
insert_tree(struct net *net,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key,
	    u8 keylen,
	    const struct nf_conntrack_tuple *tuple,
	    const struct nf_conntrack_zone *zone)
{
	enum nf_conncount_list_add ret;
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int count = 0, gc_count = 0;
	bool node_found = false;

	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);

	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* unlikely: other cpu added node already */
			node_found = true;
			ret = nf_conncount_add(&rbconn->list, tuple, zone);
			if (ret == NF_CONNCOUNT_ERR) {
				count = 0; /* hotdrop */
			} else if (ret == NF_CONNCOUNT_ADDED) {
				count = rbconn->list.count;
			} else {
				/* NF_CONNCOUNT_SKIP, rbconn is already
				 * reclaimed by gc, insert a new tree node
				 */
				node_found = false;
			}
			break;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		/* Calling tree_nodes_free() before the new allocation permits
		 * the allocator to re-use the newly freed objects.
		 *
		 * This is a rare event; in most cases we will find an
		 * existing node to re-use (or gc_count is 0).
		 */

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			schedule_gc_worker(data, hash);
	}

	if (node_found)
		goto out_unlock;

	/* expected case: no match, insert a new node */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		goto out_unlock;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		goto out_unlock;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * keylen);

	nf_conncount_list_init(&rbconn->list);
	list_add(&conn->node, &rbconn->list.head);
	count = 1;
	rbconn->list.count = count;

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
out_unlock:
	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
	return count;
}

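/* Fast path: lockless rbtree walk under RCU.  Falls back to
 * insert_tree() when no node exists for @key or the node found was
 * already reclaimed by gc.
 */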
static unsigned int
count_tree(struct net *net,
	   struct nf_conncount_data *data,
	   const u32 *key,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	enum nf_conncount_list_add ret;
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;
	u8 keylen = data->keylen;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;
		bool addit;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			/* same source network -> be counted! */
			nf_conncount_lookup(net, &rbconn->list, tuple, zone,
					    &addit);

			if (!addit)
				return rbconn->list.count;

			ret = nf_conncount_add(&rbconn->list, tuple, zone);
			if (ret == NF_CONNCOUNT_ERR) {
				return 0; /* hotdrop */
			} else if (ret == NF_CONNCOUNT_ADDED) {
				return rbconn->list.count;
			} else {
				/* NF_CONNCOUNT_SKIP, rbconn is already
				 * reclaimed by gc, insert a new tree node
				 */
				break;
			}
		}
	}

	if (!tuple)
		return 0;

	return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
}

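/* Deferred gc: process one pending tree per invocation and reschedule
 * the work while other pending_trees bits remain set.
 */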
static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_SLOTS;
	root = &data->root[tree];

	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}
	rcu_read_unlock();

	spin_lock_bh(&nf_conncount_locks[tree % CONNCOUNT_LOCK_SLOTS]);

	if (gc_count)
		tree_nodes_free(root, gc_nodes, gc_count);

	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_work(work);
	}

	spin_unlock_bh(&nf_conncount_locks[tree % CONNCOUNT_LOCK_SLOTS]);
}

/* Count and return the number of conntrack entries in 'net' matching a
 * particular 'key'.  If 'tuple' is not NULL, insert it into the
 * accounting data structure.  Must be called with the RCU read lock held.
 */
unsigned int nf_conncount_count(struct net *net,
				struct nf_conncount_data *data,
				const u32 *key,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone)
{
	return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
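
/* Typical caller flow, as a minimal sketch (error handling elided;
 * "mask" and "limit" are illustrative, and tuple/zone come from the
 * packet's conntrack lookup -- loosely modeled on how xt_connlimit uses
 * this API):
 *
 *	struct nf_conncount_data *data;
 *	u32 key[MAX_KEYLEN] = {};
 *	unsigned int count;
 *
 *	data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(__be32));
 *
 * then, for each packet, with the RCU read lock held:
 *
 *	key[0] = (__force u32)(ip_hdr(skb)->saddr & mask);
 *	count = nf_conncount_count(net, data, key, tuple, zone);
 *	if (count > limit)
 *		return NF_DROP;
 *
 * and on teardown:
 *
 *	nf_conncount_destroy(net, NFPROTO_IPV4, data);
 */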

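/* Allocate and set up an accounting object.  @keylen is in bytes and
 * must be a non-zero multiple of sizeof(u32) no larger than MAX_KEYLEN
 * words; returns an ERR_PTR() on failure.
 */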
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
					    unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);

void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, unsigned int family,
			  struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);
	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");