/*
 * netfilter module to enforce network quotas
 *
 * Sam Johnston <samj@samj.net>
 */
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_quota.h>
#include <linux/module.h>

struct xt_quota_priv {
	spinlock_t	lock;
	uint64_t	quota;
};

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
MODULE_DESCRIPTION("Xtables: countdown quota match");
MODULE_ALIAS("ipt_quota");
MODULE_ALIAS("ip6t_quota");
static bool
quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct xt_quota_info *q = (void *)par->matchinfo;
	struct xt_quota_priv *priv = q->master;
	bool ret = q->flags & XT_QUOTA_INVERT;

	spin_lock_bh(&priv->lock);
	if (priv->quota >= skb->len) {
		priv->quota -= skb->len;
		ret = !ret;
	} else {
		/* we do not allow even small packets from now on */
		priv->quota = 0;
	}
	spin_unlock_bh(&priv->lock);

	return ret;
}
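
/*
 * Validate the flags and allocate the kernel-private object that holds
 * the live countdown for this rule.
 */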
static int quota_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_quota_info *q = par->matchinfo;

	if (q->flags & ~XT_QUOTA_MASK)
		return -EINVAL;

	q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
	if (q->master == NULL)
		return -ENOMEM;

	spin_lock_init(&q->master->lock);
	q->master->quota = q->quota;
	return 0;
}

static void quota_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_quota_info *q = par->matchinfo;

	kfree(q->master);
}

static struct xt_match quota_mt_reg __read_mostly = {
	.name       = "quota",
	.revision   = 0,
	.family     = NFPROTO_UNSPEC,
	.match      = quota_mt,
	.checkentry = quota_mt_check,
	.destroy    = quota_mt_destroy,
	.matchsize  = sizeof(struct xt_quota_info),
	.me         = THIS_MODULE,
};

static int __init quota_mt_init(void)
{
	return xt_register_match(&quota_mt_reg);
}

static void __exit quota_mt_exit(void)
{
	xt_unregister_match(&quota_mt_reg);
}

module_init(quota_mt_init);
module_exit(quota_mt_exit);