2 * net/sched/police.c Input police filter.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * J Hadi Salim (action changes)
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/string.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/netdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/module.h>
30 #include <linux/rtnetlink.h>
31 #include <linux/init.h>
33 #include <net/act_api.h>
/* L2T/L2T_P: convert a packet length L into a token cost by indexing the
 * committed (R_tab) or peak (P_tab) rate table, using length >> cell_log
 * as the table index. */
36 #define L2T(p,L) ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log])
37 #define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log])
/* PRIV: fetch the policer private state attached to a generic tc_action. */
38 #define PRIV(a) ((struct tcf_police *) (a)->priv)
39 /* use generic hash table */
/* MY_TAB_MASK must stay MY_TAB_SIZE - 1 so it works as a bit mask. */
40 #define MY_TAB_SIZE 16
41 #define MY_TAB_MASK 15
/* Global hash table of all policer instances, keyed by policer index. */
43 static struct tcf_police *tcf_police_ht[MY_TAB_SIZE];
44 /* Policer hash table lock */
45 static DEFINE_RWLOCK(police_lock);
47 /* old policer structure from before tc actions */
/* Kept so user-space binaries built against the pre-action ABI can still
 * configure a policer; tcf_*_locate() accepts either payload size.
 * NOTE(review): this struct is truncated in this excerpt — fields before
 * and after the two rate specs are not visible. */
48 struct tc_police_compat
55 struct tc_ratespec rate;
56 struct tc_ratespec peakrate;
59 /* Each policer is serialized by its individual spinlock */
/* Map a policer index to a tcf_police_ht bucket.  Body elided in this
 * excerpt; presumably index & MY_TAB_MASK — TODO confirm. */
61 static __inline__ unsigned tcf_police_hash(u32 index)
/* Look up a policer by index.  Takes the read side of police_lock and
 * walks the bucket's chain.  (Parts of the body are elided here.) */
66 static __inline__ struct tcf_police * tcf_police_lookup(u32 index)
70 read_lock(&police_lock);
71 for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
72 if (p->index == index)
75 read_unlock(&police_lock);
79 #ifdef CONFIG_NET_CLS_ACT
/* .walk hook: iterate every policer in the hash table and emit each as a
 * nested rtattr into skb, for both dump and flush-style walks.  Several
 * lines (cursor handling, counters) are elided in this excerpt. */
80 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
81 int type, struct tc_action *a)
84 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
/* Hold the read lock across the whole table scan. */
87 read_lock(&police_lock);
91 for (i = 0; i < MY_TAB_SIZE; i++) {
/* NOTE(review): hashing the bucket counter (tcf_police_hash(i)) only
 * visits each bucket once if the hash is i & MY_TAB_MASK — confirm
 * against tcf_police_hash(). */
92 p = tcf_police_ht[tcf_police_hash(i)];
94 for (; p; p = p->next) {
/* Remember where this entry's nested attribute starts so its length
 * can be patched (or the partial write trimmed) afterwards. */
100 r = (struct rtattr*) skb->tail;
101 RTA_PUT(skb, a->order, 0, NULL);
/* Deletion walks dump with ref=1, plain dumps with ref=0. */
102 if (type == RTM_DELACTION)
103 err = tcf_action_dump_1(skb, a, 0, 1);
105 err = tcf_action_dump_1(skb, a, 0, 0);
/* On error, discard the partially-written attribute. */
108 skb_trim(skb, (u8*)r - skb->data);
/* Fix up the nested attribute's final length. */
111 r->rta_len = skb->tail - (u8*)r;
116 read_unlock(&police_lock);
/* Error exit: trim everything written for the failed entry. */
122 skb_trim(skb, (u8*)r - skb->data);
/* .lookup hook: find a policer by index for action 'a' (the return/bind
 * logic that follows is elided in this excerpt). */
127 tcf_act_police_hash_search(struct tc_action *a, u32 index)
129 struct tcf_police *p = tcf_police_lookup(index);
/* Pick a fresh policer index: keep advancing idx_gen until the candidate
 * is not already present in the hash table. */
140 static inline u32 tcf_police_new_index(void)
145 } while (tcf_police_lookup(idx_gen));
/* Unlink a policer from the hash table and release its resources
 * (rate estimator and refcounted rate tables).  Unlink and free
 * statements are partially elided in this excerpt. */
150 void tcf_police_destroy(struct tcf_police *p)
152 unsigned h = tcf_police_hash(p->index);
153 struct tcf_police **p1p;
/* Find the chain link pointing at p so it can be spliced out. */
155 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
/* Writers disable BHs: the table is also read from softirq context. */
157 write_lock_bh(&police_lock);
159 write_unlock_bh(&police_lock);
160 #ifdef CONFIG_NET_ESTIMATOR
161 gen_kill_estimator(&p->bstats, &p->rate_est);
/* Drop our references on the (possibly shared) rate tables. */
164 qdisc_put_rtab(p->R_tab);
166 qdisc_put_rtab(p->P_tab);
174 #ifdef CONFIG_NET_CLS_ACT
/* .init hook: locate an existing policer by index or create a new one
 * from the netlink attributes in 'rta'.  Accepts both the current
 * tc_police layout and the pre-action tc_police_compat layout.
 * Many error-path and assignment lines are elided in this excerpt. */
175 static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
176 struct tc_action *a, int ovr, int bind)
180 struct rtattr *tb[TCA_POLICE_MAX];
181 struct tc_police *parm;
182 struct tcf_police *p;
183 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
/* Parse the nested TCA_POLICE_* attributes. */
186 if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
189 if (tb[TCA_POLICE_TBF-1] == NULL)
/* The TBF parameter blob must match one of the two known layouts. */
191 size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
192 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
194 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
/* Validate the optional RESULT attribute's payload size. */
196 if (tb[TCA_POLICE_RESULT-1] != NULL &&
197 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
/* NOTE(review): this RESULT size check is an exact duplicate of the one
 * directly above — one of the two is redundant dead weight. */
199 if (tb[TCA_POLICE_RESULT-1] != NULL &&
200 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
/* Reuse an existing policer when the caller supplied its index. */
203 if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
/* Otherwise allocate a zeroed instance. */
214 p = kzalloc(sizeof(*p), GFP_KERNEL);
220 spin_lock_init(&p->lock);
221 p->stats_lock = &p->lock;
/* Fetch the committed-rate table if a rate was given. */
225 if (parm->rate.rate) {
227 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
230 if (parm->peakrate.rate) {
231 P_tab = qdisc_get_rtab(&parm->peakrate,
232 tb[TCA_POLICE_PEAKRATE-1]);
/* NOTE(review): qdisc_get_rtab() assigned the LOCAL P_tab above, but the
 * failure check reads p->P_tab (zeroed by kzalloc for a new policer) —
 * looks like it should test P_tab instead; confirm against upstream. */
233 if (p->P_tab == NULL) {
234 qdisc_put_rtab(R_tab);
239 /* No failure allowed after this point */
/* Commit the new configuration under the policer's own lock. */
240 spin_lock_bh(&p->lock);
/* Release the tables being replaced. */
242 qdisc_put_rtab(p->R_tab);
246 qdisc_put_rtab(p->P_tab);
250 if (tb[TCA_POLICE_RESULT-1])
251 p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
/* Start with a full committed-token bucket. */
252 p->toks = p->burst = parm->burst;
/* Default MTU when none was supplied: largest length mapping to the
 * last rate-table cell. */
257 p->mtu = 255<<p->R_tab->rate.cell_log;
258 p->ptoks = L2T_P(p, p->mtu);
261 p->action = parm->action;
263 #ifdef CONFIG_NET_ESTIMATOR
264 if (tb[TCA_POLICE_AVRATE-1])
265 p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
267 gen_replace_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
270 spin_unlock_bh(&p->lock);
/* An existing policer is already hashed; only new ones are inserted. */
271 if (ret != ACT_P_CREATED)
274 PSCHED_GET_TIME(p->t_c);
/* Use the caller's index, or mint a fresh one. */
275 p->index = parm->index ? : tcf_police_new_index();
276 h = tcf_police_hash(p->index);
277 write_lock_bh(&police_lock);
278 p->next = tcf_police_ht[h];
279 tcf_police_ht[h] = p;
280 write_unlock_bh(&police_lock);
286 if (ret == ACT_P_CREATED)
/* .cleanup hook: drop one reference (and binding, if 'bind') on the
 * action's policer via tcf_police_release(). */
291 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
293 struct tcf_police *p = PRIV(a);
296 return tcf_police_release(p, bind);
/* .act hook: classic dual token bucket policer.  Refills committed
 * (toks) and peak (ptoks) buckets from elapsed time, charges the packet
 * against both, and picks the conform or exceed action.  Several
 * lock/return lines are elided in this excerpt; the policer's spinlock
 * is held across the accounting (unlocks are visible on each path). */
300 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
301 struct tcf_result *res)
304 struct tcf_police *p = PRIV(a);
/* Byte accounting for this policer. */
310 p->bstats.bytes += skb->len;
313 #ifdef CONFIG_NET_ESTIMATOR
/* Optional average-rate policing: over the EWMA budget counts as an
 * overlimit regardless of the token buckets. */
314 if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
315 p->qstats.overlimits++;
316 spin_unlock(&p->lock);
/* Oversized packets skip the bucket math and fall through to the
 * overlimit path below. */
321 if (skb->len <= p->mtu) {
/* No rate table configured: nothing to police against. */
322 if (p->R_tab == NULL) {
323 spin_unlock(&p->lock);
/* Refill tokens for the time elapsed since the last update, capped
 * at one burst's worth. */
327 PSCHED_GET_TIME(now);
329 toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);
/* Peak bucket: cap at the cost of one MTU, then charge the packet. */
332 ptoks = toks + p->ptoks;
333 if (ptoks > (long)L2T_P(p, p->mtu))
334 ptoks = (long)L2T_P(p, p->mtu);
335 ptoks -= L2T_P(p, skb->len);
/* Committed bucket: cap at the burst, then charge the packet. */
338 if (toks > (long)p->burst)
340 toks -= L2T(p, skb->len);
/* Both buckets non-negative => packet conforms. */
342 if ((toks|ptoks) >= 0) {
346 spin_unlock(&p->lock);
/* Exceed path: count the overlimit and return the exceed action. */
351 p->qstats.overlimits++;
352 spin_unlock(&p->lock);
/* .dump hook: serialize the policer's configuration into skb as
 * TCA_POLICE_* attributes.  Error-path lines are elided in this excerpt.
 * NOTE(review): no memset of 'opt' is visible here — if the struct has
 * padding or fields not assigned below, stack bytes may leak to
 * user space; confirm against the full function. */
357 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
359 unsigned char *b = skb->tail;
360 struct tc_police opt;
361 struct tcf_police *p = PRIV(a);
363 opt.index = p->index;
364 opt.action = p->action;
366 opt.burst = p->burst;
/* Report counts minus the caller's own reference/binding. */
367 opt.refcnt = p->refcnt - ref;
368 opt.bindcnt = p->bindcnt - bind;
/* Rate specs: copy from the tables when configured, else zero. */
370 opt.rate = p->R_tab->rate;
372 memset(&opt.rate, 0, sizeof(opt.rate));
374 opt.peakrate = p->P_tab->rate;
376 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
377 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
379 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
380 #ifdef CONFIG_NET_ESTIMATOR
382 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
/* rtattr_failure path: undo everything written by this function. */
387 skb_trim(skb, b - skb->data);
/* Module metadata. */
391 MODULE_AUTHOR("Alexey Kuznetsov");
392 MODULE_DESCRIPTION("Policing actions");
393 MODULE_LICENSE("GPL");
/* Registration table wiring the police action's hooks into the generic
 * tc action API (closing brace is elided in this excerpt). */
395 static struct tc_action_ops act_police_ops = {
397 .type = TCA_ID_POLICE,
398 .capab = TCA_CAP_NONE,
399 .owner = THIS_MODULE,
400 .act = tcf_act_police,
401 .dump = tcf_act_police_dump,
402 .cleanup = tcf_act_police_cleanup,
403 .lookup = tcf_act_police_hash_search,
404 .init = tcf_act_police_locate,
405 .walk = tcf_act_police_walker
/* Module init: register the police action with the tc action core. */
409 police_init_module(void)
411 return tcf_register_action(&act_police_ops);
/* Module exit: unregister the police action. */
415 police_cleanup_module(void)
417 tcf_unregister_action(&act_police_ops);
420 module_init(police_init_module);
421 module_exit(police_cleanup_module);
423 #else /* CONFIG_NET_CLS_ACT */
/* Non-CONFIG_NET_CLS_ACT variant of policer setup: locate an existing
 * policer by index or build a new one from the netlink attributes.
 * Error-path and return lines are elided in this excerpt. */
425 struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
428 struct tcf_police *p;
429 struct rtattr *tb[TCA_POLICE_MAX];
430 struct tc_police *parm;
/* Parse the nested TCA_POLICE_* attributes. */
433 if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
436 if (tb[TCA_POLICE_TBF-1] == NULL)
/* The TBF blob must match the current or the compat layout. */
438 size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
439 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
442 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
/* Reuse an existing policer if the caller named one. */
444 if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
449 p = kzalloc(sizeof(*p), GFP_KERNEL);
454 spin_lock_init(&p->lock);
455 p->stats_lock = &p->lock;
/* Fetch the committed-rate table, bailing out if unavailable. */
456 if (parm->rate.rate) {
457 p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
458 if (p->R_tab == NULL)
460 if (parm->peakrate.rate) {
461 p->P_tab = qdisc_get_rtab(&parm->peakrate,
462 tb[TCA_POLICE_PEAKRATE-1]);
463 if (p->P_tab == NULL)
/* Optional default classification result, size-checked. */
467 if (tb[TCA_POLICE_RESULT-1]) {
468 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
470 p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
472 #ifdef CONFIG_NET_ESTIMATOR
/* Optional average-rate bound, size-checked. */
473 if (tb[TCA_POLICE_AVRATE-1]) {
474 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
476 p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
/* Start with a full committed bucket. */
479 p->toks = p->burst = parm->burst;
/* Default MTU: largest length that maps to the last rate-table cell. */
484 p->mtu = 255<<p->R_tab->rate.cell_log;
487 p->ptoks = L2T_P(p, p->mtu);
488 PSCHED_GET_TIME(p->t_c);
/* Caller-supplied index, or a freshly minted one. */
489 p->index = parm->index ? : tcf_police_new_index();
490 p->action = parm->action;
491 #ifdef CONFIG_NET_ESTIMATOR
493 gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
/* Publish the new policer in the hash table. */
495 h = tcf_police_hash(p->index);
496 write_lock_bh(&police_lock);
497 p->next = tcf_police_ht[h];
498 tcf_police_ht[h] = p;
499 write_unlock_bh(&police_lock);
/* Failure path: release the rate table before freeing. */
504 qdisc_put_rtab(p->R_tab);
/* Non-action policing entry point: identical dual-token-bucket logic to
 * tcf_act_police() above, but called with an explicit policer instead of
 * a tc_action.  Lock/return lines are partially elided in this excerpt;
 * the unlocks visible on each path imply p->lock is held throughout. */
509 int tcf_police(struct sk_buff *skb, struct tcf_police *p)
/* Byte accounting. */
517 p->bstats.bytes += skb->len;
520 #ifdef CONFIG_NET_ESTIMATOR
/* Average-rate bound: exceeding the EWMA budget is an overlimit. */
521 if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
522 p->qstats.overlimits++;
523 spin_unlock(&p->lock);
/* Oversized packets skip the bucket math (overlimit path below). */
528 if (skb->len <= p->mtu) {
529 if (p->R_tab == NULL) {
530 spin_unlock(&p->lock);
/* Refill tokens for elapsed time, capped at one burst. */
534 PSCHED_GET_TIME(now);
536 toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);
/* Peak bucket: cap at one MTU's cost, then charge the packet. */
539 ptoks = toks + p->ptoks;
540 if (ptoks > (long)L2T_P(p, p->mtu))
541 ptoks = (long)L2T_P(p, p->mtu);
542 ptoks -= L2T_P(p, skb->len);
/* Committed bucket: cap at the burst, then charge the packet. */
545 if (toks > (long)p->burst)
547 toks -= L2T(p, skb->len);
/* Both buckets non-negative => conform. */
549 if ((toks|ptoks) >= 0) {
553 spin_unlock(&p->lock);
/* Exceed path. */
558 p->qstats.overlimits++;
559 spin_unlock(&p->lock);
562 EXPORT_SYMBOL(tcf_police);
/* Non-action dump: serialize the policer's configuration as
 * TCA_POLICE_* attributes.  Mirrors tcf_act_police_dump() minus the
 * refcnt/bindcnt fields.  Error-path lines elided in this excerpt.
 * NOTE(review): as above, no memset of 'opt' is visible — unassigned
 * fields/padding could leak stack bytes; confirm against the full
 * function. */
564 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
566 unsigned char *b = skb->tail;
567 struct tc_police opt;
569 opt.index = p->index;
570 opt.action = p->action;
572 opt.burst = p->burst;
/* Rate specs: copied from the tables when present, else zeroed. */
574 opt.rate = p->R_tab->rate;
576 memset(&opt.rate, 0, sizeof(opt.rate));
578 opt.peakrate = p->P_tab->rate;
580 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
581 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
583 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
584 #ifdef CONFIG_NET_ESTIMATOR
586 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
/* rtattr_failure path: undo everything written by this function. */
591 skb_trim(skb, b - skb->data);
/* Dump the policer's statistics (basic, rate-estimator, queue) via the
 * gnet_stats compat interface, under p->stats_lock.  Return statements
 * are elided in this excerpt. */
595 int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
599 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
600 TCA_XSTATS, p->stats_lock, &d) < 0)
603 if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
604 #ifdef CONFIG_NET_ESTIMATOR
605 gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
607 gnet_stats_copy_queue(&d, &p->qstats) < 0)
/* Close the stats TLV and fix up lengths. */
610 if (gnet_stats_finish_copy(&d) < 0)
619 #endif /* CONFIG_NET_CLS_ACT */