/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
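/*
 * Overview (editorial note, not part of the original header): the police
 * action meters matched packets against a token bucket, with an optional
 * second bucket for a peak rate and an optional EWMA average-rate bound.
 * Conforming packets return tcfp_result (TC_ACT_OK unless overridden by
 * TCA_POLICE_RESULT); non-conforming packets return tcf_action, commonly
 * TC_ACT_SHOT to drop them.
 *
 * Illustrative iproute2 usage (device, rates and sizes are examples only):
 *
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: protocol ip prio 50 \
 *           u32 match ip src 0.0.0.0/0 \
 *           police rate 1mbit burst 10k mtu 1500 drop flowid :1
 */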
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
struct tcf_police {
        struct tcf_common       common;
        int                     tcfp_result;
        u32                     tcfp_ewma_rate;
        s64                     tcfp_burst;
        u32                     tcfp_mtu;
        s64                     tcfp_toks;
        s64                     tcfp_ptoks;
        s64                     tcfp_mtu_ptoks;
        s64                     tcfp_t_c;
        struct psched_ratecfg   rate;
        bool                    rate_present;
        struct psched_ratecfg   peak;
        bool                    peak_present;
};
#define to_police(pc)   \
        container_of(pc, struct tcf_police, common)

#define POL_TAB_MASK    15
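/*
 * Editorial note: struct tcf_police embeds struct tcf_common as its first
 * member, so the generic action layer can handle a policer through its
 * tcf_common.  The tcf_* names used below (tcf_lock, tcf_refcnt,
 * tcf_bstats, ...) are the act_api.h shorthands for fields of that
 * embedded structure, while to_police() recovers the containing
 * tcf_police via container_of().  POL_TAB_MASK sizes the per-action hash
 * table (mask 15, i.e. 16 buckets).
 */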
/* old policer structure from before tc actions */
struct tc_police_compat {
        u32                     index;
        int                     action;
        u32                     limit;
        u32                     burst;
        u32                     mtu;
        struct tc_ratespec      rate;
        struct tc_ratespec      peakrate;
};
/* Each policer is serialized by its individual spinlock */
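/*
 * Dump helper: walk every bucket of the policer hash table and emit one
 * nested attribute per action for RTM_GETACTION/RTM_DELACTION dumps,
 * resuming from cb->args[0] across multi-part dumps.
 */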
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
                                 int type, struct tc_action *a)
{
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        struct hlist_head *head;
        struct tcf_common *p;
        int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
        struct nlattr *nest;

        spin_lock_bh(&hinfo->lock);

        s_i = cb->args[0];

        for (i = 0; i < (POL_TAB_MASK + 1); i++) {
                head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)];

                hlist_for_each_entry_rcu(p, head, tcfc_head) {
                        index++;
                        if (index < s_i)
                                continue;
                        a->priv = p;
                        a->order = index;
                        nest = nla_nest_start(skb, a->order);
                        if (nest == NULL)
                                goto nla_put_failure;
                        if (type == RTM_DELACTION)
                                err = tcf_action_dump_1(skb, a, 0, 1);
                        else
                                err = tcf_action_dump_1(skb, a, 0, 0);
                        if (err < 0) {
                                index--;
                                nla_nest_cancel(skb, nest);
                                goto done;
                        }
                        nla_nest_end(skb, nest);
                        n_i++;
                }
        }
done:
        spin_unlock_bh(&hinfo->lock);
        if (n_i)
                cb->args[0] += n_i;
        return n_i;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        goto done;
}
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
        [TCA_POLICE_RATE]       = { .len = TC_RTAB_SIZE },
        [TCA_POLICE_PEAKRATE]   = { .len = TC_RTAB_SIZE },
        [TCA_POLICE_AVRATE]     = { .type = NLA_U32 },
        [TCA_POLICE_RESULT]     = { .type = NLA_U32 },
};
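/*
 * Create a new policer or bind/override an existing one from the
 * TCA_POLICE_* attributes.  TCA_POLICE_TBF carries a struct tc_police
 * (or the older tc_police_compat layout); TCA_POLICE_RATE and
 * TCA_POLICE_PEAKRATE carry the rate tables used to precompute the
 * psched_ratecfg parameters.
 */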
static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                                 struct nlattr *est, struct tc_action *a,
                                 int ovr, int bind)
{
        unsigned int h;
        int ret = 0, err;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        int size;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy);
        if (err < 0)
                return err;

        if (tb[TCA_POLICE_TBF] == NULL)
                return -EINVAL;
        size = nla_len(tb[TCA_POLICE_TBF]);
        if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
                return -EINVAL;
        parm = nla_data(tb[TCA_POLICE_TBF]);

        if (parm->index) {
                if (tcf_hash_search(a, parm->index)) {
                        police = to_police(a->priv);
                        if (bind) {
                                police->tcf_bindcnt += 1;
                                police->tcf_refcnt += 1;
                                return 0;
                        }
                        if (ovr)
                                goto override;
                        /* not replacing */
                        return -EEXIST;
                }
        }
        police = kzalloc(sizeof(*police), GFP_KERNEL);
        if (police == NULL)
                return -ENOMEM;
        ret = ACT_P_CREATED;
        police->tcf_refcnt = 1;
        spin_lock_init(&police->tcf_lock);
        if (bind)
                police->tcf_bindcnt = 1;
override:
        if (parm->rate.rate) {
                err = -ENOMEM;
                R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
                if (R_tab == NULL)
                        goto failure;

                if (parm->peakrate.rate) {
                        P_tab = qdisc_get_rtab(&parm->peakrate,
                                               tb[TCA_POLICE_PEAKRATE]);
                        if (P_tab == NULL)
                                goto failure;
                }
        }
        spin_lock_bh(&police->tcf_lock);
        if (est) {
                err = gen_replace_estimator(&police->tcf_bstats,
                                            &police->tcf_rate_est,
                                            &police->tcf_lock, est);
                if (err)
                        goto failure_unlock;
        } else if (tb[TCA_POLICE_AVRATE] &&
                   (ret == ACT_P_CREATED ||
                    !gen_estimator_active(&police->tcf_bstats,
                                          &police->tcf_rate_est))) {
                err = -EINVAL;
                goto failure_unlock;
        }
        /* No failure allowed after this point */
        police->tcfp_mtu = parm->mtu;
        if (police->tcfp_mtu == 0) {
                police->tcfp_mtu = ~0;
                if (R_tab)
                        police->tcfp_mtu = 255 << R_tab->rate.cell_log;
        }
        if (R_tab) {
                police->rate_present = true;
                psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
                qdisc_put_rtab(R_tab);
        } else {
                police->rate_present = false;
        }
        if (P_tab) {
                police->peak_present = true;
                psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
                qdisc_put_rtab(P_tab);
        } else {
                police->peak_present = false;
        }
        if (tb[TCA_POLICE_RESULT])
                police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
        police->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
        police->tcfp_toks = police->tcfp_burst;	/* bucket starts full */
        if (police->peak_present) {
                police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak,
                                                             police->tcfp_mtu);
                police->tcfp_ptoks = police->tcfp_mtu_ptoks;
        }
        police->tcf_action = parm->action;

        if (tb[TCA_POLICE_AVRATE])
                police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
        spin_unlock_bh(&police->tcf_lock);
        if (ret != ACT_P_CREATED)
                return ret;

        police->tcfp_t_c = ktime_to_ns(ktime_get());
        police->tcf_index = parm->index ? parm->index :
                tcf_hash_new_index(hinfo);
        h = tcf_hash(police->tcf_index, POL_TAB_MASK);
        spin_lock_bh(&hinfo->lock);
        hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
        spin_unlock_bh(&hinfo->lock);

        a->priv = police;
        return ret;
failure_unlock:
        spin_unlock_bh(&police->tcf_lock);
failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
        if (ret == ACT_P_CREATED)
                kfree(police);
        return err;
}
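/*
 * Per-packet enforcement: a classic token bucket.  Tokens accrue with the
 * time elapsed since tcfp_t_c (capped at tcfp_burst) and each packet
 * spends its transmission time at the configured rate; when a peak rate
 * is configured, a second bucket capped at tcfp_mtu_ptoks is debited as
 * well.  The packet conforms only if both buckets stay non-negative.
 */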
static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_police *police = a->priv;
        s64 now;
        s64 toks;
        s64 ptoks = 0;

        spin_lock(&police->tcf_lock);

        bstats_update(&police->tcf_bstats, skb);

        if (police->tcfp_ewma_rate &&
            police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
                police->tcf_qstats.overlimits++;
                if (police->tcf_action == TC_ACT_SHOT)
                        police->tcf_qstats.drops++;
                spin_unlock(&police->tcf_lock);
                return police->tcf_action;
        }

        if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
                if (!police->rate_present) {
                        spin_unlock(&police->tcf_lock);
                        return police->tcfp_result;
                }

                now = ktime_to_ns(ktime_get());
                toks = min_t(s64, now - police->tcfp_t_c,
                             police->tcfp_burst);
                if (police->peak_present) {
                        ptoks = toks + police->tcfp_ptoks;
                        if (ptoks > police->tcfp_mtu_ptoks)
                                ptoks = police->tcfp_mtu_ptoks;
                        ptoks -= (s64) psched_l2t_ns(&police->peak,
                                                     qdisc_pkt_len(skb));
                }
                toks += police->tcfp_toks;
                if (toks > police->tcfp_burst)
                        toks = police->tcfp_burst;
                toks -= (s64) psched_l2t_ns(&police->rate, qdisc_pkt_len(skb));
                if ((toks|ptoks) >= 0) {
                        /* both buckets non-negative: the packet conforms */
                        police->tcfp_t_c = now;
                        police->tcfp_toks = toks;
                        police->tcfp_ptoks = ptoks;
                        spin_unlock(&police->tcf_lock);
                        return police->tcfp_result;
                }
        }

        police->tcf_qstats.overlimits++;
        if (police->tcf_action == TC_ACT_SHOT)
                police->tcf_qstats.drops++;
        spin_unlock(&police->tcf_lock);
        return police->tcf_action;
}
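/*
 * Dump one policer back to userspace: rebuild the struct tc_police blob
 * from the precomputed rate configuration and append the optional
 * TCA_POLICE_RESULT and TCA_POLICE_AVRATE attributes.
 */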
static int
tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_police *police = a->priv;
        struct tc_police opt = {
                .index = police->tcf_index,
                .action = police->tcf_action,
                .mtu = police->tcfp_mtu,
                .burst = PSCHED_NS2TICKS(police->tcfp_burst),
                .refcnt = police->tcf_refcnt - ref,
                .bindcnt = police->tcf_bindcnt - bind,
        };

        if (police->rate_present)
                psched_ratecfg_getrate(&opt.rate, &police->rate);
        if (police->peak_present)
                psched_ratecfg_getrate(&opt.peakrate, &police->peak);
        if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
                goto nla_put_failure;
        if (police->tcfp_result &&
            nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
                goto nla_put_failure;
        if (police->tcfp_ewma_rate &&
            nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
static struct tc_action_ops act_police_ops = {
        .kind           =       "police",
        .type           =       TCA_ID_POLICE,
        .owner          =       THIS_MODULE,
        .act            =       tcf_act_police,
        .dump           =       tcf_act_police_dump,
        .init           =       tcf_act_police_locate,
        .walk           =       tcf_act_police_walker
};
static int __init
police_init_module(void)
{
        return tcf_register_action(&act_police_ops, POL_TAB_MASK);
}

static void __exit
police_cleanup_module(void)
{
        tcf_unregister_action(&act_police_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);