/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256

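/* Classifier state: one cls_bpf_head per tcf_proto instance, holding an
 * RCU-protected list of cls_bpf_prog filters. Each filter wraps either a
 * classic BPF program built from TCA_BPF_OPS or an eBPF program loaded
 * beforehand via bpf(2) and handed over as an fd.
 */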
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

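/* Map the return code of a direct-action program to a TC verdict; unknown
 * opcodes become TC_ACT_UNSPEC so classification moves on to the next
 * filter on the list.
 */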
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

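/* Fast path. At ingress the MAC header has already been pulled, so it is
 * pushed back before the program runs (and pulled again afterwards); at
 * egress the data pointer is left where it is.
 */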
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

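/* Fill a tc_cls_bpf_offload descriptor and pass it down through the
 * driver's ndo_setup_tc() hook; the same call shape serves the ADD,
 * REPLACE and DESTROY commands.
 */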
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

static void cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			    struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, 0)) {
			cmd = TC_CLSBPF_REPLACE;
		} else {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		}
	} else {
		if (!tc_should_offload(dev, tp, 0))
			return;
		cmd = TC_CLSBPF_ADD;
	}

	if (cls_bpf_offload_cmd(tp, obj, cmd))
		return;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;
}

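/* Tear down the hardware instance of a filter, e.g. on delete or
 * classifier destruction; a failure here is only reported, not
 * recovered from.
 */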
static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		cls_bpf_stop_offload(tp, prog);
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

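/* Classic BPF case: TCA_BPF_OPS carries raw sock_filter instructions that
 * are copied and translated into an internal bpf_prog via
 * bpf_prog_create().
 */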
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

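/* Extended BPF case: TCA_BPF_FD refers to a program that was already
 * loaded and verified through bpf(2), so only the program type has to be
 * checked here.
 */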
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

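/* Common parameter handling for new and replaced filters: exactly one of
 * the classic ops or the eBPF fd attributes must be present, and
 * TCA_BPF_FLAG_ACT_DIRECT switches the filter to direct-action mode.
 */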
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}

	prog->exts_integrated = have_exts;
	prog->tp = tp;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

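/* Create or replace a filter instance; called with RTNL held. */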
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	cls_bpf_offload(tp, prog, oldprog);

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

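/* Dump helpers, mirroring the two attribute sets accepted by
 * cls_bpf_prog_from_ops() and cls_bpf_prog_from_efd().
 */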
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

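/* Classifier registration; "bpf" is the kind string matched by
 * "tc filter ... bpf".
 */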
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);