net/sched/cls_bpf.c (karo-tx-linux.git, blob 6523c5b4c0a5d504de3b21d48eb50969796baa90)
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

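/* Example usage from user space (illustrative sketch, assuming an
 * iproute2 build with eBPF support; the object file and section name
 * are hypothetical):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *
 * The "da" (direct-action) flag corresponds to TCA_BPF_FLAG_ACT_DIRECT
 * below and lets the BPF program return TC_ACT_* verdicts directly.
 */
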
#define CLS_BPF_NAME_LEN        256

struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
        struct rcu_head rcu;
};

struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
        bool offloaded;
        struct tcf_exts exts;
        u32 handle;
        union {
                u32 bpf_fd;
                u16 bpf_num_ops;
        };
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
        struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING,
                                    .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

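/* Sanitize the verdict returned by a direct-action BPF program: pass
 * known TC_ACT_* opcodes through unchanged and map anything else to
 * TC_ACT_UNSPEC, which makes the classify loop continue with the next
 * filter in the list.
 */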
static int cls_bpf_exec_opcode(int code)
{
        switch (code) {
        case TC_ACT_OK:
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_REDIRECT:
        case TC_ACT_UNSPEC:
                return code;
        default:
                return TC_ACT_UNSPEC;
        }
}

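/* Main classification path. Walks all programs attached to this
 * tcf_proto under RCU and runs each one on the skb. At ingress the
 * MAC header is pushed back first so programs see the same packet
 * layout in both directions. In direct-action mode the program's
 * return code is the TC verdict; otherwise a non-zero result selects
 * the classid and the configured actions run via tcf_exts_exec().
 * Returns -1 if no filter matched.
 */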
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        bool at_ingress = skb_at_tc_ingress(skb);
        struct cls_bpf_prog *prog;
        int ret = -1;

        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;

                qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

                if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }

                if (prog->exts_integrated) {
                        res->class   = 0;
                        res->classid = TC_H_MAJ(prog->res.classid) |
                                       qdisc_skb_cb(skb)->tc_classid;

                        ret = cls_bpf_exec_opcode(filter_res);
                        if (ret == TC_ACT_UNSPEC)
                                continue;
                        break;
                }

                if (filter_res == 0)
                        continue;
                if (filter_res != -1) {
                        res->class   = 0;
                        res->classid = filter_res;
                } else {
                        *res = prog->res;
                }

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                break;
        }
        rcu_read_unlock();

        return ret;
}

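/* Classic BPF programs carry their opcode array in bpf_ops; eBPF
 * programs are loaded by fd and have no such array.
 */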
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}

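/* Issue a single offload command (add/replace/destroy) for a program
 * to the underlying device via its ndo_setup_tc() callback.
 */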
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                               enum tc_clsbpf_command cmd)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_bpf_offload bpf_offload = {};
        struct tc_to_netdev offload;

        offload.type = TC_SETUP_CLSBPF;
        offload.cls_bpf = &bpf_offload;

        bpf_offload.command = cmd;
        bpf_offload.exts = &prog->exts;
        bpf_offload.prog = prog->filter;
        bpf_offload.name = prog->bpf_name;
        bpf_offload.exts_integrated = prog->exts_integrated;

        return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
                                             tp->protocol, &offload);
}

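/* Decide which offload command applies when (re)installing a program:
 * replace an already offloaded predecessor, destroy it if the device
 * no longer qualifies for offload, or add a fresh program. On success
 * the offloaded flag moves from oldprog to the newly installed one.
 */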
static void cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                            struct cls_bpf_prog *oldprog)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct cls_bpf_prog *obj = prog;
        enum tc_clsbpf_command cmd;

        if (oldprog && oldprog->offloaded) {
                if (tc_should_offload(dev, tp, 0)) {
                        cmd = TC_CLSBPF_REPLACE;
                } else {
                        obj = oldprog;
                        cmd = TC_CLSBPF_DESTROY;
                }
        } else {
                if (!tc_should_offload(dev, tp, 0))
                        return;
                cmd = TC_CLSBPF_ADD;
        }

        if (cls_bpf_offload_cmd(tp, obj, cmd))
                return;

        obj->offloaded = true;
        if (oldprog)
                oldprog->offloaded = false;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
                                 struct cls_bpf_prog *prog)
{
        int err;

        if (!prog->offloaded)
                return;

        err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
        if (err) {
                pr_err("Stopping hardware offload failed: %d\n", err);
                return;
        }

        prog->offloaded = false;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

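/* Final teardown of a filter program: release the actions, the BPF
 * program reference and all associated allocations. Invoked after an
 * RCU grace period via the __cls_bpf_delete_prog() callback below,
 * once no classify path can still see the program.
 */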
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);

        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
        kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
        struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

        cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

        cls_bpf_stop_offload(tp, prog);
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        call_rcu(&prog->rcu, __cls_bpf_delete_prog);

        return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        if (!force && !list_empty(&head->plist))
                return false;

        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
                cls_bpf_stop_offload(tp, prog);
                list_del_rcu(&prog->link);
                tcf_unbind_filter(tp, &prog->res);
                call_rcu(&prog->rcu, __cls_bpf_delete_prog);
        }

        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
        return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;
        unsigned long ret = 0UL;

        if (head == NULL)
                return 0UL;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle) {
                        ret = (unsigned long) prog;
                        break;
                }
        }

        return ret;
}

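/* Set up a classic BPF program from the TCA_BPF_OPS opcode array
 * passed in from user space: validate the instruction count against
 * BPF_MAXINSNS and the attribute length, copy the opcodes, and build
 * the executable bpf_prog via bpf_prog_create().
 */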
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
        prog->filter = fp;

        return 0;
}

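/* Set up an eBPF program from a file descriptor (TCA_BPF_FD). The fd
 * must refer to a program of type BPF_PROG_TYPE_SCHED_CLS; an
 * optional TCA_BPF_NAME is duplicated so it can be reported in dumps.
 */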
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
                                 const struct tcf_proto *tp)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_BPF_NAME]) {
                name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
                               nla_len(tb[TCA_BPF_NAME]),
                               GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_fd = bpf_fd;
        prog->bpf_name = name;
        prog->filter = fp;

        if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
                netif_keep_dst(qdisc_dev(tp->q));

        return 0;
}

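/* Common configuration path for new and replaced filters: exactly one
 * of classic BPF (TCA_BPF_OPS) or eBPF (TCA_BPF_FD) must be supplied,
 * actions are validated, the optional direct-action flag is parsed,
 * and the resulting program and extensions are attached to prog.
 */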
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
{
        bool is_bpf, is_ebpf, have_exts = false;
        struct tcf_exts exts;
        int ret;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
                return -EINVAL;

        ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                return ret;
        ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                goto errout;

        if (tb[TCA_BPF_FLAGS]) {
                u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

                if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
                        ret = -EINVAL;
                        goto errout;
                }

                have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
        }

        prog->exts_integrated = have_exts;

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
                       cls_bpf_prog_from_efd(tb, prog, tp);
        if (ret < 0)
                goto errout;

        if (tb[TCA_BPF_CLASSID]) {
                prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
                tcf_bind_filter(tp, &prog->res, base);
        }

        tcf_exts_change(tp, &prog->exts, &exts);
        return 0;

errout:
        tcf_exts_destroy(&exts);
        return ret;
}

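/* Pick an unused handle when user space did not specify one. The
 * generator wraps within 1..0x7FFFFFFE and gives up (returning 0)
 * after 0x80000000 unsuccessful attempts.
 */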
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}

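/* Create or replace a filter: parse the netlink options, allocate and
 * configure the new program, attempt hardware offload, and publish it
 * in the RCU-protected list. A replaced program is unlinked and freed
 * only after a grace period, so concurrent classifiers stay safe.
 */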
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          unsigned long *arg, bool ovr)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
                                      ovr);
        if (ret < 0)
                goto errout;

        cls_bpf_offload(tp, prog, oldprog);

        if (oldprog) {
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = (unsigned long) prog;
        return 0;

errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
        return ret;
}

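/* Dump helpers: emit the classic BPF opcode array, or the eBPF fd and
 * optional name, into the netlink message being built.
 */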
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
                return -EMSGSIZE;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
        struct nlattr *nest;
        u32 bpf_flags = 0;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (prog->res.classid &&
            nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        if (prog->exts_integrated)
                bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
        if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

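/* Iterate over all attached filters for a dump, honouring the
 * walker's skip/count bookkeeping.
 */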
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
        .classify       =       cls_bpf_classify,
        .init           =       cls_bpf_init,
        .destroy        =       cls_bpf_destroy,
        .get            =       cls_bpf_get,
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
        .dump           =       cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);