/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed into hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next-level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and it is general enough.  Its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems to represent the best middle ground between speed
 *	and manageability, both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 *	JHS: We should remove CONFIG_NET_CLS_IND from here
 *	eventually, when the meta match extension is made available.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
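
/*
 *	Filters are typically configured from user space with iproute2;
 *	e.g. something like
 *
 *		tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *			u32 match ip dport 80 0xffff flowid 1:10
 *
 *	selects IPv4 packets with destination port 80 and maps them to
 *	class 1:10.
 */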

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

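/*
 * A u32 filter instance is a tree of hash tables (tc_u_hnode), each
 * bucket holding a singly linked list of key nodes (tc_u_knode).  All
 * tables created under one qdisc share a single tc_u_common.
 *
 * The 32bit handle encodes the position of a node: the hash table id
 * in the upper bits, the bucket in the middle and the node id in the
 * low 12 bits (see the TC_U32_HTID/TC_U32_HASH/TC_U32_NODE helpers in
 * <linux/pkt_cls.h>).
 */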
struct tc_u_knode {
        struct tc_u_knode       *next;
        u32                     handle;
        struct tc_u_hnode       *ht_up;
        struct tcf_exts         exts;
#ifdef CONFIG_NET_CLS_IND
        int                     ifindex;
#endif
        u8                      fshift;
        struct tcf_result       res;
        struct tc_u_hnode       *ht_down;
#ifdef CONFIG_CLS_U32_PERF
        struct tc_u32_pcnt      *pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
        struct tc_u32_mark      mark;
#endif
        struct tc_u32_sel       sel;
};

struct tc_u_hnode {
        struct tc_u_hnode       *next;
        u32                     handle;
        u32                     prio;
        struct tc_u_common      *tp_c;
        int                     refcnt;
        unsigned int            divisor;
        struct tc_u_knode       *ht[1];
};

struct tc_u_common {
        struct tc_u_hnode       *hlist;
        struct Qdisc            *q;
        int                     refcnt;
        u32                     hgenerator;
};

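/*
 * Fold a 32bit key into a bucket index: mask out the interesting bits
 * and shift them down to bit 0.  fshift is precomputed in u32_change()
 * as the position of the lowest set bit of the hash mask, so e.g.
 * hmask 0x00ff0000 gives fshift 16 and the fold yields that one byte.
 */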
static inline unsigned int u32_hash_fold(__be32 key,
                                         const struct tc_u32_sel *sel,
                                         u8 fshift)
{
        unsigned int h = ntohl(key & sel->hmask) >> fshift;

        return h;
}

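/*
 * Packet classification: walk the hash tables iteratively, keeping an
 * explicit stack of (knode, offset) pairs instead of recursing.  At
 * every node all key/mask pairs are compared against 32bit words taken
 * from the packet; on a full match we either terminate
 * (TC_U32_TERMINAL) or descend into the linked table, possibly
 * recomputing the base offset from the packet itself
 * (TC_U32_VAROFFSET/TC_U32_EAT).  When a subtree is exhausted the
 * stack is popped and the parent node is rechecked for a terminal
 * match.
 */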
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
        struct {
                struct tc_u_knode *knode;
                unsigned int      off;
        } stack[TC_U32_MAXDEPTH];

        struct tc_u_hnode *ht = tp->root;
        unsigned int off = skb_network_offset(skb);
        struct tc_u_knode *n;
        int sdepth = 0;
        int off2 = 0;
        int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
        int j;
#endif
        int i, r;

next_ht:
        n = ht->ht[sel];

next_knode:
        if (n) {
                struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
                n->pf->rcnt += 1;
                j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
                if ((skb->mark & n->mark.mask) != n->mark.val) {
                        n = n->next;
                        goto next_knode;
                } else {
                        n->mark.success++;
                }
#endif

                for (i = n->sel.nkeys; i > 0; i--, key++) {
                        int toff = off + key->off + (off2 & key->offmask);
                        __be32 *data, hdata;

                        if (skb_headroom(skb) + toff > INT_MAX)
                                goto out;

                        data = skb_header_pointer(skb, toff, 4, &hdata);
                        if (!data)
                                goto out;
                        if ((*data ^ key->val) & key->mask) {
                                n = n->next;
                                goto next_knode;
                        }
#ifdef CONFIG_CLS_U32_PERF
                        n->pf->kcnts[j] += 1;
                        j++;
#endif
                }
                if (n->ht_down == NULL) {
check_terminal:
                        if (n->sel.flags & TC_U32_TERMINAL) {

                                *res = n->res;
#ifdef CONFIG_NET_CLS_IND
                                if (!tcf_match_indev(skb, n->ifindex)) {
                                        n = n->next;
                                        goto next_knode;
                                }
#endif
#ifdef CONFIG_CLS_U32_PERF
                                n->pf->rhit += 1;
#endif
                                r = tcf_exts_exec(skb, &n->exts, res);
                                if (r < 0) {
                                        n = n->next;
                                        goto next_knode;
                                }

                                return r;
                        }
                        n = n->next;
                        goto next_knode;
                }

                /* PUSH */
                if (sdepth >= TC_U32_MAXDEPTH)
                        goto deadloop;
                stack[sdepth].knode = n;
                stack[sdepth].off = off;
                sdepth++;

                ht = n->ht_down;
                sel = 0;
                if (ht->divisor) {
                        __be32 *data, hdata;

                        data = skb_header_pointer(skb, off + n->sel.hoff, 4,
                                                  &hdata);
                        if (!data)
                                goto out;
                        sel = ht->divisor & u32_hash_fold(*data, &n->sel,
                                                          n->fshift);
                }
                if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
                        goto next_ht;

                if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
                        off2 = n->sel.off + 3;
                        if (n->sel.flags & TC_U32_VAROFFSET) {
                                __be16 *data, hdata;

                                data = skb_header_pointer(skb,
                                                          off + n->sel.offoff,
                                                          2, &hdata);
                                if (!data)
                                        goto out;
                                off2 += ntohs(n->sel.offmask & *data) >>
                                        n->sel.offshift;
                        }
                        off2 &= ~3;
                }
                if (n->sel.flags & TC_U32_EAT) {
                        off += off2;
                        off2 = 0;
                }

                if (off < skb->len)
                        goto next_ht;
        }

        /* POP */
        if (sdepth--) {
                n = stack[sdepth].knode;
                ht = n->ht_up;
                off = stack[sdepth].off;
                goto check_terminal;
        }
out:
        return -1;

deadloop:
        net_warn_ratelimited("cls_u32: dead loop\n");
        return -1;
}

static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
        struct tc_u_hnode *ht;

        for (ht = tp_c->hlist; ht; ht = ht->next)
                if (ht->handle == handle)
                        break;

        return ht;
}

static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
        unsigned int sel;
        struct tc_u_knode *n = NULL;

        sel = TC_U32_HASH(handle);
        if (sel > ht->divisor)
                goto out;

        for (n = ht->ht[sel]; n; n = n->next)
                if (n->handle == handle)
                        break;
out:
        return n;
}


static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
        struct tc_u_hnode *ht;
        struct tc_u_common *tp_c = tp->data;

        if (TC_U32_HTID(handle) == TC_U32_ROOT)
                ht = tp->root;
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

        if (!ht)
                return 0;

        if (TC_U32_KEY(handle) == 0)
                return (unsigned long)ht;

        return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

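/*
 * Allocate an unused hash table id.  User-visible htids live in the
 * top 12 bits of the handle with bit 0x800 set; up to 0x800 attempts
 * are made before giving up and returning 0.
 */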
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
        int i = 0x800;

        do {
                if (++tp_c->hgenerator == 0x7FF)
                        tp_c->hgenerator = 1;
        } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

        return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

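/*
 * All u32 classifier instances attached to one qdisc share the same
 * tc_u_common (anchored in tp->q->u32_node), so hash tables created by
 * one instance can be linked to from another.  Each instance gets its
 * own root hash table with divisor 0, i.e. a single bucket.
 */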
static int u32_init(struct tcf_proto *tp)
{
        struct tc_u_hnode *root_ht;
        struct tc_u_common *tp_c;

        tp_c = tp->q->u32_node;

        root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
        if (root_ht == NULL)
                return -ENOBUFS;

        root_ht->divisor = 0;
        root_ht->refcnt++;
        root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
        root_ht->prio = tp->prio;

        if (tp_c == NULL) {
                tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
                if (tp_c == NULL) {
                        kfree(root_ht);
                        return -ENOBUFS;
                }
                tp_c->q = tp->q;
                tp->q->u32_node = tp_c;
        }

        tp_c->refcnt++;
        root_ht->next = tp_c->hlist;
        tp_c->hlist = root_ht;
        root_ht->tp_c = tp_c;

        tp->root = root_ht;
        tp->data = tp_c;
        return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
        tcf_unbind_filter(tp, &n->res);
        tcf_exts_destroy(tp, &n->exts);
        if (n->ht_down)
                n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
        kfree(n->pf);
#endif
        kfree(n);
        return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
        struct tc_u_knode **kp;
        struct tc_u_hnode *ht = key->ht_up;

        if (ht) {
                for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
                        if (*kp == key) {
                                tcf_tree_lock(tp);
                                *kp = key->next;
                                tcf_tree_unlock(tp);

                                u32_destroy_key(tp, key);
                                return 0;
                        }
                }
        }
        WARN_ON(1);
        return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
        struct tc_u_knode *n;
        unsigned int h;

        for (h = 0; h <= ht->divisor; h++) {
                while ((n = ht->ht[h]) != NULL) {
                        ht->ht[h] = n->next;

                        u32_destroy_key(tp, n);
                }
        }
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode **hn;

        WARN_ON(ht->refcnt);

        u32_clear_hnode(tp, ht);

        for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
                if (*hn == ht) {
                        *hn = ht->next;
                        kfree(ht);
                        return 0;
                }
        }

        WARN_ON(1);
        return -ENOENT;
}

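/*
 * Tear down one classifier instance.  The root hash table is released
 * when its reference count drops to zero; the shared tc_u_common and
 * any remaining hash tables go away with the last instance on the
 * qdisc.
 */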
static void u32_destroy(struct tcf_proto *tp)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *root_ht = tp->root;

        WARN_ON(root_ht == NULL);

        if (root_ht && --root_ht->refcnt == 0)
                u32_destroy_hnode(tp, root_ht);

        if (--tp_c->refcnt == 0) {
                struct tc_u_hnode *ht;

                tp->q->u32_node = NULL;

                for (ht = tp_c->hlist; ht; ht = ht->next) {
                        ht->refcnt--;
                        u32_clear_hnode(tp, ht);
                }

                while ((ht = tp_c->hlist) != NULL) {
                        tp_c->hlist = ht->next;

                        WARN_ON(ht->refcnt != 0);

                        kfree(ht);
                }

                kfree(tp_c);
        }

        tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;

        if (ht == NULL)
                return 0;

        if (TC_U32_KEY(ht->handle))
                return u32_delete_key(tp, (struct tc_u_knode *)ht);

        if (tp->root == ht)
                return -EINVAL;

        if (ht->refcnt == 1) {
                ht->refcnt--;
                u32_destroy_hnode(tp, ht);
        } else {
                return -EBUSY;
        }

        return 0;
}

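/*
 * Pick a free 12bit node id inside a bucket.  Ids already in use in
 * that bucket are collected in a temporary bitmap; the search starts
 * at 0x800 and falls back to the low range only when the upper half is
 * exhausted.  If the bitmap cannot be allocated, 0xFFF is returned as
 * a last resort.
 */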
#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
        struct tc_u_knode *n;
        unsigned long i;
        unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
                                        GFP_KERNEL);
        if (!bitmap)
                return handle | 0xFFF;

        for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
                set_bit(TC_U32_NODE(n->handle), bitmap);

        i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
        if (i >= NR_U32_NODE)
                i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);

        kfree(bitmap);
        return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
        [TCA_U32_CLASSID]       = { .type = NLA_U32 },
        [TCA_U32_HASH]          = { .type = NLA_U32 },
        [TCA_U32_LINK]          = { .type = NLA_U32 },
        [TCA_U32_DIVISOR]       = { .type = NLA_U32 },
        [TCA_U32_SEL]           = { .len = sizeof(struct tc_u32_sel) },
        [TCA_U32_INDEV]         = { .type = NLA_STRING, .len = IFNAMSIZ },
        [TCA_U32_MARK]          = { .len = sizeof(struct tc_u32_mark) },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
                         unsigned long base, struct tc_u_hnode *ht,
                         struct tc_u_knode *n, struct nlattr **tb,
                         struct nlattr *est, bool ovr)
{
        int err;
        struct tcf_exts e;

        tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;

        err = -EINVAL;
        if (tb[TCA_U32_LINK]) {
                u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
                struct tc_u_hnode *ht_down = NULL, *ht_old;

                if (TC_U32_KEY(handle))
                        goto errout;

                if (handle) {
                        ht_down = u32_lookup_ht(ht->tp_c, handle);

                        if (ht_down == NULL)
                                goto errout;
                        ht_down->refcnt++;
                }

                tcf_tree_lock(tp);
                ht_old = n->ht_down;
                n->ht_down = ht_down;
                tcf_tree_unlock(tp);

                if (ht_old)
                        ht_old->refcnt--;
        }
        if (tb[TCA_U32_CLASSID]) {
                n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
                tcf_bind_filter(tp, &n->res, base);
        }

#ifdef CONFIG_NET_CLS_IND
        if (tb[TCA_U32_INDEV]) {
                int ret;
                ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
                if (ret < 0)
                        goto errout;
                n->ifindex = ret;
        }
#endif
        tcf_exts_change(tp, &n->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(tp, &e);
        return err;
}

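/*
 * Create or update a filter element.  Depending on the attributes this
 * either modifies an existing key node in place, creates a new hash
 * table (TCA_U32_DIVISOR), or allocates a new key node, copies the
 * selector from TCA_U32_SEL and links it into the bucket given by the
 * handle, keeping the list sorted by node id.
 */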
static int u32_change(struct net *net, struct sk_buff *in_skb,
                      struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca,
                      unsigned long *arg, bool ovr)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        struct tc_u32_sel *s;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_U32_MAX + 1];
        u32 htid;
        int err;

        if (opt == NULL)
                return handle ? -EINVAL : 0;

        err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
        if (err < 0)
                return err;

        n = (struct tc_u_knode *)*arg;
        if (n) {
                if (TC_U32_KEY(n->handle) == 0)
                        return -EINVAL;

                return u32_set_parms(net, tp, base, n->ht_up, n, tb,
                                     tca[TCA_RATE], ovr);
        }

        if (tb[TCA_U32_DIVISOR]) {
                unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

                if (--divisor > 0x100)
                        return -EINVAL;
                if (TC_U32_KEY(handle))
                        return -EINVAL;
                if (handle == 0) {
                        handle = gen_new_htid(tp->data);
                        if (handle == 0)
                                return -ENOMEM;
                }
                ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
                if (ht == NULL)
                        return -ENOBUFS;
                ht->tp_c = tp_c;
                ht->refcnt = 1;
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
                ht->next = tp_c->hlist;
                tp_c->hlist = ht;
                *arg = (unsigned long)ht;
                return 0;
        }

        if (tb[TCA_U32_HASH]) {
                htid = nla_get_u32(tb[TCA_U32_HASH]);
                if (TC_U32_HTID(htid) == TC_U32_ROOT) {
                        ht = tp->root;
                        htid = ht->handle;
                } else {
                        ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
                        if (ht == NULL)
                                return -EINVAL;
                }
        } else {
                ht = tp->root;
                htid = ht->handle;
        }

        if (ht->divisor < TC_U32_HASH(htid))
                return -EINVAL;

        if (handle) {
                if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
                        return -EINVAL;
                handle = htid | TC_U32_NODE(handle);
        } else
                handle = gen_new_kid(ht, htid);

        if (tb[TCA_U32_SEL] == NULL)
                return -EINVAL;

        s = nla_data(tb[TCA_U32_SEL]);

        n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
        if (n == NULL)
                return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
        n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
        if (n->pf == NULL) {
                kfree(n);
                return -ENOBUFS;
        }
#endif

        memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
        n->ht_up = ht;
        n->handle = handle;
        n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
        tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);

#ifdef CONFIG_CLS_U32_MARK
        if (tb[TCA_U32_MARK]) {
                struct tc_u32_mark *mark;

                mark = nla_data(tb[TCA_U32_MARK]);
                memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
                n->mark.success = 0;
        }
#endif

        err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
        if (err == 0) {
                struct tc_u_knode **ins;
                for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
                        if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
                                break;

                n->next = *ins;
                tcf_tree_lock(tp);
                *ins = n;
                tcf_tree_unlock(tp);

                *arg = (unsigned long)n;
                return 0;
        }
#ifdef CONFIG_CLS_U32_PERF
        kfree(n->pf);
#endif
        kfree(n);
        return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        unsigned int h;

        if (arg->stop)
                return;

        for (ht = tp_c->hlist; ht; ht = ht->next) {
                if (ht->prio != tp->prio)
                        continue;
                if (arg->count >= arg->skip) {
                        if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                }
                arg->count++;
                for (h = 0; h <= ht->divisor; h++) {
                        for (n = ht->ht[h]; n; n = n->next) {
                                if (arg->count < arg->skip) {
                                        arg->count++;
                                        continue;
                                }
                                if (arg->fn(tp, (unsigned long)n, arg) < 0) {
                                        arg->stop = 1;
                                        return;
                                }
                                arg->count++;
                        }
                }
        }
}

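/*
 * Dump one element back to user space: a plain hash table is described
 * by its divisor only, a key node by its selector, parent hash table,
 * classid, link target, firewall mark and (if configured) input device
 * and per-key hit counters.
 */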
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
{
        struct tc_u_knode *n = (struct tc_u_knode *)fh;
        struct nlattr *nest;

        if (n == NULL)
                return skb->len;

        t->tcm_handle = n->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (TC_U32_KEY(n->handle) == 0) {
                struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
                u32 divisor = ht->divisor + 1;

                if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
                        goto nla_put_failure;
        } else {
                if (nla_put(skb, TCA_U32_SEL,
                            sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
                            &n->sel))
                        goto nla_put_failure;
                if (n->ht_up) {
                        u32 htid = n->handle & 0xFFFFF000;
                        if (nla_put_u32(skb, TCA_U32_HASH, htid))
                                goto nla_put_failure;
                }
                if (n->res.classid &&
                    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
                        goto nla_put_failure;
                if (n->ht_down &&
                    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
                        goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
                if ((n->mark.val || n->mark.mask) &&
                    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
                        goto nla_put_failure;
#endif

                if (tcf_exts_dump(skb, &n->exts) < 0)
                        goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
                if (n->ifindex) {
                        struct net_device *dev;
                        dev = __dev_get_by_index(net, n->ifindex);
                        if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
                                goto nla_put_failure;
                }
#endif
#ifdef CONFIG_CLS_U32_PERF
                if (nla_put(skb, TCA_U32_PCNT,
                            sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
                            n->pf))
                        goto nla_put_failure;
#endif
        }

        nla_nest_end(skb, nest);

        if (TC_U32_KEY(n->handle))
                if (tcf_exts_dump_stats(skb, &n->exts) < 0)
                        goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
        .kind           =       "u32",
        .classify       =       u32_classify,
        .init           =       u32_init,
        .destroy        =       u32_destroy,
        .get            =       u32_get,
        .put            =       u32_put,
        .change         =       u32_change,
        .delete         =       u32_delete,
        .walk           =       u32_walk,
        .dump           =       u32_dump,
        .owner          =       THIS_MODULE,
};

static int __init init_u32(void)
{
        pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
        pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
        pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
        pr_info("    Actions configured\n");
#endif
        return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
        unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");