/* net/sched/sch_hhf.c          Heavy-Hitter Filter (HHF)
 *
 * Copyright (C) 2013 Terry Lam <vtlam@google.com>
 * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
 */

#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

/*      Heavy-Hitter Filter (HHF)
 *
 * Principles:
 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
 * in which the heavy-hitter bucket is served with less weight.
 * In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
 * are isolated from heavy-hitters (e.g., persistent bulk traffic) and also
 * get a higher share of bandwidth.
 *
 * To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
 * following paper:
 * [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
 * Accounting", in ACM SIGCOMM, 2002.
 *
 * Conceptually, a multi-stage filter comprises k independent hash functions
 * and k counter arrays. Packets are indexed into k counter arrays by k hash
 * functions, respectively. The counters are then increased by the packet sizes.
 * Therefore,
 *    - For a heavy-hitter flow: *all* of its k array counters must be large.
 *    - For a non-heavy-hitter flow: some of its k array counters can be large
 *      due to hash collision with other small flows; however, with high
 *      probability, not *all* k counters are large.
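 *      For example (illustrative): a flow carrying many megabytes inflates
 *      all k of its counters far past any threshold, while a flow carrying
 *      a few kilobytes is misclassified only if it collides with heavy
 *      traffic in every one of the k arrays simultaneously.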
 *
 * By the design of the multi-stage filter algorithm, the false negative rate
 * (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
 * susceptible to false positives (non-heavy-hitters mistakenly classified as
 * heavy-hitters).
 * Therefore, we also implement the following optimizations to reduce false
 * positives by avoiding unnecessary increment of the counter values:
 *    - Optimization O1: once a heavy-hitter is identified, its bytes are not
 *        accounted in the array counters. This technique is called "shielding"
 *        in Section 3.3.1 of [EV02].
 *    - Optimization O2: conservative update of counters
 *                       (Section 3.3.2 of [EV02]),
 *        New counter value = max {old counter value,
 *                                 smallest counter value + packet bytes}
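 *        For example (illustrative): a 4-byte packet whose counters read
 *        {5, 9, 7} raises each to max {old value, 5 + 4}, yielding
 *        {9, 9, 9} rather than the {9, 13, 11} of a plain update.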
 *
 * Finally, we refresh the counters periodically since otherwise the counter
 * values will keep accumulating.
 *
 * Once a flow is classified as heavy-hitter, we also save its per-flow state
 * in an exact-matching flow table so that its subsequent packets can be
 * dispatched to the heavy-hitter bucket accordingly.
 *
 *
 * At a high level, this qdisc works as follows:
 * Given a packet p:
 *   - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
 *     heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
 *     bucket.
 *   - Otherwise, forward p to the multi-stage filter, denoted filter F
 *        + If F decides that p belongs to a non-heavy-hitter flow, then send p
 *          to the non-heavy-hitter bucket.
 *        + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
 *          then set up a new flow entry for the flow-id of p in the table T and
 *          send p to the heavy-hitter bucket.
 *
 * In this implementation:
 *   - T is a fixed-size hash-table with 1024 entries. Hash collision is
 *     resolved by linked-list chaining.
 *   - F has four counter arrays, each array containing 1024 32-bit counters.
 *     That means 4 * 1024 * 32 bits = 16KB of memory.
 *   - Since each array in F contains 1024 counters, 10 bits are sufficient to
 *     index into each array.
 *     Hence, instead of having four hash functions, we chop the 32-bit
 *     skb-hash into three 10-bit chunks; the fourth 10-bit index is computed
 *     as the XOR of those three chunks and the remaining top bits of the hash.
 *   - We need to clear the counter arrays periodically; however, directly
 *     memsetting 16KB of memory can lead to cache eviction and unwanted delay.
 *     So by representing each counter by a valid bit, we only need to reset
 *     4096 bits (i.e. 512 bytes) instead of 16KB of memory.
 *   - The Deficit Round Robin engine is taken from the fq_codel implementation
 *     (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
 *     fq_codel_flow in the fq_codel implementation.
 *
 */

/* Non-configurable parameters */
#define HH_FLOWS_CNT     1024  /* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT   4     /* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN   1024  /* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10    /* masking 10 bits */
#define HHF_BIT_MASK     0x3FF /* bitmask of 10 bits */

#define WDRR_BUCKET_CNT  2     /* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
        WDRR_BUCKET_FOR_HH      = 0, /* bucket id for heavy-hitters */
        WDRR_BUCKET_FOR_NON_HH  = 1  /* bucket id for non-heavy-hitters */
};

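/* Wraparound-safe "a happened before b" for u32 jiffies values (the same
 * idiom as time_before()): the signed difference is negative iff a
 * precedes b modulo 2^32.
 */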
#define hhf_time_before(a, b)   \
        (typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))

/* Heavy-hitter per-flow state */
struct hh_flow_state {
        u32              hash_id;       /* hash of flow-id (e.g. TCP 5-tuple) */
        u32              hit_timestamp; /* last time heavy-hitter was seen */
        struct list_head flowchain;     /* chaining under hash collision */
};

/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  bucketchain;
        int               deficit;
};

struct hhf_sched_data {
        struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
        u32                perturbation;   /* hash perturbation */
        u32                quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32                drop_overlimit; /* number of times max qdisc packet
                                            * limit was hit
                                            */
        struct list_head   *hh_flows;       /* table T (currently active HHs) */
        u32                hh_flows_limit;            /* max active HH allocs */
        u32                hh_flows_overlimit; /* num of disallowed HH allocs */
        u32                hh_flows_total_cnt;          /* total admitted HHs */
        u32                hh_flows_current_cnt;        /* total current HHs  */
        u32                *hhf_arrays[HHF_ARRAYS_CNT]; /* HH filter F */
        u32                hhf_arrays_reset_timestamp;  /* last time hhf_arrays
                                                         * was reset
                                                         */
        unsigned long      *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
                                                             * of hhf_arrays
                                                             */
        /* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
        struct list_head   new_buckets; /* list of new buckets */
        struct list_head   old_buckets; /* list of old buckets */

        /* Configurable HHF parameters */
        u32                hhf_reset_timeout; /* interval to reset counter
                                               * arrays in filter F
                                               * (default 40ms)
                                               */
        u32                hhf_admit_bytes;   /* counter thresh to classify as
                                               * HH (default 128KB).
                                               * With these default values,
                                               * 128KB / 40ms = 25 Mbps
                                               * i.e., we expect to capture HHs
                                               * sending > 25 Mbps.
                                               */
        u32                hhf_evict_timeout; /* aging threshold to evict idle
                                               * HHs out of table T. This should
                                               * be large enough to avoid
                                               * reordering during HH eviction.
                                               * (default 1s)
                                               */
        u32                hhf_non_hh_weight; /* WDRR weight for non-HHs
                                               * (default 2,
                                               *  i.e., non-HH : HH = 2 : 1)
                                               */
};

static u32 hhf_time_stamp(void)
{
        return jiffies;
}

/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
                                       struct list_head *head,
                                       struct hhf_sched_data *q)
{
        struct hh_flow_state *flow, *next;
        u32 now = hhf_time_stamp();

        if (list_empty(head))
                return NULL;

        list_for_each_entry_safe(flow, next, head, flowchain) {
                u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

                if (hhf_time_before(prev, now)) {
                        /* Delete expired heavy-hitters, but preserve one entry
                         * to avoid kzalloc() the next time this slot is hit.
                         */
                        if (list_is_last(&flow->flowchain, head))
                                return NULL;
                        list_del(&flow->flowchain);
                        kfree(flow);
                        q->hh_flows_current_cnt--;
                } else if (flow->hash_id == hash) {
                        return flow;
                }
        }
        return NULL;
}

/* Returns a flow state entry for a new heavy-hitter.  Either reuses an expired
 * entry or dynamically allocates a new one.
 */
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
                                          struct hhf_sched_data *q)
{
        struct hh_flow_state *flow;
        u32 now = hhf_time_stamp();

        if (!list_empty(head)) {
                /* Find an expired heavy-hitter flow entry. */
                list_for_each_entry(flow, head, flowchain) {
                        u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

                        if (hhf_time_before(prev, now))
                                return flow;
                }
        }

        if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
                q->hh_flows_overlimit++;
                return NULL;
        }
        /* Create new entry. */
        flow = kzalloc(sizeof(struct hh_flow_state), GFP_ATOMIC);
        if (!flow)
                return NULL;

        q->hh_flows_current_cnt++;
        INIT_LIST_HEAD(&flow->flowchain);
        list_add_tail(&flow->flowchain, head);

        return flow;
}

/* Assigns packets to WDRR buckets.  Implements a multi-stage filter to
 * classify heavy-hitters.
 */
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        u32 tmp_hash, hash;
        u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
        struct hh_flow_state *flow;
        u32 pkt_len, min_hhf_val;
        int i;
        u32 prev;
        u32 now = hhf_time_stamp();

        /* Reset the HHF counter arrays if this is the right time. */
        prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
        if (hhf_time_before(prev, now)) {
                for (i = 0; i < HHF_ARRAYS_CNT; i++)
                        bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
                q->hhf_arrays_reset_timestamp = now;
        }

        /* Get hashed flow-id of the skb. */
        hash = skb_get_hash_perturb(skb, q->perturbation);

        /* Check if this packet belongs to an already established HH flow. */
        flow_pos = hash & HHF_BIT_MASK;
        flow = seek_list(hash, &q->hh_flows[flow_pos], q);
        if (flow) { /* found its HH flow */
                flow->hit_timestamp = now;
                return WDRR_BUCKET_FOR_HH;
        }

        /* Now pass the packet through the multi-stage filter. */
        tmp_hash = hash;
        xorsum = 0;
        for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
                /* Split the skb_hash into three 10-bit chunks. */
                filter_pos[i] = tmp_hash & HHF_BIT_MASK;
                xorsum ^= filter_pos[i];
                tmp_hash >>= HHF_BIT_MASK_LEN;
        }
        /* The last index is the XOR of the three chunks and the leftover
         * top bits of the hash.
         */
        filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;
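        /* For example (illustrative): skb_hash 0x12345678 yields the chunks
         * 0x278, 0x115 and 0x123; its top two bits are zero, so the fourth
         * index is 0x278 ^ 0x115 ^ 0x123 = 0x24E.
         */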

        pkt_len = qdisc_pkt_len(skb);
        min_hhf_val = ~0U;
        for (i = 0; i < HHF_ARRAYS_CNT; i++) {
                u32 val;

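                /* Lazily zero a counter whose valid bit was cleared by the
                 * periodic refresh above; this is what spares us from
                 * memsetting the full 16KB of arrays.
                 */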
                if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
                        q->hhf_arrays[i][filter_pos[i]] = 0;
                        __set_bit(filter_pos[i], q->hhf_valid_bits[i]);
                }

                val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
                if (min_hhf_val > val)
                        min_hhf_val = val;
        }

        /* Found a new HH iff all counter values > HH admit threshold. */
        if (min_hhf_val > q->hhf_admit_bytes) {
                /* Just captured a new heavy-hitter. */
                flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
                if (!flow) /* alloc failed or HH limit reached */
                        return WDRR_BUCKET_FOR_NON_HH;
                flow->hash_id = hash;
                flow->hit_timestamp = now;
                q->hh_flows_total_cnt++;

                /* By returning without updating counters in q->hhf_arrays,
                 * we implicitly implement "shielding" (see Optimization O1).
                 */
                return WDRR_BUCKET_FOR_HH;
        }

        /* Conservative update of HHF arrays (see Optimization O2). */
        for (i = 0; i < HHF_ARRAYS_CNT; i++) {
                if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
                        q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
        }
        return WDRR_BUCKET_FOR_NON_HH;
}

/* Removes one skb from head of bucket. */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
        struct sk_buff *skb = bucket->head;

        bucket->head = skb->next;
        skb->next = NULL;
        return skb;
}

/* Tail-adds skb to bucket. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
        if (bucket->head == NULL)
                bucket->head = skb;
        else
                bucket->tail->next = skb;
        bucket->tail = skb;
        skb->next = NULL;
}

static unsigned int hhf_drop(struct Qdisc *sch)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct wdrr_bucket *bucket;

        /* Always try to drop from heavy-hitters first. */
        bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
        if (!bucket->head)
                bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];

        if (bucket->head) {
                struct sk_buff *skb = dequeue_head(bucket);

                sch->q.qlen--;
                qdisc_qstats_drop(sch);
                qdisc_qstats_backlog_dec(sch, skb);
                kfree_skb(skb);
        }

        /* Return id of the bucket from which the packet was dropped. */
        return bucket - q->buckets;
}

static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        enum wdrr_bucket_idx idx;
        struct wdrr_bucket *bucket;

        idx = hhf_classify(skb, sch);

        bucket = &q->buckets[idx];
        bucket_add(bucket, skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&bucket->bucketchain)) {
                unsigned int weight;

                /* The logic of new_buckets vs. old_buckets is the same as
                 * new_flows vs. old_flows in the implementation of fq_codel,
                 * i.e., short bursts of non-HHs should have strict priority.
                 */
                if (idx == WDRR_BUCKET_FOR_HH) {
                        /* Always move heavy-hitters to old bucket. */
                        weight = 1;
                        list_add_tail(&bucket->bucketchain, &q->old_buckets);
                } else {
                        weight = q->hhf_non_hh_weight;
                        list_add_tail(&bucket->bucketchain, &q->new_buckets);
                }
                bucket->deficit = weight * q->quantum;
        }
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;

        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet from this
         * bucket.
         */
        if (hhf_drop(sch) == idx)
                return NET_XMIT_CN;

        /* As we dropped a packet, better let upper stack know this. */
        qdisc_tree_decrease_qlen(sch, 1);
        return NET_XMIT_SUCCESS;
}

static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb = NULL;
        struct wdrr_bucket *bucket;
        struct list_head *head;

begin:
        head = &q->new_buckets;
        if (list_empty(head)) {
                head = &q->old_buckets;
                if (list_empty(head))
                        return NULL;
        }
        bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);

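        /* DRR: a bucket that has used up its deficit is given a fresh
         * quantum (scaled by its weight) and rotated to the tail of
         * old_buckets before it can be served again.
         */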
        if (bucket->deficit <= 0) {
                int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
                              1 : q->hhf_non_hh_weight;

                bucket->deficit += weight * q->quantum;
                list_move_tail(&bucket->bucketchain, &q->old_buckets);
                goto begin;
        }

        if (bucket->head) {
                skb = dequeue_head(bucket);
                sch->q.qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
        }

        if (!skb) {
                /* Force a pass through old_buckets to prevent starvation. */
                if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
                        list_move_tail(&bucket->bucketchain, &q->old_buckets);
                else
                        list_del_init(&bucket->bucketchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        bucket->deficit -= qdisc_pkt_len(skb);

        return skb;
}

static void hhf_reset(struct Qdisc *sch)
{
        struct sk_buff *skb;

        while ((skb = hhf_dequeue(sch)) != NULL)
                kfree_skb(skb);
}

static void *hhf_zalloc(size_t sz)
{
        void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

        if (!ptr)
                ptr = vzalloc(sz);

        return ptr;
}

static void hhf_free(void *addr)
{
        kvfree(addr);
}

static void hhf_destroy(struct Qdisc *sch)
{
        int i;
        struct hhf_sched_data *q = qdisc_priv(sch);

        for (i = 0; i < HHF_ARRAYS_CNT; i++) {
                hhf_free(q->hhf_arrays[i]);
                hhf_free(q->hhf_valid_bits[i]);
        }

        for (i = 0; i < HH_FLOWS_CNT; i++) {
                struct hh_flow_state *flow, *next;
                struct list_head *head = &q->hh_flows[i];

                if (list_empty(head))
                        continue;
                list_for_each_entry_safe(flow, next, head, flowchain) {
                        list_del(&flow->flowchain);
                        kfree(flow);
                }
        }
        hhf_free(q->hh_flows);
}

static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
        [TCA_HHF_BACKLOG_LIMIT]  = { .type = NLA_U32 },
        [TCA_HHF_QUANTUM]        = { .type = NLA_U32 },
        [TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
        [TCA_HHF_RESET_TIMEOUT]  = { .type = NLA_U32 },
        [TCA_HHF_ADMIT_BYTES]    = { .type = NLA_U32 },
        [TCA_HHF_EVICT_TIMEOUT]  = { .type = NLA_U32 },
        [TCA_HHF_NON_HH_WEIGHT]  = { .type = NLA_U32 },
};

static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HHF_MAX + 1];
        unsigned int qlen;
        int err;
        u64 non_hh_quantum;
        u32 new_quantum = q->quantum;
        u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy);
        if (err < 0)
                return err;

        if (tb[TCA_HHF_QUANTUM])
                new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);

        if (tb[TCA_HHF_NON_HH_WEIGHT])
                new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);

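        /* The non-HH bucket's deficit is replenished by quantum * weight
         * and stored in a signed int, so reject settings whose product
         * would overflow it.
         */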
        non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
        if (non_hh_quantum > INT_MAX)
                return -EINVAL;

        sch_tree_lock(sch);

        if (tb[TCA_HHF_BACKLOG_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);

        q->quantum = new_quantum;
        q->hhf_non_hh_weight = new_hhf_non_hh_weight;

        if (tb[TCA_HHF_HH_FLOWS_LIMIT])
                q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);

        if (tb[TCA_HHF_RESET_TIMEOUT]) {
                u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);

                q->hhf_reset_timeout = usecs_to_jiffies(us);
        }

        if (tb[TCA_HHF_ADMIT_BYTES])
                q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);

        if (tb[TCA_HHF_EVICT_TIMEOUT]) {
                u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);

                q->hhf_evict_timeout = usecs_to_jiffies(us);
        }

        qlen = sch->q.qlen;
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = hhf_dequeue(sch);

                kfree_skb(skb);
        }
        qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

        sch_tree_unlock(sch);
        return 0;
}

static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        int i;

        sch->limit = 1000;
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->perturbation = prandom_u32();
        INIT_LIST_HEAD(&q->new_buckets);
        INIT_LIST_HEAD(&q->old_buckets);

        /* Configurable HHF parameters */
        q->hhf_reset_timeout = HZ / 25; /* 40  ms */
        q->hhf_admit_bytes = 131072;    /* 128 KB */
        q->hhf_evict_timeout = HZ;      /* 1  sec */
        q->hhf_non_hh_weight = 2;

        if (opt) {
                int err = hhf_change(sch, opt);

                if (err)
                        return err;
        }

        if (!q->hh_flows) {
                /* Initialize heavy-hitter flow table. */
                q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
                                         sizeof(struct list_head));
                if (!q->hh_flows)
                        return -ENOMEM;
                for (i = 0; i < HH_FLOWS_CNT; i++)
                        INIT_LIST_HEAD(&q->hh_flows[i]);

                /* Cap max active HHs at twice len of hh_flows table. */
                q->hh_flows_limit = 2 * HH_FLOWS_CNT;
                q->hh_flows_overlimit = 0;
                q->hh_flows_total_cnt = 0;
                q->hh_flows_current_cnt = 0;

                /* Initialize heavy-hitter filter arrays. */
                for (i = 0; i < HHF_ARRAYS_CNT; i++) {
                        q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
                                                      sizeof(u32));
                        if (!q->hhf_arrays[i]) {
                                hhf_destroy(sch);
                                return -ENOMEM;
                        }
                }
                q->hhf_arrays_reset_timestamp = hhf_time_stamp();

                /* Initialize valid bits of heavy-hitter filter arrays. */
                for (i = 0; i < HHF_ARRAYS_CNT; i++) {
                        q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
                                                          BITS_PER_BYTE);
                        if (!q->hhf_valid_bits[i]) {
                                hhf_destroy(sch);
                                return -ENOMEM;
                        }
                }

                /* Initialize Weighted DRR buckets. */
                for (i = 0; i < WDRR_BUCKET_CNT; i++) {
                        struct wdrr_bucket *bucket = q->buckets + i;

                        INIT_LIST_HEAD(&bucket->bucketchain);
                }
        }

        return 0;
}

static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
            nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
                        jiffies_to_usecs(q->hhf_reset_timeout)) ||
            nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
            nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
                        jiffies_to_usecs(q->hhf_evict_timeout)) ||
            nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct tc_hhf_xstats st = {
                .drop_overlimit = q->drop_overlimit,
                .hh_overlimit   = q->hh_flows_overlimit,
                .hh_tot_count   = q->hh_flows_total_cnt,
                .hh_cur_count   = q->hh_flows_current_cnt,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
        .id             =       "hhf",
        .priv_size      =       sizeof(struct hhf_sched_data),

        .enqueue        =       hhf_enqueue,
        .dequeue        =       hhf_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .drop           =       hhf_drop,
        .init           =       hhf_init,
        .reset          =       hhf_reset,
        .destroy        =       hhf_destroy,
        .change         =       hhf_change,
        .dump           =       hhf_dump,
        .dump_stats     =       hhf_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init hhf_module_init(void)
{
        return register_qdisc(&hhf_qdisc_ops);
}

static void __exit hhf_module_exit(void)
{
        unregister_qdisc(&hhf_qdisc_ops);
}

module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
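
/* Example usage (illustrative; the tc(8) parameter names mirror the
 * TCA_HHF_* attributes above, and the values shown are this module's
 * defaults):
 *
 *   tc qdisc add dev eth0 root hhf limit 1000 hh_limit 2048 \
 *      reset_timeout 40ms admit_bytes 131072 evict_timeout 1s \
 *      non_hh_weight 2
 */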