/*
 * net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	Model Description:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
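
/* Illustrative use from user space (device name and parameter values are
 * examples only, assuming the iproute2 tc(8) utility):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *
 * The first command delays packets by 100ms +/- 10ms with 25% correlated
 * jitter; the second drops 0.3% of packets, each loss decision correlated
 * 25% with the previous one.
 */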
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u32 rate;
	s32 packet_overhead;
	u32 cell_size;
	u32 cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;
};
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};
/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
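
/* The update above is a fixed-point weighted average: with rho taken as a
 * fraction of 2^32, the generator returns approximately
 *
 *	answer = value * (1 - rho) + last * rho
 *
 * so rho == 0x80000000 (0.5) mixes the fresh random value and the previous
 * output in equal parts, and larger rho values correlate successive outputs
 * more strongly.
 */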
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}

		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}
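
/* Because rnd is uniform on [0, 2^32), the comparisons above partition that
 * interval with the (scaled) transition probabilities: in state 1, for
 * example, [0, a4) moves to state 4 (isolated loss), (a4, a1) moves to
 * state 3 (burst loss), and the remainder stays in state 1 (good
 * transmission).
 */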
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
		break;
	}

	return false;
}
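
/* Mapping to the model parameters (see struct clgstate): state 1 is the
 * "good" state, which switches to the "bad" state with probability a1 (p)
 * and loses a packet with probability a4 (1-k); state 2 is the "bad" state,
 * which recovers with probability a2 (r) and, as the code is written,
 * loses a packet with probability a3 (h).
 */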
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator,
		 * if it is 1 drops a packet and if needed writes the event
		 * in the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event
		 * in the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma. Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
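
/* Worked example: the return value is effectively
 *
 *	mu + sigma * t / NETEM_DIST_SCALE
 *
 * split into a quotient term and a rounded remainder term to avoid
 * overflow. With NETEM_DIST_SCALE == 8192, a table entry of t == 8192
 * shifts the result by exactly one sigma from the mean, and t == -4096
 * by half a sigma in the other direction.
 */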
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
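
/* The transmission delay is len / q->rate seconds: the length (plus any
 * configured per-packet and per-cell overhead) is first scaled to
 * nanoseconds so the division stays in integer arithmetic, then the result
 * is converted to scheduler ticks.
 */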
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}
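
/* Note the ">=" comparison: packets with equal time_to_send descend to the
 * right of existing entries, so an in-order walk (rb_first onwards) returns
 * equal-time packets in arrival order and the tree behaves as a plain FIFO
 * when no jitter is configured.
 */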
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			sch->qstats.drops++; /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying the packet.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(skb->len, q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}
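
/* Reordering example: with gap N and reorder probability p, the first N-1
 * packets of each cycle take the delayed tfifo path above; the Nth packet
 * is then, with probability p, stamped with the current time and queued at
 * the head, which makes it overtake the still-delayed packets ahead of it.
 */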
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			len = qdisc_pkt_len(skb);
			sch->qstats.backlog -= len;
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is it time to send this packet? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
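
/* When the earliest packet in the tfifo is not yet due, netem_dequeue()
 * returns NULL after arming the watchdog for that packet's time_to_send;
 * the watchdog timer unthrottles the qdisc and reschedules it, and the
 * qdisc_is_throttled() check at the top avoids busy re-polling until then.
 */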
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
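
/* The swap() under the qdisc root lock publishes the new table atomically
 * with respect to the dequeue path; dist_free() then releases whichever
 * table (possibly NULL) was previously installed, outside the lock.
 */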
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
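
/* netem's TCA_OPTIONS attribute carries a struct tc_netem_qopt followed by
 * optional nested attributes, so parse_attr() is handed
 * len == sizeof(struct tc_netem_qopt) and starts parsing after that
 * (aligned) header rather than at the start of the payload.
 */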
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(sch, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	rate.rate = q->rate;
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};
static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");