/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box that is
   able to enqueue packets and to dequeue them (when the device is
   ready to send something) in an order and at times determined by the
   algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a form
   more intelligible to the kernel, to perform the sanity checks and
   the parts of the work that are common to all qdiscs, and to provide
   rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.
   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by a policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---requeue

   requeues a packet that was dequeued once. It is used for non-standard
   or just buggy devices that can defer output even when
   netif_queue_stopped() == 0.

   ---reset

   returns the qdisc to its initial state: purges all buffers and clears
   all timers and counters (except for statistics).

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
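
/*
 * Illustrative sketch only (not part of the original file): the smallest
 * useful shape of the ops described above, for a classless qdisc that
 * uses the built-in skb queue.  All names here (sample_enqueue,
 * sample_dequeue, sample_qdisc_ops) are hypothetical; see sch_fifo.c
 * for a real minimal qdisc.
 *
 *	static int sample_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		__skb_queue_tail(&sch->q, skb);	// keeps q->q.qlen valid
 *		return NET_XMIT_SUCCESS;
 *	}
 *
 *	static struct sk_buff *sample_dequeue(struct Qdisc *sch)
 *	{
 *		return __skb_dequeue(&sch->q);	// NULL: nothing to send now
 *	}
 *
 *	static struct Qdisc_ops sample_qdisc_ops __read_mostly = {
 *		.id		= "sample",
 *		.enqueue	= sample_enqueue,
 *		.dequeue	= sample_dequeue,
 *		.owner		= THIS_MODULE,
 *	};
 */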
/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->requeue == NULL)
		qops->requeue = noop_qdisc_ops.requeue;
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
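
/*
 * Hedged usage sketch: a qdisc module typically registers its ops on load
 * and unregisters them on unload.  "sample_qdisc_ops" refers to the
 * hypothetical ops table sketched near the top of this file.
 *
 *	static int __init sample_module_init(void)
 *	{
 *		return register_qdisc(&sample_qdisc_ops);  // -EEXIST if the id clashes
 *	}
 *
 *	static void __exit sample_module_exit(void)
 *	{
 *		unregister_qdisc(&sample_qdisc_ops);
 *	}
 *
 *	module_init(sample_module_init)
 *	module_exit(sample_module_exit)
 */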
/* We know the handle. Find the qdisc among all qdiscs attached to the
   device (the root qdisc, all its children, children of children, etc.)
 */

struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}
/*
 * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
 * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
 */
static DEFINE_SPINLOCK(qdisc_list_lock);

static void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		spin_lock_bh(&qdisc_list_lock);
		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
		spin_unlock_bh(&qdisc_list_lock);
	}
}

void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		spin_lock_bh(&qdisc_list_lock);
		list_del(&q->list);
		spin_unlock_bh(&qdisc_list_lock);
	}
}
EXPORT_SYMBOL(qdisc_list_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	unsigned int i;
	struct Qdisc *q;

	spin_lock_bh(&qdisc_list_lock);

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		struct Qdisc *txq_root = txq->qdisc_sleeping;

		q = qdisc_match_from_root(txq_root, handle);
		if (q)
			goto unlock;
	}

	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);

unlock:
	spin_unlock_bh(&qdisc_list_lock);

	return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
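
/*
 * Hedged usage sketch of the rate-table cache: a shaping qdisc takes a
 * reference in ->init()/->change() and drops it in ->destroy().  The
 * attribute index TCA_FOO_RTAB and the qopt variable are hypothetical
 * stand-ins; see sch_tbf.c for a real user.
 *
 *	struct qdisc_rate_table *rtab;
 *
 *	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_FOO_RTAB]);
 *	if (rtab == NULL)
 *		return -EINVAL;		// bad ratespec or malformed table
 *	...
 *	qdisc_put_rtab(rtab);		// later, usually in ->destroy()
 */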
static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (!s || tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree(tab);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(qdisc_calculate_pkt_len);
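
/*
 * A worked example of the lookup above, with illustrative (not real)
 * size-table parameters: overhead = 24, cell_align = -1, cell_log = 3,
 * size_log = 0, tsize = 512.  A 100-byte skb gives
 * pkt_len = 100 + 24 = 124, slot = (124 - 1) >> 3 = 15, so the final
 * pkt_len is stab->data[15].  A slot beyond the end of the table is
 * extrapolated as data[tsize - 1] * (slot / tsize) + data[slot % tsize].
 */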
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_US2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
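
/*
 * Hedged usage sketch: a rate-limiting qdisc embeds a qdisc_watchdog in
 * its private data, calls qdisc_watchdog_init() from ->init(), arms it
 * from ->dequeue() when the head packet may not be sent yet, and calls
 * qdisc_watchdog_cancel() from ->reset().  "foo_sched_data" and
 * "next_send" are hypothetical; see sch_tbf.c for a real user.
 *
 *	struct foo_sched_data *q = qdisc_priv(sch);
 *
 *	if (now < q->next_send) {
 *		qdisc_watchdog_schedule(&q->watchdog, q->next_send);
 *		return NULL;	// throttled; the timer reschedules us
 *	}
 */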
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n, *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize = size;
	clhash->hashmask = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
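
/*
 * Hedged usage sketch of the class hash helpers: a classful qdisc embeds
 * a struct Qdisc_class_common (which carries the classid) in each class
 * and keeps a struct Qdisc_class_hash in its private data.  "foo_class"
 * and "q->clhash" are hypothetical names; see sch_htb.c for a real user.
 *
 *	struct foo_class {
 *		struct Qdisc_class_common common;
 *		// ... per-class state ...
 *	};
 *
 *	qdisc_class_hash_init(&q->clhash);		// in ->init()
 *	cl->common.classid = classid;
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	qdisc_class_hash_grow(sch, &q->clhash);		// amortized rehash
 *	qdisc_class_hash_remove(&q->clhash, &cl->common);
 *	qdisc_class_hash_destroy(&q->clhash);		// in ->destroy()
 */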
/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while	(qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}
/* Attach toplevel qdisc to device queue. */

static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
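
/*
 * Hedged usage sketch: a qdisc that throws packets away outside of the
 * normal dequeue path (for example while shrinking an internal queue in
 * ->change()) should fix up its own qlen and then let its ancestors
 * know.  "dropped" is a hypothetical count of the skbs just purged.
 *
 *	sch->q.qlen -= dropped;
 *	qdisc_tree_decrease_qlen(sch, dropped);
 */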
static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy the old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = &dev->rx_queue;

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			notify_and_destroy(skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EINVAL;

		if (cops) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			}
		}
		if (!err)
			notify_and_destroy(skb, n, classid, old, new);
	}
	return err;
}
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     u32 parent, u32 handle, struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out3;
			}
			sch->stab = stab;
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						root_lock, tca[TCA_RATE]);
			if (err) {
				/*
				 * Any broken qdiscs that would require
				 * a ops->reset() here? The qdisc was never
				 * in action so it shouldn't be necessary.
				 */
				if (ops->destroy)
					ops->destroy(sch);
				goto err_out3;
			}
		}
		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	qdisc_put_stab(sch->stab);
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	qdisc_put_stab(sch->stab);
	sch->stab = stab;

	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	return 0;
}
struct check_loop_arg
{
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
			return err;
	} else {
		qdisc_notify(skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (net != &init_net)
		return -EINVAL;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}

		/* It may be the default qdisc; ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags&NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and have a choice:
				 *   either to change it or to create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requester wanted to say
				 *   that the qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of a hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND that does not match the
				 *   existing one.
				 */
				if ((n->nlmsg_flags&NLM_F_CREATE) &&
				    (n->nlmsg_flags&NLM_F_REPLACE) &&
				    ((n->nlmsg_flags&NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags&NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags&NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		q = qdisc_create(dev, &dev->rx_queue,
				 tcm->tcm_parent, tcm->tcm_parent,
				 tca, &err);
	else
		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			u32 clid, struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && old->handle) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	if (net != &init_net)
		return 0;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];
	read_lock(&dev_base_lock);
	idx = 0;
	for_each_netdev(&init_net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		dev_queue = netdev_get_tx_queue(dev, 0);
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = &dev->rx_queue;
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	read_unlock(&dev_base_lock);

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;

		/* Now qid is a genuine qdisc handle, consistent with
		   both parent and child.

		   TC_H_MAJ(pid) may still be unspecified; complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags&NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
}
struct qdisc_dump_args
{
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (net != &init_net)
		return 0;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = &dev->rx_queue;
	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
/* Main classifier routine: scans the classifier chain attached
   to this qdisc, (optionally) tests for the protocol, and asks
   specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err = 0;

	for (; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
	__be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;
reclassify:
#endif
	protocol = skb->protocol;

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			printk("rule prio %u protocol %02x reclassify loop, "
			       "packet dropped\n",
			       tp->prio&0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
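
/*
 * Hedged usage sketch: a classful qdisc usually resolves the target class
 * in its ->enqueue() by running its filter chain through tc_classify().
 * "q->filter_list" and "struct foo_class" are hypothetical; see
 * sch_prio.c or sch_htb.c for real callers.
 *
 *	struct tcf_result res;
 *
 *	if (tc_classify(skb, q->filter_list, &res) >= 0) {
 *		struct foo_class *cl = (struct foo_class *)res.class;
 *		// enqueue skb to cl's child qdisc
 *	}
 */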
void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, PDE(inode)->data);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif
static int __init pktsched_init(void)
{
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	proc_net_fops_create(&init_net, "psched", 0, &psched_fops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);