[NET_SCHED]: Remove CONFIG_NET_ESTIMATOR option
author    Patrick McHardy <kaber@trash.net>
          Tue, 3 Jul 2007 05:46:07 +0000 (22:46 -0700)
committer David S. Miller <davem@sunset.davemloft.net>
          Wed, 11 Jul 2007 05:16:37 +0000 (22:16 -0700)
The generic estimator is always built in anyway, and all the config option
does is prevent including a minimal amount of code for setting it up.
Additionally, the option is already automatically selected in most cases.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/Kconfig
net/sched/act_api.c
net/sched/act_police.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
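
Editor's note: every hunk below follows the same pattern, namely dropping the
#ifdef CONFIG_NET_ESTIMATOR guards around the gen_estimator calls. The sketch
below is an illustrative reconstruction of that pattern, not code from this
commit: the helper names example_attach_estimator/example_detach_estimator are
made up, while the gen_new_estimator()/gen_kill_estimator() calls and the
bstats/rate_est/stats_lock fields are taken from the qdisc_create() and
qdisc_destroy() hunks themselves (declared in include/net/gen_stats.h). It is
a sketch of in-kernel code and is not buildable outside the kernel tree.

#include <linux/rtnetlink.h>	/* struct rtattr */
#include <net/sch_generic.h>	/* struct Qdisc */
#include <net/gen_stats.h>	/* gen_new_estimator(), gen_kill_estimator() */

/*
 * Illustrative helpers only: they restate the estimator setup/teardown
 * that this patch makes unconditional.
 */
static int example_attach_estimator(struct Qdisc *sch, struct rtattr *est)
{
	/* Formerly wrapped in #ifdef CONFIG_NET_ESTIMATOR in qdisc_create(). */
	if (est)
		return gen_new_estimator(&sch->bstats, &sch->rate_est,
					 sch->stats_lock, est);
	return 0;
}

static void example_detach_estimator(struct Qdisc *sch)
{
	/* Formerly wrapped in #ifdef CONFIG_NET_ESTIMATOR in qdisc_destroy(). */
	gen_kill_estimator(&sch->bstats, &sch->rate_est);
}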

diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f3217942ca871dd2c5eb33697639907392942fd9..b4662888bdbd9d6df32cb8c25d2ba2ca6188953d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -286,7 +286,6 @@ config CLS_U32_MARK
 config NET_CLS_RSVP
        tristate "IPv4 Resource Reservation Protocol (RSVP)"
        select NET_CLS
-       select NET_ESTIMATOR
        ---help---
          The Resource Reservation Protocol (RSVP) permits end systems to
          request a minimum and maximum data flow rate for a connection; this
@@ -301,7 +300,6 @@ config NET_CLS_RSVP
 config NET_CLS_RSVP6
        tristate "IPv6 Resource Reservation Protocol (RSVP6)"
        select NET_CLS
-       select NET_ESTIMATOR
        ---help---
          The Resource Reservation Protocol (RSVP) permits end systems to
          request a minimum and maximum data flow rate for a connection; this
@@ -393,7 +391,6 @@ config NET_EMATCH_TEXT
 
 config NET_CLS_ACT
        bool "Actions"
-       select NET_ESTIMATOR
        ---help---
          Say Y here if you want to use traffic control actions. Actions
          get attached to classifiers and are invoked after a successful
@@ -476,7 +473,6 @@ config NET_ACT_SIMP
 config NET_CLS_POLICE
        bool "Traffic Policing (obsolete)"
        depends on NET_CLS_ACT!=y
-       select NET_ESTIMATOR
        ---help---
          Say Y here if you want to do traffic policing, i.e. strict
          bandwidth limiting. This option is obsoleted by the traffic
@@ -491,14 +487,6 @@ config NET_CLS_IND
          classification based on the incoming device. This option is
          likely to disappear in favour of the metadata ematch.
 
-config NET_ESTIMATOR
-       bool "Rate estimator"
-       ---help---
-         Say Y here to allow using rate estimators to estimate the current
-         rate-of-flow for network devices, queues, etc. This module is
-         automatically selected if needed but can be selected manually for
-         statistical purposes.
-
 endif # NET_SCHED
 
 endmenu
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 711dd26c95c325a42937ba083f970c5d8b77b197..72bb9bd1a22af5baa939036818e647a457eff278 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -42,10 +42,8 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
                        write_lock_bh(hinfo->lock);
                        *p1p = p->tcfc_next;
                        write_unlock_bh(hinfo->lock);
-#ifdef CONFIG_NET_ESTIMATOR
                        gen_kill_estimator(&p->tcfc_bstats,
                                           &p->tcfc_rate_est);
-#endif
                        kfree(p);
                        return;
                }
@@ -236,11 +234,9 @@ struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_acti
        p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
        p->tcfc_tm.install = jiffies;
        p->tcfc_tm.lastuse = jiffies;
-#ifdef CONFIG_NET_ESTIMATOR
        if (est)
                gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
                                  p->tcfc_stats_lock, est);
-#endif
        a->priv = (void *) p;
        return p;
 }
@@ -614,9 +610,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
                        goto errout;
 
        if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
            gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
-#endif
            gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
                goto errout;
 
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 616f465f407e03d33feaffc17f45a937fb3fcd23..580698db578a44333d9c8f7a074300464e8a7039 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -118,10 +118,8 @@ void tcf_police_destroy(struct tcf_police *p)
                        write_lock_bh(&police_lock);
                        *p1p = p->tcf_next;
                        write_unlock_bh(&police_lock);
-#ifdef CONFIG_NET_ESTIMATOR
                        gen_kill_estimator(&p->tcf_bstats,
                                           &p->tcf_rate_est);
-#endif
                        if (p->tcfp_R_tab)
                                qdisc_put_rtab(p->tcfp_R_tab);
                        if (p->tcfp_P_tab)
@@ -227,7 +225,6 @@ override:
                police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
        police->tcf_action = parm->action;
 
-#ifdef CONFIG_NET_ESTIMATOR
        if (tb[TCA_POLICE_AVRATE-1])
                police->tcfp_ewma_rate =
                        *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
@@ -235,7 +232,6 @@ override:
                gen_replace_estimator(&police->tcf_bstats,
                                      &police->tcf_rate_est,
                                      police->tcf_stats_lock, est);
-#endif
 
        spin_unlock_bh(&police->tcf_lock);
        if (ret != ACT_P_CREATED)
@@ -281,14 +277,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
        police->tcf_bstats.bytes += skb->len;
        police->tcf_bstats.packets++;
 
-#ifdef CONFIG_NET_ESTIMATOR
        if (police->tcfp_ewma_rate &&
            police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
                police->tcf_qstats.overlimits++;
                spin_unlock(&police->tcf_lock);
                return police->tcf_action;
        }
-#endif
 
        if (skb->len <= police->tcfp_mtu) {
                if (police->tcfp_R_tab == NULL) {
@@ -348,10 +342,8 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        if (police->tcfp_result)
                RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
                        &police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
        if (police->tcfp_ewma_rate)
                RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
        return skb->len;
 
 rtattr_failure:
@@ -477,14 +469,12 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
                        goto failure;
                police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
        }
-#ifdef CONFIG_NET_ESTIMATOR
        if (tb[TCA_POLICE_AVRATE-1]) {
                if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
                        goto failure;
                police->tcfp_ewma_rate =
                        *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
        }
-#endif
        police->tcfp_toks = police->tcfp_burst = parm->burst;
        police->tcfp_mtu = parm->mtu;
        if (police->tcfp_mtu == 0) {
@@ -498,11 +488,9 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
        police->tcf_index = parm->index ? parm->index :
                tcf_police_new_index();
        police->tcf_action = parm->action;
-#ifdef CONFIG_NET_ESTIMATOR
        if (est)
                gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
                                  police->tcf_stats_lock, est);
-#endif
        h = tcf_hash(police->tcf_index, POL_TAB_MASK);
        write_lock_bh(&police_lock);
        police->tcf_next = tcf_police_ht[h];
@@ -528,14 +516,12 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police)
        police->tcf_bstats.bytes += skb->len;
        police->tcf_bstats.packets++;
 
-#ifdef CONFIG_NET_ESTIMATOR
        if (police->tcfp_ewma_rate &&
            police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
                police->tcf_qstats.overlimits++;
                spin_unlock(&police->tcf_lock);
                return police->tcf_action;
        }
-#endif
        if (skb->len <= police->tcfp_mtu) {
                if (police->tcfp_R_tab == NULL) {
                        spin_unlock(&police->tcf_lock);
@@ -591,10 +577,8 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
        if (police->tcfp_result)
                RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
                        &police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
        if (police->tcfp_ewma_rate)
                RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
        return skb->len;
 
 rtattr_failure:
@@ -612,9 +596,7 @@ int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
                goto errout;
 
        if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
            gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
-#endif
            gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
                goto errout;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bec600af03cac30ded0a6171127ff6bc1fb794d8..0f9e1c71746a9c0282ae4d2723c5f15ea1eb7e7e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -515,7 +515,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
        sch->handle = handle;
 
        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
-#ifdef CONFIG_NET_ESTIMATOR
                if (tca[TCA_RATE-1]) {
                        err = gen_new_estimator(&sch->bstats, &sch->rate_est,
                                                sch->stats_lock,
@@ -531,7 +530,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
                                goto err_out3;
                        }
                }
-#endif
                qdisc_lock_tree(dev);
                list_add_tail(&sch->list, &dev->qdisc_list);
                qdisc_unlock_tree(dev);
@@ -559,11 +557,9 @@ static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
                if (err)
                        return err;
        }
-#ifdef CONFIG_NET_ESTIMATOR
        if (tca[TCA_RATE-1])
                gen_replace_estimator(&sch->bstats, &sch->rate_est,
                        sch->stats_lock, tca[TCA_RATE-1]);
-#endif
        return 0;
 }
 
@@ -839,9 +835,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                goto rtattr_failure;
 
        if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
            gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
-#endif
            gnet_stats_copy_queue(&d, &q->qstats) < 0)
                goto rtattr_failure;
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index ee2d5967d109bdd61c7eb67c4ec04d585974b1f5..bf1ea9e75cd94c39f9e57af5c2dcd0c1c51aa71b 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1653,9 +1653,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                cl->xstats.undertime = cl->undertime - q->now;
 
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;
 
@@ -1726,9 +1724,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
        tcf_destroy_chain(cl->filter_list);
        qdisc_destroy(cl->q);
        qdisc_put_rtab(cl->R_tab);
-#ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
        if (cl != &q->link)
                kfree(cl);
 }
@@ -1873,11 +1869,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 
                sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
                if (tca[TCA_RATE-1])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                cl->stats_lock, tca[TCA_RATE-1]);
-#endif
                return 0;
        }
 
@@ -1963,11 +1957,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
                cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
        sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
        if (tca[TCA_RATE-1])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
                        cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 
        *arg = (unsigned long)cl;
        return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2488dbb17b608426a831f6a2dda7c49e8de60fb1..e525fd723c128abb9950a8d4a5e6a659fdac988e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -514,9 +514,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
                return;
 
        list_del(&qdisc->list);
-#ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
-#endif
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9d124c4ee3a76ad515b3601868cd50c9fc065e31..7ccdf63a0cb549e45cb0c46c368fb207823f6956 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1054,11 +1054,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                }
                sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
                if (tca[TCA_RATE-1])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                cl->stats_lock, tca[TCA_RATE-1]);
-#endif
                return 0;
        }
 
@@ -1112,11 +1110,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->cl_pcvtoff = parent->cl_cvtoff;
        sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
        if (tca[TCA_RATE-1])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
                        cl->stats_lock, tca[TCA_RATE-1]);
-#endif
        *arg = (unsigned long)cl;
        return 0;
 }
@@ -1128,9 +1124,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
 
        tcf_destroy_chain(cl->filter_list);
        qdisc_destroy(cl->qdisc);
-#ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
        if (cl != &q->root)
                kfree(cl);
 }
@@ -1384,9 +1378,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        xstats.rtwork  = cl->cl_cumul;
 
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;