/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
- * queue->lock spinlock.
+ * the qdisc_root_lock(qdisc) spinlock.
*
* The idea is the following:
- * - enqueue, dequeue are serialized via top level device
- * spinlock queue->lock.
- * - ingress filtering is serialized via top level device
- * spinlock dev->rx_queue.lock.
+ * - enqueue, dequeue are serialized via the qdisc root lock.
+ * - ingress filtering is also serialized via the qdisc root lock.
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
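For reference, a sketch of the lock helpers this comment now refers to; these are assumed to live in include/net/sch_generic.h and are not part of this diff:

	static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
	{
		return &qdisc->q.lock;
	}

	static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
	{
		/* The root qdisc's q.lock protects the whole tree. */
		return qdisc_lock(qdisc_root(qdisc));
	}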
-void qdisc_lock_tree(struct net_device *dev)
- __acquires(dev->rx_queue.lock)
-{
- unsigned int i;
-
- local_bh_disable();
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- spin_lock(&txq->lock);
- }
- spin_lock(&dev->rx_queue.lock);
-}
-EXPORT_SYMBOL(qdisc_lock_tree);
-
-void qdisc_unlock_tree(struct net_device *dev)
- __releases(dev->rx_queue.lock)
-{
- unsigned int i;
-
- spin_unlock(&dev->rx_queue.lock);
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- spin_unlock(&txq->lock);
- }
- local_bh_enable();
-}
-EXPORT_SYMBOL(qdisc_unlock_tree);
-
static inline int qdisc_qlen(struct Qdisc *q)
{
return q->q.qlen;
}
-static inline int dev_requeue_skb(struct sk_buff *skb,
- struct netdev_queue *dev_queue,
- struct Qdisc *q)
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
if (unlikely(skb->next))
q->gso_skb = skb;
else
q->ops->requeue(skb, q);
- netif_schedule_queue(dev_queue);
+ __netif_schedule(q);
return 0;
}
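dev_requeue_skb() parks a partially-sent GSO segment list in q->gso_skb (skb->next is only non-NULL for such lists). The dequeue_skb() helper used below is assumed to drain that stash before calling into the qdisc, roughly:

	static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
	{
		struct sk_buff *skb = q->gso_skb;

		if (skb)
			q->gso_skb = NULL;	/* finish the stalled GSO list first */
		else
			skb = q->dequeue(q);

		return skb;
	}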
* some time.
*/
__get_cpu_var(netdev_rx_stat).cpu_collision++;
- ret = dev_requeue_skb(skb, dev_queue, q);
+ ret = dev_requeue_skb(skb, q);
}
return ret;
}
/*
- * NOTE: Called under queue->lock with locally disabled BH.
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
*
- * __QUEUE_STATE_QDISC_RUNNING guarantees only one CPU can process
- * this queue at a time. queue->lock serializes queue accesses for
- * this queue AND txq->qdisc pointer itself.
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses
+ * for this qdisc.
*
* netif_tx_lock serializes accesses to device driver.
*
- * queue->lock and netif_tx_lock are mutually exclusive,
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is held, the other must be free.
*
 * Note that this procedure can be called by a watchdog timer
* >0 - queue is not empty.
*
*/
-static inline int qdisc_restart(struct netdev_queue *txq)
+static inline int qdisc_restart(struct Qdisc *q)
{
- struct Qdisc *q = txq->qdisc;
+ struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
struct net_device *dev;
+ spinlock_t *root_lock;
struct sk_buff *skb;
/* Dequeue packet */
if (unlikely((skb = dequeue_skb(q)) == NULL))
return 0;
- /* And release queue */
- spin_unlock(&txq->lock);
+ root_lock = qdisc_root_lock(q);
+
+ /* And release qdisc */
+ spin_unlock(root_lock);
- dev = txq->dev;
+ dev = qdisc_dev(q);
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_subqueue_stopped(dev, skb))
ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
- spin_lock(&txq->lock);
- q = txq->qdisc;
+ spin_lock(root_lock);
switch (ret) {
case NETDEV_TX_OK:
printk(KERN_WARNING "BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
- ret = dev_requeue_skb(skb, txq, q);
+ ret = dev_requeue_skb(skb, q);
break;
}
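+	/*
+	 * If the driver stopped the tx queue, a non-zero return would make
+	 * __qdisc_run() retry at once; report the queue as empty instead
+	 * and let the queue wakeup reschedule the qdisc.
+	 */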
+ if (ret && netif_tx_queue_stopped(txq))
+ ret = 0;
+
return ret;
}
-void __qdisc_run(struct netdev_queue *txq)
+void __qdisc_run(struct Qdisc *q)
{
unsigned long start_time = jiffies;
- while (qdisc_restart(txq)) {
- if (netif_tx_queue_stopped(txq))
- break;
-
+ while (qdisc_restart(q)) {
/*
* Postpone processing if
* 1. another process needs the CPU;
* 2. we've been doing it for too long.
*/
if (need_resched() || jiffies != start_time) {
- netif_schedule_queue(txq);
+ __netif_schedule(q);
break;
}
}
- clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
+ clear_bit(__QDISC_STATE_RUNNING, &q->state);
}
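The clear_bit() above drops ownership of the qdisc. The entry point that takes it is assumed to look roughly like this, which is what makes __QDISC_STATE_RUNNING a single-runner guarantee:

	static inline void qdisc_run(struct Qdisc *q)
	{
		if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
			__qdisc_run(q);
	}

A CPU that loses the test_and_set_bit() race just returns; the owner is guaranteed to see the packets it enqueued.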
static void dev_watchdog(unsigned long arg)
if (some_queue_stopped &&
time_after(jiffies, (dev->trans_start +
dev->watchdog_timeo))) {
- printk(KERN_INFO "NETDEV WATCHDOG: %s: "
- "transmit timed out\n",
- dev->name);
+ char drivername[64];
+ printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+ dev->name, netdev_drivername(dev, drivername, 64));
dev->tx_timeout(dev);
WARN_ON_ONCE(1);
}
.owner = THIS_MODULE,
};
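+/*
+ * qdisc_lock() now resolves to &qdisc->q.lock and the root lock is found
+ * through qdisc->dev_queue, so even the built-in noop qdisc needs an
+ * initialized q.lock and a dummy netdev_queue pointing back at itself.
+ */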
+static struct netdev_queue noop_netdev_queue = {
+ .qdisc = &noop_qdisc,
+};
+
struct Qdisc noop_qdisc = {
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
.ops = &noop_qdisc_ops,
.list = LIST_HEAD_INIT(noop_qdisc.list),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+ .dev_queue = &noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);
.owner = THIS_MODULE,
};
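+/* noqueue_qdisc and noqueue_netdev_queue point at each other, hence the
+ * forward declaration. */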
+static struct Qdisc noqueue_qdisc;
+static struct netdev_queue noqueue_netdev_queue = {
+ .qdisc = &noqueue_qdisc,
+};
+
static struct Qdisc noqueue_qdisc = {
.enqueue = NULL,
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
.ops = &noqueue_qdisc_ops,
.list = LIST_HEAD_INIT(noqueue_qdisc.list),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
+ .dev_queue = &noqueue_netdev_queue,
};
}
EXPORT_SYMBOL(qdisc_create_dflt);
-/* Under queue->lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
static void __qdisc_destroy(struct rcu_head *head)
{
struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
+ const struct Qdisc_ops *ops = qdisc->ops;
+
+#ifdef CONFIG_NET_SCHED
+ qdisc_put_stab(qdisc->stab);
+#endif
+ gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+ if (ops->reset)
+ ops->reset(qdisc);
+ if (ops->destroy)
+ ops->destroy(qdisc);
+
+ module_put(ops->owner);
+ dev_put(qdisc_dev(qdisc));
+
+ kfree_skb(qdisc->gso_skb);
+
kfree((char *) qdisc - qdisc->padded);
}
-/* Under queue->lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
- const struct Qdisc_ops *ops = qdisc->ops;
-
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
return;
- list_del(&qdisc->list);
- gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
- if (ops->reset)
- ops->reset(qdisc);
- if (ops->destroy)
- ops->destroy(qdisc);
+ if (qdisc->parent)
+ list_del(&qdisc->list);
- module_put(ops->owner);
- dev_put(qdisc_dev(qdisc));
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
EXPORT_SYMBOL(qdisc_destroy);
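With the teardown moved into the RCU callback, a lockless reader that picked up a qdisc pointer under rcu_read_lock() keeps a valid object until the grace period ends. A hypothetical reader, for illustration only:

	rcu_read_lock();
	q = rcu_dereference(dev_queue->qdisc);
	qlen = q->q.qlen;	/* safe: __qdisc_destroy() cannot run until
				 * after rcu_read_unlock() */
	rcu_read_unlock();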
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
} else {
qdisc = &noqueue_qdisc;
}
struct netdev_queue *dev_queue,
void *_need_watchdog)
{
+ struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
int *need_watchdog_p = _need_watchdog;
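+	/* Assumption: activation runs under the rtnl mutex before any
+	 * packets flow, so rcu_assign_pointer() alone is enough to
+	 * publish the new qdisc.
+	 */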
- spin_lock_bh(&dev_queue->lock);
- rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
- if (dev_queue->qdisc != &noqueue_qdisc)
+ rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+ if (new_qdisc != &noqueue_qdisc)
*need_watchdog_p = 1;
- spin_unlock_bh(&dev_queue->lock);
}
void dev_activate(struct net_device *dev)
void *_qdisc_default)
{
struct Qdisc *qdisc_default = _qdisc_default;
- struct sk_buff *skb = NULL;
struct Qdisc *qdisc;
- spin_lock_bh(&dev_queue->lock);
-
qdisc = dev_queue->qdisc;
if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+
dev_queue->qdisc = qdisc_default;
qdisc_reset(qdisc);
- skb = qdisc->gso_skb;
- qdisc->gso_skb = NULL;
+ spin_unlock_bh(qdisc_lock(qdisc));
}
-
- spin_unlock_bh(&dev_queue->lock);
-
- kfree_skb(skb);
}
static bool some_qdisc_is_running(struct net_device *dev, int lock)
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *dev_queue;
+ spinlock_t *root_lock;
+ struct Qdisc *q;
int val;
dev_queue = netdev_get_tx_queue(dev, i);
+ q = dev_queue->qdisc;
+ root_lock = qdisc_root_lock(q);
if (lock)
- spin_lock_bh(&dev_queue->lock);
+ spin_lock_bh(root_lock);
- val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state);
+ val = test_bit(__QDISC_STATE_RUNNING, &q->state);
if (lock)
- spin_unlock_bh(&dev_queue->lock);
+ spin_unlock_bh(root_lock);
if (val)
return true;
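The lock argument lets dev_deactivate() poll cheaply first and then confirm under the root lock, which orders it after a run that was completing while it polled. The caller is assumed to loop roughly like this:

	/* Wait for outstanding qdisc_run calls. */
	do {
		while (some_qdisc_is_running(dev, 0))
			yield();

		/* Double-check with the lock held so all effects of the
		 * last run are visible when we return. */
		running = some_qdisc_is_running(dev, 1);
	} while (WARN_ON_ONCE(running));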
dev_queue->qdisc = qdisc;
dev_queue->qdisc_sleeping = qdisc;
- INIT_LIST_HEAD(&dev_queue->qdisc_list);
}
void dev_init_scheduler(struct net_device *dev)
{
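+	/* Assumption: called during device registration, before any packet
+	 * can be in flight, so the old tree-wide lock is not needed here.
+	 */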
- qdisc_lock_tree(dev);
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
- qdisc_unlock_tree(dev);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
+ spinlock_t *root_lock = qdisc_root_lock(qdisc);
+
dev_queue->qdisc = qdisc_default;
dev_queue->qdisc_sleeping = qdisc_default;
+ spin_lock(root_lock);
qdisc_destroy(qdisc);
+ spin_unlock(root_lock);
}
}
void dev_shutdown(struct net_device *dev)
{
- qdisc_lock_tree(dev);
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
BUG_TRAP(!timer_pending(&dev->watchdog_timer));
- qdisc_unlock_tree(dev);
}