qla3xxx: bugfix tx reset after stress conditions.
author    Ron Mercer <ron.mercer@qlogic.com>
          Mon, 26 Feb 2007 19:06:41 +0000 (11:06 -0800)
committer Jeff Garzik <jeff@garzik.org>
          Tue, 27 Feb 2007 09:21:44 +0000 (04:21 -0500)
To Reproduce the Problem:
To reproduce this panic consistently, run an intensive network
application such as 'netperf' and then switch to a different console.
After a couple of seconds, a tx reset occurs.

Reason:
Interrupts were being enabled even while the poll routine was still
running.

Solution:
Interrupts are now enabled only when we are ready to give up the poll
routine (a condensed sketch of the resulting ordering follows the
sign-offs below).

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
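
For context, the ordering the patch moves to can be summarized as
follows. This is a condensed, illustrative sketch stitched together
from the hunks below, assuming the 2.6.20-era NAPI interface
(netif_rx_schedule_prep / __netif_rx_schedule / netif_rx_complete) and
the ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do)
signature implied by the diff; the sketch_* names are hypothetical and
this is not a drop-in replacement for the driver code.

#include <linux/kernel.h>
#include <linux/netdevice.h>    /* netif_rx_schedule_prep() and friends */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include "qla3xxx.h"            /* struct ql3_adapter, register layout */

/* ql_disable_interrupts(), ql_enable_interrupts(), ql_write_common_reg()
 * and ql_tx_rx_clean() are the driver's own static helpers in qla3xxx.c. */

/* ISR: mask completion interrupts only once the poll routine is
 * guaranteed to be scheduled; if it is already running, leave them alone. */
static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);

	if (likely(netif_rx_schedule_prep(ndev))) {
		ql_disable_interrupts(qdev);
		__netif_rx_schedule(ndev);
	}
	return IRQ_HANDLED;
}

/* Poll: re-enable interrupts only on the exit path, after the response
 * queue consumer index has been pushed to the chip under hw_lock. */
static int sketch_poll(struct net_device *ndev, int *budget)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	int work_to_do = min(*budget, ndev->quota);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
	*budget -= rx_cleaned;
	ndev->quota -= rx_cleaned;

	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
		netif_rx_complete(ndev);

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.rspQConsumerIndex,
				    qdev->rsp_consumer_index);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);	/* last step before returning */
		return 0;			/* done polling */
	}
	return 1;				/* more work, stay scheduled */
}

The key change is that ql_enable_interrupts() now runs only after
netif_rx_complete() and the consumer-index write, so a completion
interrupt can no longer be unmasked while a poll is still draining the
queues.
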
drivers/net/qla3xxx.c

index 7a2f01a22cbf1ae1e974760302a7279f240d932a..5bf446f7be151eae05f953de42a9735b3e7c441b 100755
@@ -1921,10 +1921,11 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
        struct net_rsp_iocb *net_rsp;
        struct net_device *ndev = qdev->ndev;
        unsigned long hw_flags;
+       int work_done = 0;
 
        /* While there are entries in the completion queue. */
        while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
-               qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+               qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
                net_rsp = qdev->rsp_current;
                switch (net_rsp->opcode) {
@@ -1975,37 +1976,41 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
                } else {
                        qdev->rsp_current++;
                }
+
+               work_done = *tx_cleaned + *rx_cleaned;
        }
 
-       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if(work_done) {
+               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-       ql_update_lrg_bufq_prod_index(qdev);
+               ql_update_lrg_bufq_prod_index(qdev);
 
-       if (qdev->small_buf_release_cnt >= 16) {
-               while (qdev->small_buf_release_cnt >= 16) {
-                       qdev->small_buf_q_producer_index++;
+               if (qdev->small_buf_release_cnt >= 16) {
+                       while (qdev->small_buf_release_cnt >= 16) {
+                               qdev->small_buf_q_producer_index++;
 
-                       if (qdev->small_buf_q_producer_index ==
-                           NUM_SBUFQ_ENTRIES)
-                               qdev->small_buf_q_producer_index = 0;
-                       qdev->small_buf_release_cnt -= 8;
-               }
+                               if (qdev->small_buf_q_producer_index ==
+                                   NUM_SBUFQ_ENTRIES)
+                                       qdev->small_buf_q_producer_index = 0;
+                               qdev->small_buf_release_cnt -= 8;
+                       }
 
-               ql_write_common_reg(qdev,
-                                   &port_regs->CommonRegs.
-                                   rxSmallQProducerIndex,
-                                   qdev->small_buf_q_producer_index);
-       }
+                       wmb();
+                       ql_write_common_reg(qdev,
+                                           &port_regs->CommonRegs.
+                                           rxSmallQProducerIndex,
+                                           qdev->small_buf_q_producer_index);
 
-       ql_write_common_reg(qdev,
-                           &port_regs->CommonRegs.rspQConsumerIndex,
-                           qdev->rsp_consumer_index);
-       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               }
 
-       if (unlikely(netif_queue_stopped(qdev->ndev))) {
-               if (netif_queue_stopped(qdev->ndev) &&
-                   (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
-                       netif_wake_queue(qdev->ndev);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+               if (unlikely(netif_queue_stopped(qdev->ndev))) {
+                       if (netif_queue_stopped(qdev->ndev) &&
+                           (atomic_read(&qdev->tx_count) > 
+                            (NUM_REQ_Q_ENTRIES / 4)))
+                               netif_wake_queue(qdev->ndev);
+               }
        }
 
        return *tx_cleaned + *rx_cleaned;
@@ -2016,6 +2021,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
        struct ql3_adapter *qdev = netdev_priv(ndev);
        int work_to_do = min(*budget, ndev->quota);
        int rx_cleaned = 0, tx_cleaned = 0;
+       unsigned long hw_flags;
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
        if (!netif_carrier_ok(ndev))
                goto quit_polling;
@@ -2027,6 +2034,13 @@ static int ql_poll(struct net_device *ndev, int *budget)
        if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
 quit_polling:
                netif_rx_complete(ndev);
+
+               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.rspQConsumerIndex,
+                                   qdev->rsp_consumer_index);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
                ql_enable_interrupts(qdev);
                return 0;
        }
@@ -2079,11 +2093,10 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
                queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
                spin_unlock(&qdev->adapter_lock);
        } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
-               ql_disable_interrupts(qdev);
-               if (likely(netif_rx_schedule_prep(ndev)))
+               if (likely(netif_rx_schedule_prep(ndev))) {
+                       ql_disable_interrupts(qdev);
                        __netif_rx_schedule(ndev);
-               else
-                       ql_enable_interrupts(qdev);
+               }
        } else {
                return IRQ_NONE;
        }