struct ib_event_handler event_handler;
 
-       struct net_device_stats stats;
-
        struct net_device *parent;
        struct list_head child_intfs;
        struct list_head list;
 
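[Annotation: this first hunk drops the driver-private struct net_device_stats copy from struct ipoib_dev_priv; every hunk below is the matching mechanical substitution of dev->stats for priv->stats. A minimal before/after sketch of the pattern, with a hypothetical foo_priv standing in for the driver private data:

	/* Before: the driver carried its own counter block in its private data. */
	struct foo_priv {				/* hypothetical */
		struct net_device_stats stats;		/* private copy, now redundant */
	};

	/* After: bump the counters embedded in struct net_device directly. */
	static void foo_count_rx(struct net_device *dev, unsigned int len)
	{
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
]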
                ipoib_dbg(priv, "cm recv error "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
-               ++priv->stats.rx_dropped;
+               ++dev->stats.rx_dropped;
                goto repost;
        }
 
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
-               ++priv->stats.rx_dropped;
+               ++dev->stats.rx_dropped;
                goto repost;
        }
 
        skb_pull(skb, IPOIB_ENCAP_LEN);
 
        dev->last_rx = jiffies;
-       ++priv->stats.rx_packets;
-       priv->stats.rx_bytes += skb->len;
+       ++dev->stats.rx_packets;
+       dev->stats.rx_bytes += skb->len;
 
        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, tx->mtu);
-               ++priv->stats.tx_dropped;
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_dropped;
+               ++dev->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
                return;
        }
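[Annotation: worth noting in this error path that an oversized skb is counted against both tx_dropped and tx_errors, and that ipoib_cm_skb_too_long() takes ownership of the skb rather than freeing it here; as I read the surrounding code, it queues the skb so the ICMP fragmentation-needed reply can be generated later from workqueue context. Only the counter location changes in this hunk, not that behavior.]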
        tx_req->skb = skb;
        addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }
        if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                                addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_errors;
                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
 
        /* FIXME: is this right? Shouldn't we only increment on success? */
-       ++priv->stats.tx_packets;
-       priv->stats.tx_bytes += tx_req->skb->len;
+       ++dev->stats.tx_packets;
+       dev->stats.tx_bytes += tx_req->skb->len;
 
        dev_kfree_skb_any(tx_req->skb);
 
 
         * this packet and reuse the old buffer.
         */
        if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-               ++priv->stats.rx_dropped;
+               ++dev->stats.rx_dropped;
                goto repost;
        }
 
        skb_pull(skb, IPOIB_ENCAP_LEN);
 
        dev->last_rx = jiffies;
-       ++priv->stats.rx_packets;
-       priv->stats.rx_bytes += skb->len;
+       ++dev->stats.rx_packets;
+       dev->stats.rx_bytes += skb->len;
 
        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        ib_dma_unmap_single(priv->ca, tx_req->mapping,
                            tx_req->skb->len, DMA_TO_DEVICE);
 
-       ++priv->stats.tx_packets;
-       priv->stats.tx_bytes += tx_req->skb->len;
+       ++dev->stats.tx_packets;
+       dev->stats.tx_bytes += tx_req->skb->len;
 
        dev_kfree_skb_any(tx_req->skb);
 
        if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-               ++priv->stats.tx_dropped;
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_dropped;
+               ++dev->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                return;
        }
        addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
                                 DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }
        if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                               address->ah, qpn, addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_errors;
                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
 
 
        neigh = ipoib_neigh_alloc(skb->dst->neighbour);
        if (!neigh) {
-               ++priv->stats.tx_dropped;
+               ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }
 err_path:
        ipoib_neigh_free(dev, neigh);
 err_drop:
-       ++priv->stats.tx_dropped;
+       ++dev->stats.tx_dropped;
        dev_kfree_skb_any(skb);
 
        spin_unlock(&priv->lock);
                        } else
                                __path_add(dev, path);
                } else {
-                       ++priv->stats.tx_dropped;
+                       ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
 
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
-               ++priv->stats.tx_dropped;
+               ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }
 
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock(&priv->lock);
                } else {
-                       ++priv->stats.tx_dropped;
+                       ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                                           IPOIB_QPN(phdr->hwaddr),
                                           IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
                                dev_kfree_skb_any(skb);
-                               ++priv->stats.tx_dropped;
+                               ++dev->stats.tx_dropped;
                                goto out;
                        }
 
        return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
-{
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-       return &priv->stats;
-}
-
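[Annotation: deleting ipoib_get_stats() outright is safe because the net core of this kernel era falls back to a default handler for devices that leave get_stats unset, and that default simply hands back the counters embedded in struct net_device. Roughly (a simplified sketch of the core fallback, not part of this patch):

	/* Simplified view of the net core's default stats handler: */
	static struct net_device_stats *internal_stats(struct net_device *dev)
	{
		return &dev->stats;
	}

Tools like ifconfig (via /proc/net/dev) therefore keep working with no driver hook at all.]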
 static void ipoib_timeout(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        *to_ipoib_neigh(neigh->neighbour) = NULL;
        while ((skb = __skb_dequeue(&neigh->queue))) {
-               ++priv->stats.tx_dropped;
+               ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }
        if (ipoib_cm_get(neigh))
        dev->stop                = ipoib_stop;
        dev->change_mtu          = ipoib_change_mtu;
        dev->hard_start_xmit     = ipoib_start_xmit;
-       dev->get_stats           = ipoib_get_stats;
        dev->tx_timeout          = ipoib_timeout;
        dev->header_ops          = &ipoib_header_ops;
        dev->set_multicast_list  = ipoib_set_mcast_list;
 
        }
 
        spin_lock_irqsave(&priv->tx_lock, flags);
-       priv->stats.tx_dropped += tx_dropped;
+       dev->stats.tx_dropped += tx_dropped;
        spin_unlock_irqrestore(&priv->tx_lock, flags);
 
        kfree(mcast);
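[Annotation: note the batching here — drops are tallied into a local tx_dropped and folded into dev->stats.tx_dropped in a single update under priv->tx_lock, presumably because these counters are plain unsigned longs with no atomicity of their own and the transmit path updates them under the same lock. A minimal sketch of that pattern (foo_flush_queue is hypothetical):

	static void foo_flush_queue(struct net_device *dev, spinlock_t *lock,
				    struct sk_buff_head *q)
	{
		struct sk_buff *skb;
		unsigned long dropped = 0;

		while ((skb = skb_dequeue(q))) {
			dropped++;
			dev_kfree_skb_any(skb);
		}

		spin_lock_irq(lock);
		dev->stats.tx_dropped += dropped;	/* one write under the lock */
		spin_unlock_irq(lock);
	}
]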
                /* Flush out any queued packets */
                spin_lock_irq(&priv->tx_lock);
                while (!skb_queue_empty(&mcast->pkt_queue)) {
-                       ++priv->stats.tx_dropped;
+                       ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
                }
                spin_unlock_irq(&priv->tx_lock);
        if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags)        ||
            !priv->broadcast                                    ||
            !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-               ++priv->stats.tx_dropped;
+               ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                goto unlock;
        }
                if (!mcast) {
                        ipoib_warn(priv, "unable to allocate memory for "
                                   "multicast structure\n");
-                       ++priv->stats.tx_dropped;
+                       ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                        goto out;
                }
                if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
                        skb_queue_tail(&mcast->pkt_queue, skb);
                else {
-                       ++priv->stats.tx_dropped;
+                       ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
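[Annotation: nothing visible to userspace should change with this conversion — ifconfig and /proc/net/dev report the same counters as before, now read from dev->stats rather than a private copy, and the driver loses a struct field and a trivial callback in the bargain.]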