* for 64bit hardware platforms.
*
* TODO
- * Big-endian support
* rx_copybreak/alignment
* Scatter gather
* More testing
* Internal board variants. At the moment we have only one
*/
-static const struct velocity_info_tbl chip_info_table[] __devinitdata = {
+static struct velocity_info_tbl chip_info_table[] = {
{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
{ }
};
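
For orientation, each row of chip_info_table pairs a chip id with its marketing name, a TX queue count, and feature flags. A minimal sketch of the struct layout as inferred from the initializer above (field names are assumptions; the authoritative definition is in the driver header):

struct velocity_info_tbl {
	enum chip_type chip_id;	/* e.g. CHIP_TYPE_VT6110 */
	const char *name;	/* printed at probe time */
	int txqueue;		/* number of TX queues */
	u32 flags;		/* chip capability flags */
};
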
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
struct mac_regs __iomem * regs = vptr->mac_regs;
- unsigned short vid;
/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
mac_set_cam_mask(regs, vptr->mCAMmask);
- /* Enable first VCAM */
+ /* Enable VCAMs */
if (vptr->vlgrp) {
- for (vid = 0; vid < VLAN_VID_MASK; vid++) {
- if (vlan_group_get_device(vptr->vlgrp, vid)) {
- /* If Tagging option is enabled and
- VLAN ID is not zero, then
- turn on MCFG_RTGOPT also */
- if (vid != 0)
- WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
+ unsigned int vid, i = 0;
+
+ if (!vlan_group_get_device(vptr->vlgrp, 0))
+ WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
- mac_set_vlan_cam(regs, 0, (u8 *) &vid);
+ for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
+ if (vlan_group_get_device(vptr->vlgrp, vid)) {
+ mac_set_vlan_cam(regs, i, (u8 *) &vid);
+ vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+ if (++i >= VCAM_SIZE)
+ break;
}
}
- vptr->vCAMmask[0] |= 1;
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
- } else {
- u16 temp = 0;
- mac_set_vlan_cam(regs, 0, (u8 *) &temp);
- temp = 1;
- mac_set_vlan_cam_mask(regs, (u8 *) &temp);
}
}
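
The VCAM loop above enables one filter slot per configured VLAN and records each enabled slot as a single bit in the vCAMmask byte array. A minimal sketch of the bit indexing it relies on, assuming eight slots per byte as the i / 8 and i % 8 arithmetic implies (hypothetical helper, not driver code):

static inline void vcam_mask_set(u8 *mask, unsigned int slot)
{
	/* slot i lives at bit (i % 8) of byte (i / 8) */
	mask[slot / 8] |= 1 << (slot % 8);
}
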
+static void velocity_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ vptr->vlgrp = grp;
+}
+
static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
- vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
writew(vptr->options.numrx, &regs->RBRDU);
writel(vptr->rd_pool_dma, &regs->RDBaseLo);
vptr->int_mask = INT_MASK_DEF;
- writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
+ writel(vptr->rd_pool_dma, &regs->RDBaseLo);
writew(vptr->options.numrx - 1, &regs->RDCSize);
mac_rx_queue_run(regs);
mac_rx_queue_wake(regs);
writew(vptr->options.numtx - 1, &regs->TDCSize);
for (i = 0; i < vptr->num_txq; i++) {
- writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+ writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
mac_tx_queue_run(regs, i);
}
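
The cpu_to_le32() calls dropped in the two hunks above were double-swapping on big-endian hosts: writel() takes a CPU-order value and performs the little-endian store itself. An illustration using the driver's own registers:

	writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);		/* correct: writel() swaps */
	writel(cpu_to_le32(vptr->td_pool_dma[i]), &regs->TDBaseLo[i]);	/* double-swaps on BE */
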
dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;
+ dev->vlan_rx_register = velocity_vlan_rx_register;
#ifdef VELOCITY_ZERO_COPY_SUPPORT
dev->features |= NETIF_F_SG;
#endif
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_HW_VLAN_RX;
if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
dev->features |= NETIF_F_IP_CSUM;
dirty = vptr->rd_dirty - unusable;
for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
- vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
}
writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
struct rx_desc *rd = vptr->rd_ring + dirty;
/* Fine for an all zero Rx desc at init time as well */
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
if (!vptr->rd_info[dirty].skb) {
static int velocity_init_rd_ring(struct velocity_info *vptr)
{
int ret;
+ int mtu = vptr->dev->mtu;
+
+ vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
vptr->rd_info = kcalloc(vptr->options.numrx,
sizeof(struct velocity_rd_info), GFP_KERNEL);
if (!vptr->rd_info[rd_curr].skb)
break;
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
rmb();
/*
* Don't drop CE or RL error frame although RXOK is off
*/
- if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+ if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
if (velocity_receive_frame(vptr, rd_curr) < 0)
stats->rx_dropped++;
} else {
stats->rx_dropped++;
}
- rd->inten = 1;
+ rd->size |= RX_INTEN;
vptr->dev->last_rx = jiffies;
struct net_device_stats *stats = &vptr->stats;
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
struct rx_desc *rd = &(vptr->rd_ring[idx]);
- int pkt_len = rd->rdesc0.len;
+ int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, vptr->dev);
+ if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
+ vlan_hwaccel_rx(skb, vptr->vlgrp,
+ swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
+ } else
+ netif_rx(skb);
+
stats->rx_bytes += pkt_len;
- netif_rx(skb);
return 0;
}
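
The rdesc0 bitfields are gone after this patch: len is a 16-bit little-endian word carrying the frame length in bits 0-13 and the NIC-ownership flag in bit 15, which is why the receive path masks with 0x3fff and the other paths can OR OWNED_BY_NIC straight into the __le16 field. A sketch of the assumed layout (the real constant lives in the driver header):

/* Assumed: owner bit kept in little-endian form so no conversion is
 * needed when OR-ing it into rdesc0.len. */
#define OWNED_BY_NIC_SKETCH	cpu_to_le16(0x8000)

static inline int rd_pkt_len(const struct rx_desc *rd)
{
	return le16_to_cpu(rd->rdesc0.len) & 0x3fff;	/* bits 0-13: length */
}
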
*/
*((u32 *) & (rd->rdesc0)) = 0;
- rd->len = cpu_to_le32(vptr->rx_buf_sz);
- rd->inten = 1;
+ rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
rd->pa_low = cpu_to_le32(rd_info->skb_dma);
rd->pa_high = 0;
return 0;
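
The RX size word follows the same convention: for rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN here (and rd->size |= RX_INTEN in the receive path) to be endian-safe, RX_INTEN must itself be a little-endian constant, presumably along these lines:

#define RX_INTEN_SKETCH	cpu_to_le16(0x8000)	/* assumed; see the driver header */
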
td = &(vptr->td_rings[qnum][idx]);
tdinfo = &(vptr->td_infos[qnum][idx]);
- if (td->tdesc0.owner == OWNED_BY_NIC)
+ if (td->tdesc0.len & OWNED_BY_NIC)
break;
if ((works++ > 15))
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
- pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
#else
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif
struct velocity_info *vptr = netdev_priv(dev);
int ret;
- vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
-
ret = velocity_init_rings(vptr);
if (ret < 0)
goto out;
velocity_free_rd_ring(vptr);
dev->mtu = new_mtu;
- if (new_mtu > 8192)
- vptr->rx_buf_sz = 9 * 1024;
- else if (new_mtu > 4096)
- vptr->rx_buf_sz = 8192;
- else
- vptr->rx_buf_sz = 4 * 1024;
ret = velocity_init_rd_ring(vptr);
if (ret < 0)
struct velocity_td_info *tdinfo;
unsigned long flags;
int index;
-
int pktlen = skb->len;
+ __le16 len = cpu_to_le16(pktlen);
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
td_ptr = &(vptr->td_rings[qnum][index]);
tdinfo = &(vptr->td_infos[qnum][index]);
- td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
td_ptr->tdesc1.TCR = TCR0_TIC;
- td_ptr->td_buf[0].queue = 0;
+ td_ptr->td_buf[0].size &= ~TD_QUEUE;
/*
* Pad short frames.
if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */
pktlen = ETH_ZLEN;
+ len = cpu_to_le16(ETH_ZLEN);
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
tdinfo->skb = skb;
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 0) {
if (nfrags > 6) {
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize =
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else {
int i = 0;
tdinfo->nskb_dma = 0;
- tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
/* FIXME: support 48bit DMA later */
td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
td_ptr->td_buf[i].pa_high = 0;
- td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;
+ td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = ((void *) page_address(frag->page + frag->page_offset));
+ void *addr = (void *)page_address(frag->page) + frag->page_offset;
tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].bufsize = frag->size;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
}
tdinfo->nskb_dma = i - 1;
- td_ptr->tdesc1.CMDZ = i;
}
} else
*/
tdinfo->skb = skb;
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len;
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
}
+ td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
- td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
- td_ptr->tdesc1.pqinf.priority = 0;
- td_ptr->tdesc1.pqinf.CFI = 0;
+ td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
td_ptr->tdesc1.TCR |= TCR0_VETAG;
}
if (prev < 0)
prev = vptr->options.numtx - 1;
- td_ptr->tdesc0.owner = OWNED_BY_NIC;
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
vptr->td_used[qnum]++;
vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
netif_stop_queue(dev);
td_ptr = &(vptr->td_rings[qnum][prev]);
- td_ptr->td_buf[0].queue = 1;
+ td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
}
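
Two conventions in the TX path above deserve spelling out. First, the old TCPLS and CMDZ bitfields collapse into one cmd byte: the low bits carry the TCPLS mode and the high nibble carries the segment count, so the * 16 is a << 4. Second, chaining is now expressed by OR-ing TD_QUEUE into the previous descriptor's first buffer-size word, which again only works if TD_QUEUE is a little-endian constant. A sketch of the assumed encoding:

/* assumed little-endian queue bit, by analogy with OWNED_BY_NIC */
#define TD_QUEUE_SKETCH	cpu_to_le16(0x8000)

/* equivalent form of the cmd assignment above: CMDZ (the segment
 * count, nskb_dma + 1) packed into bits 4-7, TCPLS mode in the low bits */
td_ptr->tdesc1.cmd = TCPLS_NORMAL | ((tdinfo->nskb_dma + 1) << 4);
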
dev->trans_start = jiffies;
velocity_save_context(vptr, &vptr->context);
velocity_shutdown(vptr);
velocity_set_wol(vptr);
- pci_enable_wake(pdev, 3, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
pci_set_power_state(pdev, PCI_D3hot);
} else {
velocity_save_context(vptr, &vptr->context);
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct velocity_info *vptr;
+ unsigned long flags;
- if (ifa) {
- struct net_device *dev = ifa->ifa_dev->dev;
- struct velocity_info *vptr;
- unsigned long flags;
+ if (dev_net(dev) != &init_net)
+ return NOTIFY_DONE;
- spin_lock_irqsave(&velocity_dev_list_lock, flags);
- list_for_each_entry(vptr, &velocity_dev_list, list) {
- if (vptr->dev == dev) {
- velocity_get_ip(vptr);
- break;
- }
+ spin_lock_irqsave(&velocity_dev_list_lock, flags);
+ list_for_each_entry(vptr, &velocity_dev_list, list) {
+ if (vptr->dev == dev) {
+ velocity_get_ip(vptr);
+ break;
}
- spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
}
+ spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+
return NOTIFY_DONE;
}
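
For context, velocity_netdev_event is delivered through the inetaddr notifier chain so the driver can refresh its stored IP address when an interface address changes. A minimal sketch of the hookup, assuming the usual notifier_block pattern (the block name here is hypothetical):

static struct notifier_block velocity_inetaddr_notifier = {
	.notifier_call = velocity_netdev_event,
};

/* registered at module init, removed at module exit */
register_inetaddr_notifier(&velocity_inetaddr_notifier);
unregister_inetaddr_notifier(&velocity_inetaddr_notifier);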