return max_avail_segs * (p->mtu - 40);
}
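/*
 * For scale, a worked example of the return value above with
 * hypothetical numbers: the 40 subtracted from p->mtu is a minimal
 * 20-byte IPv4 header plus a minimal 20-byte TCP header, so the
 * function reports payload capacity rather than wire bytes. With a
 * 1500-byte MTU each segment carries 1500 - 40 = 1460 payload bytes,
 * and with, say, max_avail_segs == 64 the scheduler may push up to
 * 64 * 1460 = 93440 bytes for the port.
 */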
+#if 0
+
/*
* t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
* data that can be pushed per port.
t1_sched_update_parms(sge, port, 0, 0);
}
+#endif /* 0 */
+
/*
* get_clock() implements a ns clock (see ktime_get)
for_each_possible_cpu(cpu) {
struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
- ss->rx_packets += st->rx_packets;
ss->rx_cso_good += st->rx_cso_good;
- ss->tx_packets += st->tx_packets;
ss->tx_cso += st->tx_cso;
ss->tx_tso += st->tx_tso;
+ ss->tx_need_hdrroom += st->tx_need_hdrroom;
ss->vlan_xtract += st->vlan_xtract;
ss->vlan_insert += st->vlan_insert;
}
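/*
 * The loop above is the reader half of the usual lockless per-CPU
 * counter pattern: each writer bumps only its own CPU's copy, and a
 * reader folds every copy into one total. A minimal sketch of the same
 * pattern with hypothetical names (my_stats, my_counter):
 */
struct my_stats {
	u64 my_counter;
};

static struct my_stats *my_stats;	/* from alloc_percpu(struct my_stats) */

static void my_counter_inc(void)
{
	/* writer side: no lock, touch only this CPU's private copy */
	per_cpu_ptr(my_stats, smp_processor_id())->my_counter++;
}

static u64 my_counter_read(void)
{
	u64 sum = 0;
	int cpu;

	/* reader side: sum every CPU's private copy */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(my_stats, cpu)->my_counter;
	return sum;
}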
__skb_pull(skb, sizeof(*p));
st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
- st->rx_packets++;
skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
skb->dev->last_rx = jiffies;
if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
st->vlan_xtract++;
-#ifdef CONFIG_CHELSIO_T1_NAPI
- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
- ntohs(p->vlan));
-#else
- vlan_hwaccel_rx(skb, adapter->vlan_grp,
- ntohs(p->vlan));
-#endif
- } else {
-#ifdef CONFIG_CHELSIO_T1_NAPI
+ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+ ntohs(p->vlan));
+ } else
netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
- }
}
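/*
 * With the CONFIG_CHELSIO_T1_NAPI conditional gone, delivery always
 * runs in softirq context, so the NAPI-only receive helpers can be
 * called unconditionally. A condensed sketch of the dispatch above, in
 * hypothetical stand-alone form:
 */
static void my_deliver(struct sk_buff *skb, struct vlan_group *grp,
		       int vlan_valid, u16 tci)
{
	if (grp && vlan_valid)
		/* strip the tag and deliver via the matching VLAN device */
		vlan_hwaccel_receive_skb(skb, grp, tci);
	else
		netif_receive_skb(skb);
}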
/*
return (e->GenerationBit == Q->genbit);
}
-#ifdef CONFIG_CHELSIO_T1_NAPI
/*
* A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too light-weight to justify
{
struct adapter *adapter = container_of(napi, struct adapter, napi);
struct net_device *dev = adapter->port[0].dev;
- int work_done;
-
- work_done = process_responses(adapter, budget);
+ int work_done = process_responses(adapter, budget);
- if (likely(!responses_pending(adapter))) {
+ if (likely(work_done < budget)) {
netif_rx_complete(dev, napi);
writel(adapter->sge->respQ.cidx,
adapter->regs + A_SG_SLEEPING);
return work_done;
}
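/*
 * The work_done < budget test above encodes the standard NAPI
 * completion contract: a poll that drains less than its budget must
 * complete NAPI and re-arm the device interrupt, while one that
 * exhausts the budget returns without completing so it stays on the
 * poll list. A minimal sketch, assuming the same 2.6.24-era
 * netif_rx_complete() API used here and hypothetical my_* helpers:
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_process_ring(napi, budget);

	if (work_done < budget) {
		/* ring drained: stop polling, re-enable the interrupt */
		netif_rx_complete(my_netdev, napi);
		my_reenable_irq();	/* e.g. the A_SG_SLEEPING write above */
	}
	return work_done;
}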
-/*
- * NAPI version of the main interrupt handler.
- */
irqreturn_t t1_interrupt(int irq, void *data)
{
struct adapter *adapter = data;
else {
/* no data, no NAPI needed */
writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
- napi_enable(&adapter->napi); /* undo schedule_prep */
+ /* undo schedule_prep */
+ napi_enable(&adapter->napi);
}
}
return IRQ_HANDLED;
return IRQ_RETVAL(handled != 0);
}
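/*
 * The napi_enable() above backs out of a successful schedule_prep:
 * prep leaves NAPI marked as scheduled, and when the interrupt turns
 * out to carry no data, enabling it again clears that state without a
 * useless poll. A sketch of the interrupt-side pattern, with
 * hypothetical my_* helpers and the same era's NAPI API:
 */
static irqreturn_t my_intr(int irq, void *data)
{
	struct my_adapter *a = data;

	if (!my_irq_pending(a))
		return IRQ_NONE;

	my_ack_irq(a);
	if (netif_rx_schedule_prep(a->netdev, &a->napi)) {
		if (my_ring_has_data(a))
			__netif_rx_schedule(a->netdev, &a->napi);
		else
			napi_enable(&a->napi);	/* undo schedule_prep */
	}
	return IRQ_HANDLED;
}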
-#else
-/*
- * Main interrupt handler, optimized assuming that we took a 'DATA'
- * interrupt.
- *
- * 1. Clear the interrupt
- * 2. Loop while we find valid descriptors and process them; accumulate
- * information that can be processed after the loop
- * 3. Tell the SGE at which index we stopped processing descriptors
- * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
- * outstanding TX buffers waiting, replenish RX buffers, potentially
- * reenable upper layers if they were turned off due to lack of TX
- * resources which are available again.
- * 5. If we took an interrupt, but no valid respQ descriptors was found we
- * let the slow_intr_handler run and do error handling.
- */
-irqreturn_t t1_interrupt(int irq, void *cookie)
-{
- int work_done;
- struct adapter *adapter = cookie;
- struct respQ *Q = &adapter->sge->respQ;
-
- spin_lock(&adapter->async_lock);
-
- writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-
- if (likely(responses_pending(adapter)))
- work_done = process_responses(adapter, -1);
- else
- work_done = t1_slow_intr_handler(adapter);
-
- /*
- * The unconditional clearing of the PL_CAUSE above may have raced
- * with DMA completion and the corresponding generation of a response
- * to cause us to miss the resulting data interrupt. The next write
- * is also unconditional to recover the missed interrupt and render
- * this race harmless.
- */
- writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
-
- if (!work_done)
- adapter->sge->stats.unhandled_irqs++;
- spin_unlock(&adapter->async_lock);
- return IRQ_RETVAL(work_done != 0);
-}
-#endif
-
/*
* Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
*
{
struct adapter *adapter = dev->priv;
struct sge *sge = adapter->sge;
- struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
+ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
+ smp_processor_id());
struct cpl_tx_pkt *cpl;
struct sk_buff *orig_skb = skb;
int ret;
if (skb->protocol == htons(ETH_P_CPL5))
goto send;
+ /*
+ * We are using a non-standard hard_header_len.
+	 * Allocate more header room in the rare cases where it is not big enough.
+ */
+ if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+ skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
+ ++st->tx_need_hdrroom;
+ dev_kfree_skb_any(orig_skb);
+ if (!skb)
+ return NETDEV_TX_OK;
+ }
+
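/*
 * skb_realloc_headroom() never expands in place: it returns a fresh
 * copy with the requested headroom, or NULL on allocation failure,
 * which is why orig_skb is freed unconditionally above. A minimal
 * sketch of the idiom, using a hypothetical helper:
 */
static struct sk_buff *my_ensure_headroom(struct sk_buff *skb,
					  unsigned int needed)
{
	struct sk_buff *nskb;

	if (skb_headroom(skb) >= needed)
		return skb;		/* enough room already */

	nskb = skb_realloc_headroom(skb, needed);
	dev_kfree_skb_any(skb);		/* old buffer is done either way */
	return nskb;			/* NULL if allocation failed */
}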
if (skb_shinfo(skb)->gso_size) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
return NETDEV_TX_OK;
}
- /*
- * We are using a non-standard hard_header_len and some kernel
- * components, such as pktgen, do not handle it right.
- * Complain when this happens but try to fix things up.
- */
- if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
- pr_debug("%s: headroom %d header_len %d\n", dev->name,
- skb_headroom(skb), dev->hard_header_len);
-
- if (net_ratelimit())
- printk(KERN_ERR "%s: inadequate headroom in "
- "Tx packet\n", dev->name);
- skb = skb_realloc_headroom(skb, sizeof(*cpl));
- dev_kfree_skb_any(orig_skb);
- if (!skb)
- return NETDEV_TX_OK;
- }
-
if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol == IPPROTO_UDP) {
cpl->vlan_valid = 0;
send:
- st->tx_packets++;
dev->trans_start = jiffies;
ret = t1_sge_tx(skb, adapter, 0, dev);
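/*
 * For the UDP test earlier in this function (CHECKSUM_PARTIAL but no
 * UDP_CSUM_CAPABLE), the checksum must be computed in software before
 * the frame is queued. A minimal sketch of that fallback, assuming the
 * in-kernel skb_checksum_help() helper and hypothetical error handling:
 */
if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
    skb->ip_summed == CHECKSUM_PARTIAL &&
    ip_hdr(skb)->protocol == IPPROTO_UDP) {
	if (unlikely(skb_checksum_help(skb))) {
		dev_kfree_skb_any(skb);	/* cannot checksum: drop the frame */
		return NETDEV_TX_OK;
	}
}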