/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

#include "benet.h"
/* number of bytes of RX frame that are copied to skb->data */
#define BE_HDR_LEN 64

#define NETIF_RX(skb) netif_receive_skb(skb)
#define VLAN_ACCEL_RX(skb, pnob, vt) \
		vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
/*
 * This function notifies BladeEngine of the number of completion
 * entries processed from the specified completion queue by writing
 * the number of popped entries to the doorbell.
 *
 * pnob	- Pointer to the NetObject structure
 * n	- Number of completion entries processed
 * cq_id	- Queue ID of the completion queue for which notification
 *		is sent
 * re_arm	- 1 - re-arm the completion ring to generate an event
 *		- 0 - do not re-arm the completion ring to generate an event
 */
void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
{
	struct CQ_DB_AMAP cqdb;

	cqdb.dw[0] = 0;	/* initialize the doorbell word */
	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
}
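
/*
 * Illustrative usage (not from this file): a service loop that pops
 * 16 entries from the RX completion queue rings the doorbell and
 * re-arms the CQ with:
 *
 *	be_notify_cmpl(pnob, 16, pnob->rx_cq_id, 1);
 *
 * be_poll() below passes re_arm as 0 when it exhausts its budget,
 * keeping the CQ quiet until NAPI polls again.
 */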
/*
 * Adds the additional receive frags indicated by BE, starting from the
 * given frag index (fi), to the specified skb's frag list.
 */
static void
add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
	      u32 nresid, u32 fi)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 sk_frag_idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size;

	sk_frag_idx = skb_shinfo(skb)->nr_frags;
	while (nresid) {
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = NULL;
		if ((rx_page_info->page_offset) ||
		    (pnob->rx_pg_shared == false)) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		n = min(nresid, frag_sz);
		skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
		skb_shinfo(skb)->frags[sk_frag_idx].page_offset
		    = rx_page_info->page_offset;
		skb_shinfo(skb)->frags[sk_frag_idx].size = n;

		sk_frag_idx++;
		skb->len += n;
		skb->data_len += n;
		skb_shinfo(skb)->nr_frags++;
		nresid -= n;

		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
}
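
/*
 * Worked example (sizes illustrative): with frag_sz = 2048, a
 * 6000-byte jumbo frame leaves nresid = 6000 - 2048 = 3952 after the
 * first fragment is consumed; the loop above then attaches two more
 * page fragments of 2048 and 1904 bytes, growing skb->len and
 * skb->data_len by the same amounts and bumping nr_frags twice.
 */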
/*
 * This function processes incoming nic packets over various Rx queues.
 * It takes the NetObject and the current Rx completion queue entry as
 * arguments.
 */
static inline int process_nic_rx_completion(struct be_net_object *pnob,
					    struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct sk_buff *skb;
	int udpcksm, tcpcksm;
	int n;
	u32 nresid, fi;
	u32 frag_sz = pnob->rx_buf_size;
	u8 *va;
	struct be_rx_page_info *rx_page_info;
	u32 numfrags, vtp, vtm, vlan_tag, pktsize;

	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	BUG_ON(fi >= (int)pnob->rx_q_len);

	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = NULL;
	/*
	 * If one page is used per fragment or if this is the second half
	 * of the page, unmap the page here.
	 */
	if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus), frag_sz,
			       PCI_DMA_FROMDEVICE);
	}

	atomic_dec(&pnob->rx_q_posted);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
	/*
	 * Get rid of RX flush completions first.
	 */
	if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return 0;
	}
	skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (skb == NULL) {
		dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		goto free_frags;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = pnob->netdev;

	n = min(pktsize, frag_sz);

	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
	prefetch(va);

	skb->len = n;
	skb->data_len = n;
	if (n <= BE_HDR_LEN) {
		memcpy(skb->data, va, n);
		put_page(rx_page_info->page);
		skb->data_len -= n;
		skb->tail += n;
	} else {
		/* Setup the SKB with page buffer information */
		skb_shinfo(skb)->frags[0].page = rx_page_info->page;
		skb_shinfo(skb)->nr_frags++;

		/* Copy the header into the skb_data */
		memcpy(skb->data, va, BE_HDR_LEN);
		skb_shinfo(skb)->frags[0].page_offset =
		    rx_page_info->page_offset + BE_HDR_LEN;
		skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
		skb->data_len -= BE_HDR_LEN;
		skb->tail += BE_HDR_LEN;
	}
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
	nresid = pktsize - n;
	skb->protocol = eth_type_trans(skb, pnob->netdev);

	if ((tcpcksm || udpcksm) && adapter->rx_csum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
	/*
	 * If we have more bytes left, the frame has been
	 * given to us in multiple fragments.  This happens
	 * with Jumbo frames.  Add the remaining fragments to
	 * the skb->frags[] array.
	 */
	if (nresid)
		add_skb_frags(pnob, skb, nresid, fi);

	/* Update the true size of the skb. */
	skb->truesize = skb->len + sizeof(struct sk_buff);
	/*
	 * If this is an 802.3 frame or 802.2 LLC frame
	 * (i.e. it contains a length field in the MAC header)
	 * and the frame length is greater than 64 bytes:
	 */
	if (((skb->protocol == ntohs(ETH_P_802_2)) ||
	     (skb->protocol == ntohs(ETH_P_802_3)))
	    && (pktsize > BE_HDR_LEN)) {
		/*
		 * If the length given in the MAC header is less than the
		 * frame size, the frame is erroneous; drop it.
		 */
		if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
			/* Increment Non Ether type II frames dropped */
			adapter->be_stat.bes_802_3_dropped_frames++;
			kfree_skb(skb);
			return 0;
		}
		/*
		 * Else if the length given in the MAC header is greater
		 * than the frame size, we should not be seeing such
		 * frames; count it as malformed and pass it to the stack.
		 */
		else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
			/* Increment Non Ether type II frames malformed */
			adapter->be_stat.bes_802_3_malformed_frames++;
		}
	}
	vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	if (vtp && vtm) {
		/* A VLAN tag is present in the packet and BE found
		 * that the tag matched an entry in the VLAN table.
		 */
		if (!pnob->vlan_grp || pnob->num_vlans == 0) {
			/* But we have no VLANs configured.
			 * This should never happen.  Drop the packet.
			 */
			dev_info(&pnob->netdev->dev,
			    "BladeEngine: Unexpected vlan tagged packet\n");
			kfree_skb(skb);
			return 0;
		}
		/* Pass the VLAN packet to the stack. */
		vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
		VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
	} else {
		NETIF_RX(skb);
	}
	return 0;
free_frags:
	/* Free all frags associated with the current rxcp. */
	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	while (numfrags-- > 1) {
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)
		    pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
	return -ENOMEM;
}
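
/*
 * Copy-break sketch (sizes illustrative): with BE_HDR_LEN = 64, a
 * 1514-byte frame gets its first 64 bytes copied into skb->data and
 * the rest left in the receive page as frags[0]; a 60-byte frame is
 * copied wholesale and its page released with put_page(), so small
 * packets never pin a receive page.
 */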
static void process_nic_rx_completion_lro(struct be_net_object *pnob,
					  struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	unsigned int udpcksm, tcpcksm;
	u32 numfrags, vlanf, vtm, vlan_tag, nresid;
	u16 vlant;
	unsigned int fi, idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size, pktsize;
	bool rx_coal = (adapter->max_rx_coal <= 1) ? 0 : 1;
	u8 err, *va;
	__wsum csum = 0;
	if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
		/* Drop the packet and move to the next completion. */
		adapter->be_stat.bes_rx_misc_pkts++;
		return;
	}
	err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
	if (err || !rx_coal) {
		/* We won't coalesce Rx packets if the err bit is set;
		 * take the path of normal completion processing.
		 */
		process_nic_rx_completion(pnob, rxcp);
		return;
	}
	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	BUG_ON(fi >= (int)pnob->rx_q_len);

	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = (void *)NULL;
	/* If one page is used per fragment or if this is the
	 * second half of the page, unmap the page here.
	 */
	if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus),
			       frag_sz, PCI_DMA_FROMDEVICE);
	}
	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
	vlant = be16_to_cpu(vlan_tag);
	vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);

	atomic_dec(&pnob->rx_q_posted);

	if (tcpcksm && udpcksm && pktsize == 32) {
		/* flush completion entries */
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return;
	}
	/* Only one of udpcksm and tcpcksm can be set. */
	BUG_ON(udpcksm && tcpcksm);

	/* Jumbo frames could come in multiple fragments. */
	BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
	n = min(pktsize, frag_sz);
	nresid = pktsize - n;	/* will be useful for jumbo pkts */
	idx = 0;

	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
	prefetch(va);
	rx_frags[idx].page = rx_page_info->page;
	rx_frags[idx].page_offset = rx_page_info->page_offset;
	rx_frags[idx].size = n;
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
	/* If we got multiple fragments, we have more data. */
	while (nresid) {
		idx++;
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		n = min(nresid, frag_sz);
		rx_frags[idx].page = rx_page_info->page;
		rx_frags[idx].page_offset = rx_page_info->page_offset;
		rx_frags[idx].size = n;

		nresid -= n;
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
	if (likely(!(vlanf && vtm))) {
		lro_receive_frags(&pnob->lro_mgr, rx_frags,
				  pktsize, pktsize,
				  (void *)(unsigned long)csum, csum);
	} else {
		/* A VLAN tag is present in the packet and BE found
		 * that the tag matched an entry in the VLAN table.
		 */
		if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
			/* But we have no VLANs configured.
			 * This should never happen.  Drop the packet.
			 */
			dev_info(&pnob->netdev->dev,
			    "BladeEngine: Unexpected vlan tagged packet\n");
			return;
		}
		/* Pass the VLAN packet to the stack. */
		lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
					       rx_frags, pktsize, pktsize,
					       pnob->vlan_grp, vlant,
					       (void *)(unsigned long)csum,
					       csum);
	}

	adapter->be_stat.bes_rx_coal++;
}
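
/*
 * Fragment-count check, worked through (sizes illustrative): a
 * 9000-byte jumbo frame with a 4096-byte rx_buf_size must arrive as
 * (9000 + 4095) / 4096 = 3 fragments, which is exactly what the
 * BUG_ON() on numfrags above asserts before the frags are handed to
 * the LRO manager.
 */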
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
{
	struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
	u32 valid, ct;

	valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
	if (valid == 0)
		return NULL;
	ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
	if (ct != 0) {
		/* Invalid chute #; treat as error. */
		AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
	}
	be_adv_rxcq_tl(pnob);
	AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
	return rxcp;
}
static void update_rx_rate(struct be_adapter *adapter)
{
	/* Update the rate once every two seconds. */
	if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
		u32 r;
		r = adapter->eth_rx_bytes /
		    ((jiffies - adapter->eth_rx_jiffies) / (HZ));
		r = (r / 1000000);	/* MB/Sec */
		/* Mega Bits/Sec */
		adapter->be_stat.bes_eth_rx_rate = (r * 8);
		adapter->eth_rx_jiffies = jiffies;
		adapter->eth_rx_bytes = 0;
	}
}
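
/*
 * Rate arithmetic, worked through (numbers illustrative): if
 * 500,000,000 bytes arrived over the last 2 seconds, then
 * r = 500000000 / 2 = 250000000 bytes/sec, scaled to 250 MB/sec,
 * and bes_eth_rx_rate is stored as 250 * 8 = 2000 megabits/sec.
 */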
static int process_rx_completions(struct be_net_object *pnob, int max_work)
{
	struct be_adapter *adapter = pnob->adapter;
	struct ETH_RX_COMPL_AMAP *rxcp;
	u32 nc = 0;
	unsigned int pktsize;

	while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
		prefetch(rxcp);
		pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
		process_nic_rx_completion_lro(pnob, rxcp);
		adapter->eth_rx_bytes += pktsize;
		update_rx_rate(adapter);
		nc++;
		max_work--;
		adapter->be_stat.bes_rx_compl++;
	}
	if (likely(adapter->max_rx_coal > 1)) {
		adapter->be_stat.bes_rx_flush++;
		lro_flush_all(&pnob->lro_mgr);
	}

	/* Refill the queue */
	if (atomic_read(&pnob->rx_q_posted) < 900)
		be_post_eth_rx_buffs(pnob);

	return nc;
}
static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
{
	struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
	u32 valid;

	valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
	if (valid == 0)
		return NULL;
	AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
	be_adv_txcq_tl(pnob);
	return txcp;
}
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
{
	struct be_adapter *adapter = pnob->adapter;
	int cur_index, tx_wrbs_completed = 0;
	struct sk_buff *skb;
	u64 busaddr, pa, pa_lo, pa_hi;
	struct ETH_WRB_AMAP *wrb;
	u32 frag_len, last_index, j;

	last_index = tx_compl_lastwrb_idx_get(pnob);
	BUG_ON(last_index != end_idx);
	pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
	do {
		cur_index = pnob->tx_q_tl;
		wrb = &pnob->tx_q[cur_index];
		pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
		pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
		frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
		busaddr = (pa_hi << 32) | pa_lo;
		if (busaddr != 0) {
			pa = le64_to_cpu(busaddr);
			pci_unmap_single(adapter->pdev, pa,
					 frag_len, PCI_DMA_TODEVICE);
		}
		if (cur_index == last_index) {
			skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
			BUG_ON(!skb);
			for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
				struct skb_frag_struct *frag;
				frag = &skb_shinfo(skb)->frags[j];
				pci_unmap_page(adapter->pdev,
					       (ulong) frag->page, frag->size,
					       PCI_DMA_TODEVICE);
			}
			kfree_skb(skb);
			pnob->tx_ctxt[cur_index] = NULL;
		} else {
			BUG_ON(pnob->tx_ctxt[cur_index]);
		}
		tx_wrbs_completed++;
		be_adv_txq_tl(pnob);
	} while (cur_index != last_index);
	atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
}
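
/*
 * Example walk (indexes illustrative): for an skb posted as WRBs at
 * ring indexes 10..12, the TX completion carries wrb_index 12; the
 * loop above unmaps indexes 10, 11 and 12 in turn, frees the skb
 * stored at the last index, and returns 3 WRBs to the queue by
 * subtracting 3 from tx_q_used.
 */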
/*
 * There is no need to take an SMP lock here since currently
 * we have only one instance of the tasklet that does completion
 * processing.
 */
static void process_nic_tx_completions(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct ETH_TX_COMPL_AMAP *txcp;
	struct net_device *netdev = pnob->netdev;
	u32 end_idx, num_processed = 0;

	adapter->be_stat.bes_tx_events++;

	while ((txcp = be_get_tx_cmpl(pnob))) {
		end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
		process_one_tx_compl(pnob, end_idx);
		num_processed++;
		adapter->be_stat.bes_tx_compl++;
	}
	be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
	/*
	 * We got Tx completions and have usable WRBs.
	 * If the netdev's queue has been stopped
	 * because we had run out of WRBs, wake it now.
	 */
	spin_lock(&adapter->txq_lock);
	if (netif_queue_stopped(netdev)
	    && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
		netif_wake_queue(netdev);
	}
	spin_unlock(&adapter->txq_lock);
}
static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
{
	u32 nposted = 0;
	struct ETH_RX_D_AMAP *rxd = NULL;
	struct be_recv_buffer *rxbp;
	void **rx_ctxp;
	struct RQ_DB_AMAP rqdb;

	rx_ctxp = pnob->rx_ctxt;

	while (!list_empty(rxbl) &&
	       (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {

		rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
		list_del(&rxbp->rxb_list);
		rxd = pnob->rx_q + pnob->rx_q_hd;
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);

		rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
		be_adv_rxq_hd(pnob);
		nposted++;
	}

	if (nposted) {
		/* Now ring the doorbell to notify BladeEngine. */
		rqdb.dw[0] = 0;	/* initialize */
		AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
		AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
		PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
	}
	atomic_add(nposted, &pnob->rx_q_posted);
	return nposted;
}
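
/*
 * Doorbell batching (hypothetical numbers): the loop above caps a
 * single doorbell ring at 255 descriptors, presumably the width of
 * the numPosted field, so posting 300 buffers takes two calls:
 *
 *	posted = post_rx_buffs(pnob, &rxbl);	 // rings DB with 255
 *	posted += post_rx_buffs(pnob, &rxbl);	 // rings DB with 45
 */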
void be_post_eth_rx_buffs(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 num_bufs, r;
	u64 busaddr = 0, tmp_pa;
	u32 max_bufs, pg_hd;
	u32 frag_size;
	struct be_recv_buffer *rxbp;
	struct list_head rxbl;
	struct be_rx_page_info *rx_page_info;
	struct page *page = NULL;
	u32 page_order = 0;
	gfp_t alloc_flags = GFP_ATOMIC;

	BUG_ON(!adapter);

	max_bufs = 64;		/* should be even # <= 255 */

	frag_size = pnob->rx_buf_size;
	page_order = get_order(frag_size);

	if (frag_size == 8192)
		alloc_flags |= (gfp_t) __GFP_COMP;
	/*
	 * Form a linked list of RECV_BUFFER structures to be posted.
	 * We will post an even number of buffers so that pages can be
	 * shared.
	 */
	INIT_LIST_HEAD(&rxbl);

	for (num_bufs = 0; num_bufs < max_bufs &&
	     !pnob->rx_page_info[pnob->rx_pg_info_hd].page; ++num_bufs) {

		rxbp = &pnob->eth_rx_bufs[num_bufs];
		pg_hd = pnob->rx_pg_info_hd;
		rx_page_info = &pnob->rx_page_info[pg_hd];

		if (!page) {
			page = alloc_pages(alloc_flags, page_order);
			if (unlikely(page == NULL)) {
				adapter->be_stat.bes_ethrx_post_fail++;
				pnob->rxbuf_post_fail++;
				break;
			}
			pnob->rxbuf_post_fail = 0;
			busaddr = pci_map_page(adapter->pdev, page, 0,
					       frag_size, PCI_DMA_FROMDEVICE);
			rx_page_info->page_offset = 0;
			rx_page_info->page = page;
			/*
			 * If we are sharing a page among two skbs,
			 * alloc a new one on the next iteration
			 */
			if (pnob->rx_pg_shared == false)
				page = NULL;
		} else {
			get_page(page);
			rx_page_info->page_offset += frag_size;
			rx_page_info->page = page;
			/*
			 * We are finished with the alloc'ed page;
			 * alloc a new one on the next iteration
			 */
			page = NULL;
		}
		rxbp->rxb_ctxt = (void *)rx_page_info;
		index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);

		pci_unmap_addr_set(rx_page_info, bus, busaddr);
		tmp_pa = busaddr + rx_page_info->page_offset;
		rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
		rxbp->rxb_pa_hi = (tmp_pa >> 32);
		rxbp->rxb_len = frag_size;
		list_add_tail(&rxbp->rxb_list, &rxbl);
	}			/* end of for */

	r = post_rx_buffs(pnob, &rxbl);
	BUG_ON(r != num_bufs);
	return;
}
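
/*
 * Page-sharing sketch (sizes illustrative): with rx_buf_size = 2048
 * on a 4096-byte page and rx_pg_shared true, one iteration allocates
 * and maps a page for a buffer at page_offset 0, and the next calls
 * get_page() and reuses it at page_offset 2048; keeping max_bufs even
 * ensures no half-used page is left behind.
 */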
/*
 * Interrupt service routine for the network function.  We just schedule
 * the tasklet, which does all completion processing.
 */
irqreturn_t be_int(int irq, void *dev)
{
	struct net_device *netdev = dev;
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	u32 isr;

	isr = CSR_READ(&pnob->fn_obj, cev.isr1);
	if (isr == 0)
		return IRQ_NONE;
	spin_lock(&adapter->int_lock);
	adapter->isr |= isr;
	spin_unlock(&adapter->int_lock);
	adapter->be_stat.bes_ints++;
	tasklet_schedule(&adapter->sts_handler);
	return IRQ_HANDLED;
}
/*
 * Poll function called by NAPI with a work budget.
 * We process as many UC, BC and MC receive completions
 * as the budget allows and return the actual number of
 * RX statuses processed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_net_object *pnob =
		container_of(napi, struct be_net_object, napi);
	u32 work_done;

	pnob->adapter->be_stat.bes_polls++;
	work_done = process_rx_completions(pnob, budget);
	BUG_ON(work_done > budget);

	/* All consumed */
	if (work_done < budget) {
		netif_rx_complete(napi);
		/* enable intr */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
	}
	return work_done;
}
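
/*
 * be_poll() is hooked into NAPI when the interface is set up; a
 * minimal sketch of that registration (the weight of 64 is an
 * assumption, not taken from this file):
 *
 *	netif_napi_add(netdev, &pnob->napi, be_poll, 64);
 */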
static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
{
	struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);

	if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
		return NULL;
	be_adv_eq_tl(pnob);
	return eqp;
}
/*
 * Processes all valid events in the event ring associated with the given
 * NetObject.  Also notifies BE of the number of events processed.
 */
static inline u32 process_events(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct EQ_ENTRY_AMAP *eqp;
	u32 rid, num_events = 0;
	struct net_device *netdev = pnob->netdev;

	while ((eqp = get_event(pnob)) != NULL) {
		adapter->be_stat.bes_events++;
		rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
		if (rid == pnob->rx_cq_id) {
			adapter->be_stat.bes_rx_events++;
			netif_rx_schedule(&pnob->napi);
		} else if (rid == pnob->tx_cq_id) {
			process_nic_tx_completions(pnob);
		} else if (rid == pnob->mcc_cq_id) {
			be_mcc_process_cq(&pnob->mcc_q_obj, 1);
		} else {
			dev_info(&netdev->dev,
				 "Invalid EQ ResourceID %d\n", rid);
		}
		AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
		AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
		num_events++;
	}
	return num_events;
}
static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
{
	int status;
	struct be_eq_object *eq_objectp;

	/* Update once a second. */
	if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
		/* One second has elapsed since the last update. */
		u32 r, new_eqd = -1;

		r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
		r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
		adapter->be_stat.bes_ips = r;
		adapter->ips_jiffies = jiffies;
		adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
		if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
			new_eqd = (adapter->cur_eqd + 8);
		if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
			new_eqd = (adapter->cur_eqd - 8);
		if (adapter->enable_aic && new_eqd != -1) {
			eq_objectp = &pnob->event_q_obj;
			status = be_eq_modify_delay(&pnob->fn_obj, 1,
						    &eq_objectp, &new_eqd,
						    NULL, NULL, NULL);
			if (status == BE_SUCCESS)
				adapter->cur_eqd = new_eqd;
		}
	}
}
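
/*
 * AIC example (watermark values illustrative): if the measured
 * interrupt rate r climbs above IPS_HI_WM, cur_eqd is nudged up by 8
 * so the EQ delays longer and batches more events per interrupt;
 * when r falls below IPS_LO_WM it is nudged back down by 8, always
 * staying within [min_eqd, max_eqd].
 */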
/*
 * This function notifies BladeEngine of how many events were processed
 * from the event queue by ringing the corresponding doorbell and
 * optionally re-arms the event queue.
 * n	- number of events processed
 * re_arm	- 1 - re-arm the EQ, 0 - do not re-arm the EQ
 */
static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
{
	struct CQ_DB_AMAP eqdb;

	eqdb.dw[0] = 0;
	AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
	AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
	/*
	 * Under some situations we see an interrupt and no valid
	 * EQ entry.  To keep going, we need to ring the doorbell
	 * even if the number popped is 0.
	 */
	PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
}
/*
 * Called from the tasklet scheduled by the ISR.  All real interrupt
 * processing is done here.
 */
void be_process_intr(unsigned long context)
{
	struct be_adapter *adapter = (struct be_adapter *)context;
	struct be_net_object *pnob = adapter->net_obj;
	u32 isr, n;
	ulong flags = 0;

	isr = adapter->isr;

	/*
	 * We create only one NIC event queue in Linux.  An event is
	 * expected only in the first event queue.
	 */
	BUG_ON(isr & 0xfffffffe);
	if ((isr & 1) == 0)
		return;		/* not our interrupt */
	n = process_events(pnob);
	/*
	 * Clear the event bit.  adapter->isr is set by the
	 * hard interrupt; prevent a race with the lock.
	 */
	spin_lock_irqsave(&adapter->int_lock, flags);
	adapter->isr &= ~1;
	spin_unlock_irqrestore(&adapter->int_lock, flags);
	be_notify_event(pnob, n, 1);
	/*
	 * If previous allocation attempts had failed and
	 * BE has used up all posted buffers, post RX buffers here.
	 */
	if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
		be_post_eth_rx_buffs(pnob);
	update_eqd(adapter, pnob);
}