/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

#include "benet.h"
/* number of bytes of RX frame that are copied to skb->data */
#define BE_HDR_LEN 64
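/*
 * Wrappers around the stack receive entry points: NETIF_RX for ordinary
 * frames, VLAN_ACCEL_RX for hardware-accelerated VLAN-tagged frames.
 */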
#define NETIF_RX(skb) netif_receive_skb(skb)
#define VLAN_ACCEL_RX(skb, pnob, vt) \
		vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
/*
   This function notifies BladeEngine of the number of completion
   entries processed from the specified completion queue by writing
   the number of popped entries to the doorbell.

   pnob - Pointer to the NetObject structure
   n - Number of completion entries processed
   cq_id - Queue ID of the completion queue for which notification
	is being done.
   re_arm - 1 - re-arm the completion ring to generate an event
	  - 0 - don't re-arm the completion ring to generate an event
*/
void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
{
	struct CQ_DB_AMAP cqdb;

	cqdb.dw[0] = 0;	/* initialize the doorbell word */
	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
}
/*
 * Adds the additional receive frags indicated by BE, starting from the
 * given frag index (fi), to the specified skb's frag list.
 */
static void
add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
	      u32 nresid, u32 fi)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 sk_frag_idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size;
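	/*
	 * Walk the RX ring starting at fi, attaching each page buffer as
	 * an skb page fragment until all nresid remaining bytes of the
	 * frame are accounted for.
	 */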
	sk_frag_idx = skb_shinfo(skb)->nr_frags;
	while (nresid) {
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = NULL;
		if ((rx_page_info->page_offset) ||
		    (pnob->rx_pg_shared == false)) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		n = min(nresid, frag_sz);
		skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
		skb_shinfo(skb)->frags[sk_frag_idx].page_offset
		    = rx_page_info->page_offset;
		skb_shinfo(skb)->frags[sk_frag_idx].size = n;
		sk_frag_idx++;
		skb->len += n;
		skb->data_len += n;
		skb_shinfo(skb)->nr_frags++;
		nresid -= n;

		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
}
/*
 * This function processes incoming nic packets over various Rx queues.
 * It takes the NetObject and the current Rx completion descriptor
 * entry as arguments.
 */
static inline int process_nic_rx_completion(struct be_net_object *pnob,
					    struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct sk_buff *skb;
	int udpcksm, tcpcksm;
	u32 nresid, n, fi = 0;
	u32 frag_sz = pnob->rx_buf_size;
	u8 *va;
	struct be_rx_page_info *rx_page_info;
	u32 numfrags, vtp, vtm, vlan_tag, pktsize;

	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	BUG_ON(fi >= (int)pnob->rx_q_len);

	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = NULL;
	/*
	 * If one page is used per fragment or if this is the second half
	 * of the page, unmap the page here.
	 */
	if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus), frag_sz,
			       PCI_DMA_FROMDEVICE);
	}
	atomic_dec(&pnob->rx_q_posted);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
	/*
	 * Get rid of RX flush completions first.
	 */
	if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return 0;
	}
	skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (skb == NULL) {
		dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		goto free_frags;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	skb->dev = pnob->netdev;
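	/*
	 * Small frames (up to BE_HDR_LEN bytes) are copied entirely into
	 * the skb's linear data; for larger frames only the header is
	 * copied and the rest of the fragment stays attached as a page
	 * buffer.
	 */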
	n = min(pktsize, frag_sz);

	va = page_address(rx_page_info->page) + rx_page_info->page_offset;

	skb->len = skb->data_len = n;
	if (n <= BE_HDR_LEN) {
		memcpy(skb->data, va, n);
		put_page(rx_page_info->page);
		skb->data_len -= n;
		skb->tail += n;
	} else {
		/* Setup the SKB with page buffer information */
		skb_shinfo(skb)->frags[0].page = rx_page_info->page;
		skb_shinfo(skb)->nr_frags++;

		/* Copy the header into the skb_data */
		memcpy(skb->data, va, BE_HDR_LEN);
		skb_shinfo(skb)->frags[0].page_offset =
		    rx_page_info->page_offset + BE_HDR_LEN;
		skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
		skb->data_len -= BE_HDR_LEN;
		skb->tail += BE_HDR_LEN;
	}
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
	nresid = pktsize - n;
	skb->protocol = eth_type_trans(skb, pnob->netdev);

	if ((tcpcksm || udpcksm) && adapter->rx_csum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
	/*
	 * If we have more bytes left, the frame has been given to us in
	 * multiple fragments. This happens with Jumbo frames. Add the
	 * remaining fragments to the skb->frags[] array.
	 */
	if (nresid)
		add_skb_frags(pnob, skb, nresid, fi);
	/* update the true size of the skb. */
	skb->truesize = skb->len + sizeof(struct sk_buff);
	/*
	 * If this is an 802.3 frame or 802.2 LLC frame (i.e., it contains
	 * a length field in the MAC header) and the frame length is
	 * greater than 64 bytes:
	 */
	if (((skb->protocol == ntohs(ETH_P_802_2)) ||
	     (skb->protocol == ntohs(ETH_P_802_3)))
	    && (pktsize > BE_HDR_LEN)) {
		/*
		 * If the length given in the MAC header is less than the
		 * frame size, the frame is erroneous; drop it.
		 */
		if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
			/* Increment Non Ether type II frames dropped */
			adapter->be_stat.bes_802_3_dropped_frames++;

			kfree_skb(skb);
			return 0;
		}
		/*
		 * Else if the length given in the MAC header is greater
		 * than the frame size, we should not be seeing this sort
		 * of frame; count it as malformed and pass it to the stack.
		 */
		else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
			/* Increment Non Ether type II frames malformed */
			adapter->be_stat.bes_802_3_malformed_frames++;
		}
	}
	vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	if (vtp && vtm) {
		/* A VLAN tag is present in the pkt and BE found
		 * that the tag matched an entry in the VLAN table.
		 */
		if (!pnob->vlan_grp || pnob->num_vlans == 0) {
			/* But we have no VLANs configured.
			 * This should never happen. Drop the packet.
			 */
			dev_info(&pnob->netdev->dev,
				 "BladeEngine: Unexpected vlan tagged packet\n");
			kfree_skb(skb);
			return 0;
		}
		/* pass the VLAN packet to stack */
		vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
		VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
	} else {
		NETIF_RX(skb);
	}
	return 0;
free_frags:
	/* free all frags associated with the current rxcp */
	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	while (numfrags-- > 1) {
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)
		    pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
	return 0;
}
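/*
 * LRO (large receive offload) variant of RX completion processing.
 * Gathers the frame's page fragments into rx_frags[] and hands them to
 * the inet_lro manager for coalescing; falls back to the normal
 * completion path when the error bit is set or coalescing is disabled.
 */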
static void process_nic_rx_completion_lro(struct be_net_object *pnob,
					  struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	unsigned int udpcksm, tcpcksm;
	u32 numfrags, vlanf, vtm, vlan_tag, nresid;
	u16 vlant;
	unsigned int fi, idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size, pktsize;
	bool rx_coal = (adapter->max_rx_coal <= 1) ? 0 : 1;
	u8 err, *va;
	__wsum csum = 0;
	if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
		/* Drop the pkt and move to the next completion. */
		adapter->be_stat.bes_rx_misc_pkts++;
		return;
	}
	err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
	if (err || !rx_coal) {
		/* We won't coalesce Rx pkts if the err bit is set.
		 * Take the path of normal completion processing.
		 */
		process_nic_rx_completion(pnob, rxcp);
		return;
	}
	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	BUG_ON(fi >= (int)pnob->rx_q_len);

	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = (void *)NULL;
	/*
	 * If one page is used per fragment or if this is the
	 * second half of the page, unmap the page here.
	 */
	if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus),
			       frag_sz, PCI_DMA_FROMDEVICE);
	}
	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
	vlant = be16_to_cpu(vlan_tag);
	vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);

	atomic_dec(&pnob->rx_q_posted);

	if (tcpcksm && udpcksm && pktsize == 32) {
		/* flush completion entries */
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return;
	}
	/* Only one of udpcksm and tcpcksm can be set */
	BUG_ON(udpcksm && tcpcksm);

	/* jumbo frames could come in multiple fragments */
	BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
	n = min(pktsize, frag_sz);
	nresid = pktsize - n;	/* will be useful for jumbo pkts */
	idx = 0;
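	/* Fill out rx_frags[0] with the first (possibly only) fragment. */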
	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
	prefetch(va);
	rx_frags[idx].page = rx_page_info->page;
	rx_frags[idx].page_offset = (rx_page_info->page_offset);
	rx_frags[idx].size = n;
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
	/* If we got multiple fragments, we have more data. */
	while (nresid) {
		idx++;
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		n = min(nresid, frag_sz);
		rx_frags[idx].page = rx_page_info->page;
		rx_frags[idx].page_offset = (rx_page_info->page_offset);
		rx_frags[idx].size = n;
		nresid -= n;

		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
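	/*
	 * Hand the assembled fragment list to the LRO manager; tagged
	 * frames that match the VLAN table go through the VLAN-aware
	 * entry point instead.
	 */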
	if (likely(!(vlanf && vtm))) {
		lro_receive_frags(&pnob->lro_mgr, rx_frags,
				  pktsize, pktsize,
				  (void *)(unsigned long)csum, csum);
	} else {
		/* A VLAN tag is present in the pkt and BE found
		 * that the tag matched an entry in the VLAN table.
		 */
		if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
			/* But we have no VLANs configured.
			 * This should never happen. Drop the packet.
			 */
			dev_info(&pnob->netdev->dev,
				 "BladeEngine: Unexpected vlan tagged packet\n");
			return;
		}
		/* pass the VLAN packet to stack */
		lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
					       rx_frags, pktsize, pktsize,
					       pnob->vlan_grp, vlant,
					       (void *)(unsigned long)csum,
					       csum);
	}

	adapter->be_stat.bes_rx_coal++;
}
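/*
 * Return the next valid RX completion entry, or NULL if none is
 * pending. The entry is invalidated and the CQ tail advanced before
 * returning, so each completion is handed out exactly once.
 */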
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
{
	struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
	u32 valid, ct;

	valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
	if (valid == 0)
		return NULL;

	ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
	if (ct != 0) {
		/* Invalid chute #. Treat as error */
		AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
	}

	be_adv_rxcq_tl(pnob);
	AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
	return rxcp;
}
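/*
 * Track the receive bit rate: sampled over windows of at least two
 * seconds and reported in Mbit/s via be_stat.
 */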
static void update_rx_rate(struct be_adapter *adapter)
{
	/* update the rate once in two seconds */
	if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
		u32 r;

		r = adapter->eth_rx_bytes /
		    ((jiffies - adapter->eth_rx_jiffies) / (HZ));
		r = (r / 1000000);	/* MB/Sec */

		/* Mega Bits/Sec */
		adapter->be_stat.bes_eth_rx_rate = (r * 8);
		adapter->eth_rx_jiffies = jiffies;
		adapter->eth_rx_bytes = 0;
	}
}
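/*
 * NAPI work loop: consume up to max_work RX completions, flush any
 * coalesced LRO sessions, and replenish the RX ring when the number of
 * posted buffers runs low. Returns the number of completions processed.
 */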
static int process_rx_completions(struct be_net_object *pnob, int max_work)
{
	struct be_adapter *adapter = pnob->adapter;
	struct ETH_RX_COMPL_AMAP *rxcp;
	u32 nc = max_work;
	unsigned int pktsize;

	while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
		prefetch(rxcp);
		pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
		process_nic_rx_completion_lro(pnob, rxcp);
		adapter->eth_rx_bytes += pktsize;
		update_rx_rate(adapter);
		max_work--;
		adapter->be_stat.bes_rx_compl++;
	}
	if (likely(adapter->max_rx_coal > 1)) {
		adapter->be_stat.bes_rx_flush++;
		lro_flush_all(&pnob->lro_mgr);
	}

	/* Refill the queue */
	if (atomic_read(&pnob->rx_q_posted) < 900)
		be_post_eth_rx_buffs(pnob);

	return (nc - max_work);
}
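/*
 * Return the next valid TX completion entry, or NULL if none is
 * pending; the entry is invalidated and the CQ tail advanced.
 */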
static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
{
	struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
	u32 valid;

	valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
	if (valid == 0)
		return NULL;

	AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
	be_adv_txcq_tl(pnob);
	return txcp;
}
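/*
 * Reclaim one transmitted frame: walk the TX ring from the tail up to
 * and including the WRB at end_idx, unmapping each fragment and freeing
 * the skb stored at the last WRB.
 */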
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
{
	struct be_adapter *adapter = pnob->adapter;
	int cur_index, tx_wrbs_completed = 0;
	struct sk_buff *skb;
	u64 busaddr, pa, pa_lo, pa_hi;
	struct ETH_WRB_AMAP *wrb;
	u32 frag_len, last_index, j;

	last_index = tx_compl_lastwrb_idx_get(pnob);
	BUG_ON(last_index != end_idx);
	pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
	do {
		cur_index = pnob->tx_q_tl;
		wrb = &pnob->tx_q[cur_index];
		pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
		pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
		frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
		busaddr = (pa_hi << 32) | pa_lo;
		if (busaddr != 0) {
			pa = le64_to_cpu(busaddr);
			pci_unmap_single(adapter->pdev, pa,
					 frag_len, PCI_DMA_TODEVICE);
		}
		if (cur_index == last_index) {
			skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
			BUG_ON(!skb);
			for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
				struct skb_frag_struct *frag;
				frag = &skb_shinfo(skb)->frags[j];
				pci_unmap_page(adapter->pdev,
					       (ulong) frag->page, frag->size,
					       PCI_DMA_TODEVICE);
			}
			kfree_skb(skb);
			pnob->tx_ctxt[cur_index] = NULL;
		} else {
			BUG_ON(pnob->tx_ctxt[cur_index]);
		}
		tx_wrbs_completed++;
		be_adv_txq_tl(pnob);
	} while (cur_index != last_index);
	atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
}
/*
 * There is no need to take an SMP lock here since currently we have
 * only one instance of the tasklet that does completion processing.
 */
static void process_nic_tx_completions(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct ETH_TX_COMPL_AMAP *txcp;
	struct net_device *netdev = pnob->netdev;
	u32 end_idx, num_processed = 0;

	adapter->be_stat.bes_tx_events++;

	while ((txcp = be_get_tx_cmpl(pnob))) {
		end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
		process_one_tx_compl(pnob, end_idx);
		num_processed++;
		adapter->be_stat.bes_tx_compl++;
	}
	be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
	/*
	 * We got Tx completions and have usable WRBs.
	 * If the netdev's queue has been stopped
	 * because we had run out of WRBs, wake it now.
	 */
	spin_lock(&adapter->txq_lock);
	if (netif_queue_stopped(netdev)
	    && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
		netif_wake_queue(netdev);
	}
	spin_unlock(&adapter->txq_lock);
}
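/*
 * Post receive descriptors from the rxbl list to the RX ring and ring
 * the doorbell. At most 255 buffers are posted per doorbell write;
 * returns the number actually posted.
 */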
static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
{
	u32 nposted = 0;
	struct ETH_RX_D_AMAP *rxd = NULL;
	struct be_recv_buffer *rxbp;
	void **rx_ctxp;
	struct RQ_DB_AMAP rqdb;

	rx_ctxp = pnob->rx_ctxt;

	while (!list_empty(rxbl) &&
	       (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {

		rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
		list_del(&rxbp->rxb_list);
		rxd = pnob->rx_q + pnob->rx_q_hd;
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);

		rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
		be_adv_rxq_hd(pnob);
		nposted++;
	}

	if (nposted) {
		/* Now press the door bell to notify BladeEngine. */
		rqdb.dw[0] = 0;
		AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
		AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
		PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
	}
	atomic_add(nposted, &pnob->rx_q_posted);
	return nposted;
}
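/*
 * Allocate page-backed receive buffers, map them for DMA and post them
 * to the RX ring. With 2K fragments two receive buffers can share one
 * page (rx_pg_shared); with 8K fragments a compound page is allocated
 * per buffer.
 */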
void be_post_eth_rx_buffs(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 num_bufs, r;
	u64 busaddr = 0, tmp_pa;
	u32 max_bufs, pg_hd;
	u32 frag_size;
	struct be_recv_buffer *rxbp;
	struct list_head rxbl;
	struct be_rx_page_info *rx_page_info;
	struct page *page = NULL;
	u32 page_order = 0;
	gfp_t alloc_flags = GFP_ATOMIC;

	max_bufs = 64;		/* should be even # <= 255. */

	frag_size = pnob->rx_buf_size;
	page_order = get_order(frag_size);

	if (frag_size == 8192)
		alloc_flags |= (gfp_t) __GFP_COMP;
	/*
	 * Form a linked list of RECV_BUFFER structures to be posted.
	 * We will post an even number of buffers so that pages can be
	 * shared.
	 */
	INIT_LIST_HEAD(&rxbl);
	for (num_bufs = 0; num_bufs < max_bufs; ++num_bufs) {

		rxbp = &pnob->eth_rx_bufs[num_bufs];
		pg_hd = pnob->rx_pg_info_hd;
		rx_page_info = &pnob->rx_page_info[pg_hd];

		if (!page) {
			/*
			 * Before we allocate a page, make sure that we
			 * have space in the RX queue to post the buffer.
			 * We check for two vacant slots since with
			 * 2K frags, we will need two slots.
			 */
			if ((pnob->rx_ctxt[(pnob->rx_q_hd + num_bufs) &
					   (pnob->rx_q_len - 1)] != NULL)
			    || (pnob->rx_ctxt[(pnob->rx_q_hd + num_bufs + 1) %
					      pnob->rx_q_len] != NULL)) {
				break;
			}
			page = alloc_pages(alloc_flags, page_order);
			if (unlikely(page == NULL)) {
				adapter->be_stat.bes_ethrx_post_fail++;
				pnob->rxbuf_post_fail++;
				break;
			}
			pnob->rxbuf_post_fail = 0;
			busaddr = pci_map_page(adapter->pdev, page, 0,
					       frag_size, PCI_DMA_FROMDEVICE);
			rx_page_info->page_offset = 0;
			rx_page_info->page = page;
			/*
			 * If we are sharing a page among two skbs,
			 * alloc a new one on the next iteration
			 */
			if (pnob->rx_pg_shared == false)
				page = NULL;
		} else {
			rx_page_info->page_offset += frag_size;
			rx_page_info->page = page;
			/*
			 * We are finished with the alloced page;
			 * alloc a new one on the next iteration
			 */
			page = NULL;
		}
		rxbp->rxb_ctxt = (void *)rx_page_info;
		index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);

		pci_unmap_addr_set(rx_page_info, bus, busaddr);
		tmp_pa = busaddr + rx_page_info->page_offset;
		rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
		rxbp->rxb_pa_hi = (tmp_pa >> 32);
		rxbp->rxb_len = frag_size;
		list_add_tail(&rxbp->rxb_list, &rxbl);
	}

	r = post_rx_buffs(pnob, &rxbl);
	BUG_ON(r != num_bufs);
}
/*
 * Interrupt service for network function. We just schedule the
 * tasklet which does all completion processing.
 */
irqreturn_t be_int(int irq, void *dev)
{
	struct net_device *netdev = dev;
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	u32 isr;

	isr = CSR_READ(&pnob->fn_obj, cev.isr1);
	if (isr == 0)
		return IRQ_NONE;

	spin_lock(&adapter->int_lock);
	adapter->isr |= isr;
	spin_unlock(&adapter->int_lock);

	adapter->be_stat.bes_ints++;

	tasklet_schedule(&adapter->sts_handler);
	return IRQ_HANDLED;
}
/*
 * Poll function called by NAPI with a work budget.
 * We process as many UC, BC and MC receive completions
 * as the budget allows and return the actual number of
 * RX statuses processed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	u32 work_done;

	adapter->be_stat.bes_polls++;
	work_done = process_rx_completions(pnob, budget);
	BUG_ON(work_done > budget);

	/* All consumed */
	if (work_done < budget) {
		netif_rx_complete(napi);
		/* enable interrupts */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
	}
	return work_done;
}
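/*
 * Return the next valid entry in the event queue, or NULL if none is
 * pending; the EQ tail is advanced before returning.
 */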
static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
{
	struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);

	if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
		return NULL;
	be_adv_eq_tl(pnob);
	return eqp;
}
/*
 * Processes all valid events in the event ring associated with the given
 * NetObject and returns the number of events processed; the caller
 * notifies BE of that count via be_notify_event().
 */
static inline u32 process_events(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct EQ_ENTRY_AMAP *eqp;
	u32 rid, num_events = 0;
	struct net_device *netdev = pnob->netdev;

	while ((eqp = get_event(pnob)) != NULL) {
		adapter->be_stat.bes_events++;
		rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
		if (rid == pnob->rx_cq_id) {
			adapter->be_stat.bes_rx_events++;
			netif_rx_schedule(&pnob->napi);
		} else if (rid == pnob->tx_cq_id) {
			process_nic_tx_completions(pnob);
		} else if (rid == pnob->mcc_cq_id) {
			be_mcc_process_cq(&pnob->mcc_q_obj, 1);
		} else {
			dev_info(&netdev->dev,
				 "Invalid EQ ResourceID %d\n", rid);
		}
		AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
		AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
		num_events++;
	}
	return num_events;
}
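/*
 * Adaptive interrupt coalescing: once a second, compare the measured
 * interrupt rate against the IPS watermarks and step the event queue
 * delay up or down by 8 units accordingly.
 */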
static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
{
	int status;
	struct be_eq_object *eq_objectp;

	/* update once a second */
	if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
		/* One second elapsed since last update */
		u32 r, new_eqd = -1;

		r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
		r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
		adapter->be_stat.bes_ips = r;
		adapter->ips_jiffies = jiffies;
		adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
		if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
			new_eqd = (adapter->cur_eqd + 8);
		if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
			new_eqd = (adapter->cur_eqd - 8);
		if (adapter->enable_aic && new_eqd != -1) {
			eq_objectp = &pnob->event_q_obj;
			status = be_eq_modify_delay(&pnob->fn_obj, 1,
						    &eq_objectp, &new_eqd, NULL,
						    NULL, NULL);
			if (status == BE_SUCCESS)
				adapter->cur_eqd = new_eqd;
		}
	}
}
/*
   This function notifies BladeEngine of how many events were processed
   from the event queue by ringing the corresponding doorbell and
   optionally re-arms the event queue.
   n - number of events processed
   re_arm - 1 - re-arm the EQ, 0 - do not re-arm the EQ
*/
static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
{
	struct CQ_DB_AMAP eqdb;

	eqdb.dw[0] = 0;	/* initialize the doorbell word */
	AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
	AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
	/*
	 * Under some situations we see an interrupt and no valid
	 * EQ entry. To keep going, we need to ring the DB even if
	 * num_popped is zero.
	 */
	PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
}
/*
 * Called from the tasklet scheduled by ISR. All real interrupt
 * processing is done here.
 */
void be_process_intr(unsigned long context)
{
	struct be_adapter *adapter = (struct be_adapter *)context;
	struct be_net_object *pnob = adapter->net_obj;
	u32 isr, n;
	ulong flags = 0;

	isr = adapter->isr;

	/*
	 * We create only one NIC event queue in Linux. Events are
	 * expected only in the first event queue.
	 */
	BUG_ON(isr & 0xfffffffe);
	if ((isr & 1) == 0)
		return;		/* not our interrupt */
	n = process_events(pnob);
	/*
	 * Clear the event bit. adapter->isr is set by the
	 * hard interrupt; prevent a race with the lock.
	 */
	spin_lock_irqsave(&adapter->int_lock, flags);
	adapter->isr &= ~1;
	spin_unlock_irqrestore(&adapter->int_lock, flags);
	be_notify_event(pnob, n, 1);
	/*
	 * If previous allocation attempts had failed and
	 * BE has used up all posted buffers, post RX buffers here.
	 */
	if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
		be_post_eth_rx_buffs(pnob);
	update_eqd(adapter, pnob);
}