www.pilppa.org Git - linux-2.6-omap-h63xx.git / commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
author    Linus Torvalds <torvalds@woody.linux-foundation.org>
          Fri, 8 Feb 2008 23:34:26 +0000 (15:34 -0800)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Fri, 8 Feb 2008 23:34:26 +0000 (15:34 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/core: Remove unused struct ib_device.flags member
  IB/core: Add IP checksum offload support
  IPoIB: Add send gather support
  IPoIB: Add high DMA feature flag
  IB/mlx4: Use multiple WQ blocks to post smaller send WQEs
  mlx4_core: Clean up struct mlx4_buf
  mlx4_core: For 64-bit systems, vmap() kernel queue buffers
  IB/mlx4: Consolidate code to get an entry from a struct mlx4_buf

14 files changed:
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/srq.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/net/mlx4/alloc.c
drivers/net/mlx4/mr.c
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/rdma/ib_verbs.h

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 7950aa6e8184b1441a5dcfd2e6daad7f21d64e43..7360bbafbe84ec9883b665913bc76362e7a74196 100644
@@ -64,13 +64,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 
 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
 {
-       int offset = n * sizeof (struct mlx4_cqe);
-
-       if (buf->buf.nbufs == 1)
-               return buf->buf.u.direct.buf + offset;
-       else
-               return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-                       (offset & (PAGE_SIZE - 1));
+       return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
 }
 
 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -332,6 +326,12 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;
 
+       if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
+                    is_send)) {
+               printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+               return -EINVAL;
+       }
+
        if (!*cur_qp ||
            (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
                /*
@@ -354,8 +354,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 
        if (is_send) {
                wq = &(*cur_qp)->sq;
-               wqe_ctr = be16_to_cpu(cqe->wqe_index);
-               wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+               if (!(*cur_qp)->sq_signal_bits) {
+                       wqe_ctr = be16_to_cpu(cqe->wqe_index);
+                       wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+               }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 28697653a370f18fac368f3ecaed4c070517601c..3726e451a327201d037016c4c40173932710d126 100644
@@ -120,6 +120,8 @@ struct mlx4_ib_qp {
 
        u32                     doorbell_qpn;
        __be32                  sq_signal_bits;
+       unsigned                sq_next_wqe;
+       int                     sq_max_wqes_per_wr;
        int                     sq_spare_wqes;
        struct mlx4_ib_wq       sq;
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8cba9c532e6425b38a8554384d144bb06c32c8fa..958e205b6d7c7d5894b419ac6327b7483b900236 100644
@@ -30,6 +30,8 @@
  * SOFTWARE.
  */
 
+#include <linux/log2.h>
+
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
 
@@ -96,11 +98,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 
 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
 {
-       if (qp->buf.nbufs == 1)
-               return qp->buf.u.direct.buf + offset;
-       else
-               return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-                       (offset & (PAGE_SIZE - 1));
+       return mlx4_buf_offset(&qp->buf, offset);
 }
 
 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
@@ -115,16 +113,87 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
 
 /*
  * Stamp a SQ WQE so that it is invalid if prefetched by marking the
- * first four bytes of every 64 byte chunk with 0xffffffff, except for
- * the very first chunk of the WQE.
+ * first four bytes of every 64 byte chunk with
+ *     0x7FFFFFF | (invalid_ownership_value << 31).
+ *
+ * When the max work request size is less than or equal to the WQE
+ * basic block size, as an optimization, we can stamp all WQEs with
+ * 0xffffffff, and skip the very first chunk of each WQE.
  */
-static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
 {
-       u32 *wqe = get_send_wqe(qp, n);
+       u32 *wqe;
        int i;
+       int s;
+       int ind;
+       void *buf;
+       __be32 stamp;
+
+       s = roundup(size, 1U << qp->sq.wqe_shift);
+       if (qp->sq_max_wqes_per_wr > 1) {
+               for (i = 0; i < s; i += 64) {
+                       ind = (i >> qp->sq.wqe_shift) + n;
+                       stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
+                                                      cpu_to_be32(0xffffffff);
+                       buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+                       wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
+                       *wqe = stamp;
+               }
+       } else {
+               buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+               for (i = 64; i < s; i += 64) {
+                       wqe = buf + i;
+                       *wqe = 0xffffffff;
+               }
+       }
+}
+
+static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
+{
+       struct mlx4_wqe_ctrl_seg *ctrl;
+       struct mlx4_wqe_inline_seg *inl;
+       void *wqe;
+       int s;
+
+       ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+       s = sizeof(struct mlx4_wqe_ctrl_seg);
 
-       for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16)
-               wqe[i] = 0xffffffff;
+       if (qp->ibqp.qp_type == IB_QPT_UD) {
+               struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
+               struct mlx4_av *av = (struct mlx4_av *)dgram->av;
+               memset(dgram, 0, sizeof *dgram);
+               av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
+               s += sizeof(struct mlx4_wqe_datagram_seg);
+       }
+
+       /* Pad the remainder of the WQE with an inline data segment. */
+       if (size > s) {
+               inl = wqe + s;
+               inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
+       }
+       ctrl->srcrb_flags = 0;
+       ctrl->fence_size = size / 16;
+       /*
+        * Make sure descriptor is fully written before setting ownership bit
+        * (because HW can start executing as soon as we do).
+        */
+       wmb();
+
+       ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
+               (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+
+       stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
+}
+
+/* Post NOP WQE to prevent wrap-around in the middle of WR */
+static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
+{
+       unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
+       if (unlikely(s < qp->sq_max_wqes_per_wr)) {
+               post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
+               ind += s;
+       }
+       return ind;
 }
 
 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
@@ -241,6 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                              enum ib_qp_type type, struct mlx4_ib_qp *qp)
 {
+       int s;
+
        /* Sanity check SQ size before proceeding */
        if (cap->max_send_wr     > dev->dev->caps.max_wqes  ||
            cap->max_send_sge    > dev->dev->caps.max_sq_sg ||
@@ -256,20 +327,74 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
            cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
                return -EINVAL;
 
-       qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
-                                                       sizeof (struct mlx4_wqe_data_seg),
-                                                       cap->max_inline_data +
-                                                       sizeof (struct mlx4_wqe_inline_seg)) +
-                                                   send_wqe_overhead(type)));
-       qp->sq.max_gs    = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
-               sizeof (struct mlx4_wqe_data_seg);
+       s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
+               cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
+               send_wqe_overhead(type);
 
        /*
-        * We need to leave 2 KB + 1 WQE of headroom in the SQ to
-        * allow HW to prefetch.
+        * Hermon supports shrinking WQEs, such that a single work
+        * request can include multiple units of 1 << wqe_shift.  This
+        * way, work requests can differ in size, and do not have to
+        * be a power of 2 in size, saving memory and speeding up send
+        * WR posting.  Unfortunately, if we do this then the
+        * wqe_index field in CQEs can't be used to look up the WR ID
+        * anymore, so we do this only if selective signaling is off.
+        *
+        * Further, on 32-bit platforms, we can't use vmap() to make
+        * the QP buffer virtually contigious.  Thus we have to use
+        * constant-sized WRs to make sure a WR is always fully within
+        * a single page-sized chunk.
+        *
+        * Finally, we use NOP work requests to pad the end of the
+        * work queue, to avoid wrap-around in the middle of WR.  We
+        * set NEC bit to avoid getting completions with error for
+        * these NOP WRs, but since NEC is only supported starting
+        * with firmware 2.2.232, we use constant-sized WRs for older
+        * firmware.
+        *
+        * And, since MLX QPs only support SEND, we use constant-sized
+        * WRs in this case.
+        *
+        * We look for the smallest value of wqe_shift such that the
+        * resulting number of wqes does not exceed device
+        * capabilities.
+        *
+        * We set WQE size to at least 64 bytes, this way stamping
+        * invalidates each WQE.
         */
-       qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
-       qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes);
+       if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
+           qp->sq_signal_bits && BITS_PER_LONG == 64 &&
+           type != IB_QPT_SMI && type != IB_QPT_GSI)
+               qp->sq.wqe_shift = ilog2(64);
+       else
+               qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+
+       for (;;) {
+               if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
+                       return -EINVAL;
+
+               qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
+
+               /*
+                * We need to leave 2 KB + 1 WR of headroom in the SQ to
+                * allow HW to prefetch.
+                */
+               qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
+               qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
+                                                   qp->sq_max_wqes_per_wr +
+                                                   qp->sq_spare_wqes);
+
+               if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
+                       break;
+
+               if (qp->sq_max_wqes_per_wr <= 1)
+                       return -EINVAL;
+
+               ++qp->sq.wqe_shift;
+       }
+
+       qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+                        send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
 
        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                (qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -281,7 +406,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                qp->sq.offset = 0;
        }
 
-       cap->max_send_wr  = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes;
+       cap->max_send_wr  = qp->sq.max_post =
+               (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
        cap->max_send_sge = qp->sq.max_gs;
        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;
@@ -327,6 +453,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        qp->rq.tail         = 0;
        qp->sq.head         = 0;
        qp->sq.tail         = 0;
+       qp->sq_next_wqe     = 0;
+
+       if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+               qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+       else
+               qp->sq_signal_bits = 0;
 
        err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
        if (err)
@@ -417,11 +549,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
         */
        qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
 
-       if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
-               qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
-       else
-               qp->sq_signal_bits = 0;
-
        qp->mqp.event = mlx4_ib_qp_event;
 
        return 0;
@@ -916,7 +1043,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                        ctrl = get_send_wqe(qp, i);
                        ctrl->owner_opcode = cpu_to_be32(1 << 31);
 
-                       stamp_send_wqe(qp, i);
+                       stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
                }
        }
 
@@ -969,6 +1096,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                qp->rq.tail = 0;
                qp->sq.head = 0;
                qp->sq.tail = 0;
+               qp->sq_next_wqe = 0;
                if (!ibqp->srq)
                        *qp->db.db  = 0;
        }
@@ -1278,13 +1406,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        unsigned long flags;
        int nreq;
        int err = 0;
-       int ind;
-       int size;
+       unsigned ind;
+       int uninitialized_var(stamp);
+       int uninitialized_var(size);
        int i;
 
        spin_lock_irqsave(&qp->sq.lock, flags);
 
-       ind = qp->sq.head;
+       ind = qp->sq_next_wqe;
 
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -1300,7 +1429,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                }
 
                ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
-               qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
+               qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
 
                ctrl->srcrb_flags =
                        (wr->send_flags & IB_SEND_SIGNALED ?
@@ -1413,16 +1542,23 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
                        (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
 
+               stamp = ind + qp->sq_spare_wqes;
+               ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
+
                /*
                 * We can improve latency by not stamping the last
                 * send queue WQE until after ringing the doorbell, so
                 * only stamp here if there are still more WQEs to post.
+                *
+                * Same optimization applies to padding with NOP wqe
+                * in case of WQE shrinking (used to prevent wrap-around
+                * in the middle of WR).
                 */
-               if (wr->next)
-                       stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) &
-                                      (qp->sq.wqe_cnt - 1));
+               if (wr->next) {
+                       stamp_send_wqe(qp, stamp, size * 16);
+                       ind = pad_wraparound(qp, ind);
+               }
 
-               ++ind;
        }
 
 out:
@@ -1444,8 +1580,10 @@ out:
                 */
                mmiowb();
 
-               stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) &
-                              (qp->sq.wqe_cnt - 1));
+               stamp_send_wqe(qp, stamp, size * 16);
+
+               ind = pad_wraparound(qp, ind);
+               qp->sq_next_wqe = ind;
        }
 
        spin_unlock_irqrestore(&qp->sq.lock, flags);
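
The sizing loop added to set_kernel_sq_size() above is easier to follow with concrete numbers. The program below is a standalone user-space sketch of that arithmetic only, not code from this series; the per-WR size s and every device limit are made-up example values.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static unsigned int roundup_pow_of_two(unsigned int n)
{
        unsigned int r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned int s = 192;               /* hypothetical bytes needed per WR */
        unsigned int max_send_wr = 100;     /* hypothetical requested SQ depth */
        unsigned int max_wqes = 16384;      /* hypothetical device limit */
        unsigned int max_sq_desc_sz = 1008; /* hypothetical device limit */
        unsigned int wqe_shift = 6;         /* shrinking path: start at 64-byte units */
        unsigned int wqes_per_wr, spare, cnt;

        for (;;) {
                if ((1u << wqe_shift) > max_sq_desc_sz)
                        return 1;           /* would mirror -EINVAL */

                wqes_per_wr = DIV_ROUND_UP(s, 1u << wqe_shift);
                spare = (2048 >> wqe_shift) + wqes_per_wr;
                cnt = roundup_pow_of_two(max_send_wr * wqes_per_wr + spare);

                if (cnt <= max_wqes)
                        break;
                if (wqes_per_wr <= 1)
                        return 1;
                ++wqe_shift;
        }

        /* prints: wqe_shift=6 wqes_per_wr=3 spare=35 wqe_cnt=512 max_post=159 */
        printf("wqe_shift=%u wqes_per_wr=%u spare=%u wqe_cnt=%u max_post=%u\n",
               wqe_shift, wqes_per_wr, spare, cnt, (cnt - spare) / wqes_per_wr);
        return 0;
}
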
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e7e9a3d0dac343f5dbd333d87a1839be635f12db..beaa3b06cf58b1eae91d08c2d6004b83223fcff0 100644
 
 static void *get_wqe(struct mlx4_ib_srq *srq, int n)
 {
-       int offset = n << srq->msrq.wqe_shift;
-
-       if (srq->buf.nbufs == 1)
-               return srq->buf.u.direct.buf + offset;
-       else
-               return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-                       (offset & (PAGE_SIZE - 1));
+       return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
 }
 
 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index fe250c60607d9a4a700ff0e6461cf586c6d547e5..f9b7caa541439666e4379b1c8b2cff00c60ed181 100644
@@ -143,7 +143,7 @@ struct ipoib_rx_buf {
 
 struct ipoib_tx_buf {
        struct sk_buff *skb;
-       u64             mapping;
+       u64             mapping[MAX_SKB_FRAGS + 1];
 };
 
 struct ib_cm_id;
@@ -296,7 +296,7 @@ struct ipoib_dev_priv {
        struct ipoib_tx_buf *tx_ring;
        unsigned             tx_head;
        unsigned             tx_tail;
-       struct ib_sge        tx_sge;
+       struct ib_sge        tx_sge[MAX_SKB_FRAGS + 1];
        struct ib_send_wr    tx_wr;
        unsigned             tx_outstanding;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1818f958c25027543aa2b36890b2ae0c5464aa85..7dd2ec473d24e297950aefffee9609959301b51f 100644
@@ -634,8 +634,8 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 {
        struct ib_send_wr *bad_wr;
 
-       priv->tx_sge.addr       = addr;
-       priv->tx_sge.length     = len;
+       priv->tx_sge[0].addr          = addr;
+       priv->tx_sge[0].length        = len;
 
        priv->tx_wr.wr_id       = wr_id | IPOIB_OP_CM;
 
@@ -676,7 +676,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                return;
        }
 
-       tx_req->mapping = addr;
+       tx_req->mapping[0] = addr;
 
        if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                               addr, skb->len))) {
@@ -715,7 +715,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        tx_req = &tx->tx_ring[wr_id];
 
-       ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+       ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);
 
        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++dev->stats.tx_packets;
@@ -1110,7 +1110,7 @@ timeout:
 
        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-               ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
+               ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
                                    DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_req->skb);
                ++p->tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 52bc2bd5799ae227a66046bfd2d79e3f4b1028b2..9d3e778dc56d62f41582521d03682b6739a0c930 100644
@@ -239,6 +239,54 @@ repost:
                           "for buf %d\n", wr_id);
 }
 
+static int ipoib_dma_map_tx(struct ib_device *ca,
+                           struct ipoib_tx_buf *tx_req)
+{
+       struct sk_buff *skb = tx_req->skb;
+       u64 *mapping = tx_req->mapping;
+       int i;
+
+       mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+                                      DMA_TO_DEVICE);
+       if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+               return -EIO;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               mapping[i + 1] = ib_dma_map_page(ca, frag->page,
+                                                frag->page_offset, frag->size,
+                                                DMA_TO_DEVICE);
+               if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
+                       goto partial_error;
+       }
+       return 0;
+
+partial_error:
+       ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+       for (; i > 0; --i) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+               ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
+       }
+       return -EIO;
+}
+
+static void ipoib_dma_unmap_tx(struct ib_device *ca,
+                              struct ipoib_tx_buf *tx_req)
+{
+       struct sk_buff *skb = tx_req->skb;
+       u64 *mapping = tx_req->mapping;
+       int i;
+
+       ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
+                                 DMA_TO_DEVICE);
+       }
+}
+
 static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -257,8 +305,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        tx_req = &priv->tx_ring[wr_id];
 
-       ib_dma_unmap_single(priv->ca, tx_req->mapping,
-                           tx_req->skb->len, DMA_TO_DEVICE);
+       ipoib_dma_unmap_tx(priv->ca, tx_req);
 
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;
@@ -341,16 +388,23 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
-                           u64 addr, int len)
+                           u64 *mapping, int headlen,
+                           skb_frag_t *frags,
+                           int nr_frags)
 {
        struct ib_send_wr *bad_wr;
+       int i;
 
-       priv->tx_sge.addr             = addr;
-       priv->tx_sge.length           = len;
-
-       priv->tx_wr.wr_id             = wr_id;
-       priv->tx_wr.wr.ud.remote_qpn  = qpn;
-       priv->tx_wr.wr.ud.ah          = address;
+       priv->tx_sge[0].addr         = mapping[0];
+       priv->tx_sge[0].length       = headlen;
+       for (i = 0; i < nr_frags; ++i) {
+               priv->tx_sge[i + 1].addr = mapping[i + 1];
+               priv->tx_sge[i + 1].length = frags[i].size;
+       }
+       priv->tx_wr.num_sge          = nr_frags + 1;
+       priv->tx_wr.wr_id            = wr_id;
+       priv->tx_wr.wr.ud.remote_qpn = qpn;
+       priv->tx_wr.wr.ud.ah         = address;
 
        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
 }
@@ -360,7 +414,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
-       u64 addr;
 
        if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -383,20 +436,19 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
-       addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
-                                DMA_TO_DEVICE);
-       if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+       if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }
-       tx_req->mapping = addr;
 
        if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
-                              address->ah, qpn, addr, skb->len))) {
+                              address->ah, qpn,
+                              tx_req->mapping, skb_headlen(skb),
+                              skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
                ipoib_warn(priv, "post_send failed\n");
                ++dev->stats.tx_errors;
-               ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+               ipoib_dma_unmap_tx(priv->ca, tx_req);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;
@@ -615,10 +667,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
-                               ib_dma_unmap_single(priv->ca,
-                                                   tx_req->mapping,
-                                                   tx_req->skb->len,
-                                                   DMA_TO_DEVICE);
+                               ipoib_dma_unmap_tx(priv->ca, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 09f5371137a13aba0e5c7add61c1f1d0e3115ef6..f96477a8ca5ac96121ae618d24153870e23f4a48 100644
@@ -965,7 +965,9 @@ static void ipoib_setup(struct net_device *dev)
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = ipoib_sendq_size * 2;
-       dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
+       dev->features            = (NETIF_F_VLAN_CHALLENGED     |
+                                   NETIF_F_LLTX                |
+                                   NETIF_F_HIGHDMA);
 
        /* MTU will be reset when mcast join happens */
        dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 433e99ac227b85a2bd6e8594c69fe47f3dc6e274..a3aeb911f024f6b5d62ebd19fedd2a82251e6518 100644
@@ -157,6 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        };
 
        int ret, size;
+       int i;
 
        priv->pd = ib_alloc_pd(priv->ca);
        if (IS_ERR(priv->pd)) {
@@ -191,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        init_attr.send_cq = priv->cq;
        init_attr.recv_cq = priv->cq;
 
+       if (dev->features & NETIF_F_SG)
+               init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
        priv->qp = ib_create_qp(priv->pd, &init_attr);
        if (IS_ERR(priv->qp)) {
                printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
@@ -201,11 +205,11 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
        priv->dev->dev_addr[3] = (priv->qp->qp_num      ) & 0xff;
 
-       priv->tx_sge.lkey       = priv->mr->lkey;
+       for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
+               priv->tx_sge[i].lkey = priv->mr->lkey;
 
        priv->tx_wr.opcode      = IB_WR_SEND;
-       priv->tx_wr.sg_list     = &priv->tx_sge;
-       priv->tx_wr.num_sge     = 1;
+       priv->tx_wr.sg_list     = priv->tx_sge;
        priv->tx_wr.send_flags  = IB_SEND_SIGNALED;
 
        return 0;
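
The hunk above raises max_send_sge to MAX_SKB_FRAGS + 1 when the net_device already advertises NETIF_F_SG, but the code that decides to set that feature flag is not part of the hunks shown here. A minimal, hypothetical sketch of such a gate might query the HCA for its scatter/gather limit first; the helper name and placement below are invented for illustration and are not taken from this commit.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <rdma/ib_verbs.h>

/* Hypothetical: enable scatter/gather sends only if the HCA can take one
 * SGE per possible skb fragment plus one for the linear header. */
static void ipoib_maybe_enable_sg(struct net_device *dev, struct ib_device *ca)
{
        struct ib_device_attr attr;

        if (!ib_query_device(ca, &attr) && attr.max_sge >= MAX_SKB_FRAGS + 1)
                dev->features |= NETIF_F_SG;
}
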
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b226e019bc8b70ab18fee4d8a8c6f6540f40711e..521dc0322ee4ff329b07ed469e3923d402585d6e 100644
@@ -116,40 +116,53 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                buf->nbufs        = 1;
                buf->npages       = 1;
                buf->page_shift   = get_order(size) + PAGE_SHIFT;
-               buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+               buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
                                                       size, &t, GFP_KERNEL);
-               if (!buf->u.direct.buf)
+               if (!buf->direct.buf)
                        return -ENOMEM;
 
-               buf->u.direct.map = t;
+               buf->direct.map = t;
 
                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }
 
-               memset(buf->u.direct.buf, 0, size);
+               memset(buf->direct.buf, 0, size);
        } else {
                int i;
 
                buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                buf->npages      = buf->nbufs;
                buf->page_shift  = PAGE_SHIFT;
-               buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+               buf->page_list   = kzalloc(buf->nbufs * sizeof *buf->page_list,
                                           GFP_KERNEL);
-               if (!buf->u.page_list)
+               if (!buf->page_list)
                        return -ENOMEM;
 
                for (i = 0; i < buf->nbufs; ++i) {
-                       buf->u.page_list[i].buf =
+                       buf->page_list[i].buf =
                                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                   &t, GFP_KERNEL);
-                       if (!buf->u.page_list[i].buf)
+                       if (!buf->page_list[i].buf)
                                goto err_free;
 
-                       buf->u.page_list[i].map = t;
+                       buf->page_list[i].map = t;
 
-                       memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+                       memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+               }
+
+               if (BITS_PER_LONG == 64) {
+                       struct page **pages;
+                       pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+                       if (!pages)
+                               goto err_free;
+                       for (i = 0; i < buf->nbufs; ++i)
+                               pages[i] = virt_to_page(buf->page_list[i].buf);
+                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+                       kfree(pages);
+                       if (!buf->direct.buf)
+                               goto err_free;
                }
        }
 
@@ -167,15 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
        int i;
 
        if (buf->nbufs == 1)
-               dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-                                 buf->u.direct.map);
+               dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+                                 buf->direct.map);
        else {
+               if (BITS_PER_LONG == 64)
+                       vunmap(buf->direct.buf);
+
                for (i = 0; i < buf->nbufs; ++i)
-                       if (buf->u.page_list[i].buf)
+                       if (buf->page_list[i].buf)
                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                 buf->u.page_list[i].buf,
-                                                 buf->u.page_list[i].map);
-               kfree(buf->u.page_list);
+                                                 buf->page_list[i].buf,
+                                                 buf->page_list[i].map);
+               kfree(buf->page_list);
        }
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 9c9e308d0917d2e26161221a0a9f393db91289d3..679dfdb6807f12fce7c5ba76cba4c668d76534ef 100644
@@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 
        for (i = 0; i < buf->npages; ++i)
                if (buf->nbufs == 1)
-                       page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+                       page_list[i] = buf->direct.map + (i << buf->page_shift);
                else
-                       page_list[i] = buf->u.page_list[i].map;
+                       page_list[i] = buf->page_list[i].map;
 
        err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 222815d91c40108f87d8c47b3a0f5541d7b0c5ae..6cdf813cd47883b5c38b674003ea650c3a532709 100644
@@ -133,6 +133,11 @@ enum {
        MLX4_STAT_RATE_OFFSET   = 5
 };
 
+static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
+{
+       return (major << 32) | (minor << 16) | subminor;
+}
+
 struct mlx4_caps {
        u64                     fw_ver;
        int                     num_ports;
@@ -189,10 +194,8 @@ struct mlx4_buf_list {
 };
 
 struct mlx4_buf {
-       union {
-               struct mlx4_buf_list    direct;
-               struct mlx4_buf_list   *page_list;
-       } u;
+       struct mlx4_buf_list    direct;
+       struct mlx4_buf_list   *page_list;
        int                     nbufs;
        int                     npages;
        int                     page_shift;
@@ -308,6 +311,14 @@ struct mlx4_init_port_param {
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
+static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
+{
+       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+               return buf->direct.buf + offset;
+       else
+               return buf->page_list[offset >> PAGE_SHIFT].buf +
+                       (offset & (PAGE_SIZE - 1));
+}
 
 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
 void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
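
The new mlx4_buf_offset() helper above reduces the open-coded lookups removed from cq.c, qp.c and srq.c to one place: on 64-bit kernels (or single-chunk buffers) it is plain pointer arithmetic, otherwise a byte offset is split into a page index and an in-page remainder. A tiny user-space check of that split, assuming a 4 KB page size for the demo only:

#include <stdio.h>

#define PAGE_SHIFT  12                  /* assume 4 KB pages for the demo */
#define PAGE_SIZE   (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long offset = 10000;   /* arbitrary example byte offset */

        /* prints: chunk 2, offset within chunk 1808 */
        printf("chunk %lu, offset within chunk %lu\n",
               offset >> PAGE_SHIFT, offset & (PAGE_SIZE - 1));
        return 0;
}
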
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 3968b943259ae26a5c85fcf2019ab33fad79c958..09a2230923f2841b646a6b0f032d947db67f4275 100644
@@ -154,7 +154,11 @@ struct mlx4_qp_context {
        u32                     reserved5[10];
 };
 
+/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
+#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
+
 enum {
+       MLX4_WQE_CTRL_NEC       = 1 << 29,
        MLX4_WQE_CTRL_FENCE     = 1 << 6,
        MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
        MLX4_WQE_CTRL_SOLICITED = 1 << 1,
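
As a quick check of the firmware-version encoding introduced above (not part of the patch): mlx4_fw_ver() from include/linux/mlx4/device.h packs major.minor.subminor with the major in bits 63:32, the minor in bits 31:16 and the subminor in bits 15:0, so MLX4_FW_VER_WQE_CTRL_NEC is 0x2000200e8 and an ordinary integer comparison against caps.fw_ver orders releases correctly.

#include <stdio.h>
#include <stdint.h>

/* Same packing as the kernel helper, rewritten for a user-space demo. */
static uint64_t mlx4_fw_ver(uint64_t major, uint64_t minor, uint64_t subminor)
{
        return (major << 32) | (minor << 16) | subminor;
}

int main(void)
{
        uint64_t nec = mlx4_fw_ver(2, 2, 232);  /* MLX4_FW_VER_WQE_CTRL_NEC */

        printf("2.2.232 packs to %#llx\n", (unsigned long long)nec);
        printf("2.3.0  >= 2.2.232? %d\n", mlx4_fw_ver(2, 3, 0) >= nec);  /* 1 */
        printf("2.2.0  >= 2.2.232? %d\n", mlx4_fw_ver(2, 2, 0) >= nec);  /* 0 */
        return 0;
}
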
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index cfbd38fe299825f7101925860abc70c08d7f8dbb..701e7b40560ae8b4e0ff83af131d88592b143eef 100644
@@ -95,7 +95,15 @@ enum ib_device_cap_flags {
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
        IB_DEVICE_ZERO_STAG             = (1<<15),
        IB_DEVICE_SEND_W_INV            = (1<<16),
-       IB_DEVICE_MEM_WINDOW            = (1<<17)
+       IB_DEVICE_MEM_WINDOW            = (1<<17),
+       /*
+        * Devices should set IB_DEVICE_UD_IP_SUM if they support
+        * insertion of UDP and TCP checksum on outgoing UD IPoIB
+        * messages and can verify the validity of checksum for
+        * incoming messages.  Setting this flag implies that the
+        * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
+        */
+       IB_DEVICE_UD_IP_CSUM            = (1<<18),
 };
 
 enum ib_atomic_cap {
@@ -431,6 +439,7 @@ struct ib_wc {
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
+       int                     csum_ok;
 };
 
 enum ib_cq_notify_flags {
@@ -615,7 +624,8 @@ enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
-       IB_SEND_INLINE          = (1<<3)
+       IB_SEND_INLINE          = (1<<3),
+       IB_SEND_IP_CSUM         = (1<<4)
 };
 
 struct ib_sge {
@@ -890,8 +900,6 @@ struct ib_device {
        int                          *pkey_tbl_len;
        int                          *gid_tbl_len;
 
-       u32                           flags;
-
        int                           num_comp_vectors;
 
        struct iw_cm_verbs           *iwcm;
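
The ib_verbs.h changes above only define the checksum-offload interface: the IB_DEVICE_UD_IP_CSUM capability bit, the IB_SEND_IP_CSUM send flag and the csum_ok field in struct ib_wc. How a consumer wires them together is not shown in this diff; the fragment below is a hedged sketch of typical ULP usage, with helper names invented for illustration rather than taken from these commits.

#include <linux/skbuff.h>
#include <rdma/ib_verbs.h>

/* Hypothetical: does the HCA offer UD checksum offload at all? */
static int hw_can_csum(struct ib_device *dev)
{
        struct ib_device_attr attr;

        if (ib_query_device(dev, &attr))
                return 0;
        return !!(attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM);
}

/* Hypothetical transmit path: ask the HCA to insert IP/TCP/UDP checksums. */
static void mark_tx_csum(struct ib_send_wr *wr, struct sk_buff *skb, int offload)
{
        if (offload && skb->ip_summed == CHECKSUM_PARTIAL)
                wr->send_flags |= IB_SEND_IP_CSUM;
}

/* Hypothetical receive path: trust checksums the HCA already verified. */
static void mark_rx_csum(struct sk_buff *skb, struct ib_wc *wc)
{
        if (wc->csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}
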