From f438000f7a31fad7cfd27f33ad324a250f4cd2df Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Wed, 16 Apr 2008 21:09:28 -0700
Subject: [PATCH] IB/mlx4: Micro-optimize mlx4_ib_post_send()

Rather than have build_mlx_header() return a negative value on failure
and the length of the segments it builds on success, add a pointer
parameter to return the length and return 0 on success.  This matches
the calling convention used for build_lso_seg() and generates slightly
smaller code -- eg, on 64-bit x86:

add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-22 (-22)
function                                     old     new   delta
mlx4_ib_post_send                           2023    2001     -22

Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/mlx4/qp.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2ba24308408..f5210c17e31 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1200,7 +1200,7 @@ out:
 }
 
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
-			    void *wqe)
+			    void *wqe, unsigned *mlx_seg_len)
 {
 	struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
@@ -1321,7 +1321,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		i = 2;
 	}
 
-	return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+	*mlx_seg_len =
+		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+	return 0;
 }
 
 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
@@ -1548,15 +1550,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_QPT_SMI:
 		case IB_QPT_GSI:
-			err = build_mlx_header(to_msqp(qp), wr, ctrl);
-			if (err < 0) {
+			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
+			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
 			}
-			wqe  += err;
-			size += err / 16;
-
-			err = 0;
+			wqe  += seglen;
+			size += seglen / 16;
 			break;
 
 		default:
-- 
2.41.0
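
For readers unfamiliar with the out-parameter style the patch adopts, the following standalone C sketch shows the same calling convention outside the kernel: the builder returns 0 on success and reports the built length through a pointer argument, so the caller no longer has to treat the return value as both an error code and a length. The names build_segment() and the simplified ALIGN macro below are illustrative assumptions, not part of the mlx4 driver.

/*
 * Minimal, self-contained sketch of the calling convention used by the
 * patched build_mlx_header(): 0 on success, length via out-parameter.
 * Not kernel code; build_segment() is a hypothetical stand-in.
 */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Pretend to build a segment; report its 16-byte-aligned length via *seg_len. */
static int build_segment(unsigned payload_len, unsigned *seg_len)
{
	if (payload_len == 0)
		return -1;		/* failure: *seg_len is left untouched */

	*seg_len = ALIGN(payload_len, 16);
	return 0;			/* success: length returned out of band */
}

int main(void)
{
	unsigned seglen;

	if (build_segment(42, &seglen))
		return 1;

	/* The caller consumes the length directly, as mlx4_ib_post_send() now does. */
	printf("segment length: %u bytes\n", seglen);
	return 0;
}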