IB: Whitespace fixes
author     Roland Dreier <rolandd@cisco.com>
           Fri, 22 Sep 2006 22:22:46 +0000 (15:22 -0700)
committer  Roland Dreier <rolandd@cisco.com>
           Fri, 22 Sep 2006 22:22:46 +0000 (15:22 -0700)
Remove some trailing whitespace that has snuck in despite the best
efforts of whitespace=error-all.  Also fix a few other whitespace
bogosities.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
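
[Editor's note: "whitespace=error-all" refers to git's strictest patch-application whitespace check, which rejects any patch hunk that adds whitespace errors such as trailing spaces. A minimal sketch of enabling it when applying patches; the exact workflow the maintainer used is an assumption:

    # reject patches that introduce any whitespace errors (persistent setting)
    git config apply.whitespace error-all

    # or enforce it for a single application of an incoming series
    git am --whitespace=error-all incoming.mbox

Whitespace that is committed directly, rather than applied as a patch, bypasses this check, which is how the trailing spaces below slipped in.]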
15 files changed:
drivers/infiniband/Kconfig
drivers/infiniband/core/addr.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/mad_rmpp.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/mthca/mthca_cq.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/srp/ib_srp.c

index fd2d528daa3aac069bbf126f0f4860c669ec9e08..9a329b2c108cc305ae633cacf95620b566d9da30 100644 (file)
@@ -14,7 +14,7 @@ config INFINIBAND_USER_MAD
        ---help---
          Userspace InfiniBand Management Datagram (MAD) support.  This
          is the kernel side of the userspace MAD support, which allows
-         userspace processes to send and receive MADs. You will also 
+         userspace processes to send and receive MADs. You will also
          need libibumad from <http://www.openib.org>.
 
 config INFINIBAND_USER_ACCESS
index 1205e8027829aa55dee38e10af974abd8d46ba93..d8e54e002ce3c92b205f10d51c285830e9283df4 100644 (file)
@@ -327,10 +327,10 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
-static int netevent_callback(struct notifier_block *self, unsigned long event, 
+static int netevent_callback(struct notifier_block *self, unsigned long event,
        void *ctx)
 {
-       if (event == NETEVENT_NEIGH_UPDATE) {  
+       if (event == NETEVENT_NEIGH_UPDATE) {
                struct neighbour *neigh = ctx;
 
                if (neigh->dev->type == ARPHRD_INFINIBAND &&
index c8982b02d9b6375fbcaa729596ee40308194f1ac..1c145fe92a54c6470f6767278193cf1284e7bcd0 100644 (file)
@@ -179,7 +179,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
        if (IS_ERR(ah))
                return PTR_ERR(ah);
 
-       m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 
+       m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
index 5d625a81193f079ea2f38272ed03b0381f9e1b0e..9d58bb59cd45b16c5fae6aac4fbc02151b59e6b6 100644 (file)
@@ -613,7 +613,7 @@ static void cma_destroy_listen(struct rdma_id_private *id_priv)
        if (id_priv->cma_dev) {
                switch (id_priv->id.device->node_type) {
                case IB_NODE_CA:
-                       if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+                       if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                default:
@@ -692,13 +692,13 @@ void rdma_destroy_id(struct rdma_cm_id *id)
        if (id_priv->cma_dev) {
                switch (id->device->node_type) {
                case IB_NODE_CA:
-                       if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+                       if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                default:
                        break;
                }
-               mutex_lock(&lock);
+               mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
                mutex_unlock(&lock);
        }
@@ -1492,7 +1492,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
        hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
                if (cma_any_addr(&cur_id->id.route.addr.src_addr))
                        return -EADDRNOTAVAIL;
-               
+
                cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
                if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
                        return -EADDRINUSE;
index 1c3cfbbe6a97fcf3e7e5ce71060ad9b302a53833..32d3028b274b81c43fae4313ea609e05feee02ce 100644 (file)
@@ -1246,8 +1246,8 @@ static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
        int i;
 
        for (i = 0; i < MAX_MGMT_OUI; i++)
-                /* Is there matching OUI for this vendor class ? */
-                if (!memcmp(vendor_class->oui[i], oui, 3))
+               /* Is there matching OUI for this vendor class ? */
+               if (!memcmp(vendor_class->oui[i], oui, 3))
                        return i;
 
        return -1;
@@ -2237,7 +2237,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
        list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                 &mad_agent_priv->send_list, agent_list) {
                if (mad_send_wr->status == IB_WC_SUCCESS) {
-                       mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
+                       mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
                        mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
                }
        }
@@ -2528,10 +2528,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                        }
                }
                sg_list.addr = dma_map_single(qp_info->port_priv->
-                                               device->dma_device,
+                                               device->dma_device,
                                              &mad_priv->grh,
                                              sizeof *mad_priv -
-                                               sizeof mad_priv->header,
+                                               sizeof mad_priv->header,
                                              DMA_FROM_DEVICE);
                pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
@@ -2606,7 +2606,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
        struct ib_qp *qp;
 
        attr = kmalloc(sizeof *attr, GFP_KERNEL);
-       if (!attr) {
+       if (!attr) {
                printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
                return -ENOMEM;
        }
index 3ace5f492dc4b5b3f78e20829f6b1e3381387f65..1ef79d015a1e32010c392bc6641e0a2dce99bb3f 100644 (file)
@@ -391,7 +391,7 @@ static inline int window_size(struct ib_mad_agent_private *agent)
 static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
                                                  int seg_num)
 {
-        struct ib_mad_recv_buf *seg_buf;
+       struct ib_mad_recv_buf *seg_buf;
        int cur_seg_num;
 
        list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
index d6b84226bba7b75d29cf441f319e8bbb327f2c6e..df762ba4868f8f57a1e83bd854418af87592e7b4 100644 (file)
@@ -887,7 +887,7 @@ static void send_handler(struct ib_mad_agent *agent,
        idr_remove(&query_idr, query->id);
        spin_unlock_irqrestore(&idr_lock, flags);
 
-        ib_free_send_mad(mad_send_wc->send_buf);
+       ib_free_send_mad(mad_send_wc->send_buf);
        kref_put(&query->sm_ah->ref, free_sm_ah);
        query->release(query);
 }
index 21f9282c1b25d00dcddef4fd2ccba530ad05f543..fb6660564a3096ce5193a3c3b5c6fa3faded6173 100644 (file)
@@ -68,7 +68,7 @@ struct port_table_attribute {
        int                     index;
 };
 
-static inline int ibdev_is_alive(const struct ib_device *dev) 
+static inline int ibdev_is_alive(const struct ib_device *dev)
 {
        return dev->reg_state == IB_DEV_REGISTERED;
 }
index c1c6fda9452cc44f6b44400e00eaa7f5e42ea43a..e74c964af7fa42541354001fc77e521b4b9ba67c 100644 (file)
@@ -309,9 +309,9 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
                info          = evt->param.apr_rcvd.apr_info;
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
-               uvt->resp.u.sidr_req_resp.pkey = 
+               uvt->resp.u.sidr_req_resp.pkey =
                                        evt->param.sidr_req_rcvd.pkey;
-               uvt->resp.u.sidr_req_resp.port = 
+               uvt->resp.u.sidr_req_resp.port =
                                        evt->param.sidr_req_rcvd.port;
                uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
                break;
@@ -1237,7 +1237,7 @@ static struct class ucm_class = {
 static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
 {
        struct ib_ucm_device *dev;
-       
+
        dev = container_of(class_dev, struct ib_ucm_device, class_dev);
        return sprintf(buf, "%s\n", dev->ib_dev->name);
 }
index 1273f8807e849a28f7aec5bba0aef5bd87dbba6c..8a455aec758f3b00603e0b8c701b5895295440e3 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
index 3fcb5d189a2344c48e2f9ea45cf840d790ab5304..b72c7f69ca906b0e9931a4b5b53a270e58dd765e 100644 (file)
@@ -1676,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
                                break;
                }
 
-
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;
@@ -1726,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
                                break;
                }
 
-
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;
index 47c9d15557c8ae14fdda21a8f0d71c7fbd77fc59..2108466c7e337169649c4a64e0f9157995a9aca7 100644 (file)
@@ -2126,9 +2126,9 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
                dd->ipath_rx_pol_inv = new_pol_inv;
                val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
                val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-                         INFINIPATH_XGXS_RX_POL_SHIFT);
-                val |= ((u64)dd->ipath_rx_pol_inv) <<
-                        INFINIPATH_XGXS_RX_POL_SHIFT;
+                        INFINIPATH_XGXS_RX_POL_SHIFT);
+               val |= ((u64)dd->ipath_rx_pol_inv) <<
+                       INFINIPATH_XGXS_RX_POL_SHIFT;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
        }
        return 0;
index 3e27a084257e9f23b7a4fba49561e15cfddf5b91..e393681ba7d46a1988508c3f5d25e27b2cd4261d 100644 (file)
@@ -544,11 +544,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
                wq = &(*cur_qp)->rq;
                wqe = be32_to_cpu(cqe->wqe);
                wqe_index = wqe >> wq->wqe_shift;
-               /*
-               * WQE addr == base - 1 might be reported in receive completion
-               * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
-               * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
-               */
+               /*
+                * WQE addr == base - 1 might be reported in receive completion
+                * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
+                * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
+                */
                if (unlikely(wqe_index < 0))
                        wqe_index = wq->max - 1;
                entry->wr_id = (*cur_qp)->wrid[wqe_index];
index ec356ce7cdcdefc3fbb91d6f7e3b9bf8ea071a82..60b09f5cb3476294d6ab2e77d7b61e7867e49c9a 100644 (file)
@@ -795,7 +795,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        }
 
        if (priv->broadcast) {
-               rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+               rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
                list_add_tail(&priv->broadcast->list, &remove_list);
                priv->broadcast = NULL;
        }
index 249a98c06aeb0ba0e480174169473f8ccd560b35..61c13d1e050636f35fe94fd1034861959173b557 100644 (file)
@@ -330,7 +330,7 @@ static int srp_send_req(struct srp_target_port *target)
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        /*
-        * In the published SRP specification (draft rev. 16a), the 
+        * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.