IB/ehca: Change idr spinlocks into rwlocks
author    Joachim Fenkes <fenkes@de.ibm.com>
          Mon, 9 Jul 2007 13:31:10 +0000 (15:31 +0200)
committer Roland Dreier <rolandd@cisco.com>
          Tue, 10 Jul 2007 03:12:27 +0000 (20:12 -0700)
This eliminates lock contention among IRQ handlers, as well as the need to
disable IRQs around idr_find(), because no writer takes the idr locks from
IRQ context.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/ehca/ehca_classes.h
drivers/infiniband/hw/ehca/ehca_cq.c
drivers/infiniband/hw/ehca/ehca_irq.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ehca/ehca_qp.c
drivers/infiniband/hw/ehca/ehca_uverbs.c
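
For context, the resulting locking pattern looks roughly like the sketch below
(hypothetical example_* helpers, not the driver's actual functions). Lookups
may run from IRQ or tasklet context, so they take only the read lock; because
no writer ever takes the lock from interrupt context, readers need not disable
IRQs. Writers run in process context only, but still disable IRQs so that an
interrupt-context reader on the same CPU cannot spin against a held write lock.

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_idr_lock);
static DEFINE_IDR(example_idr);

/* reader: safe from IRQ, tasklet or process context */
static void *example_lookup(int token)
{
	void *entry;

	read_lock(&example_idr_lock);
	entry = idr_find(&example_idr, token);
	read_unlock(&example_idr_lock);

	return entry;
}

/* writer: process context only, IRQs disabled while the lock is held */
static int example_insert(void *entry, int *token)
{
	unsigned long flags;
	int ret;

	write_lock_irqsave(&example_idr_lock, flags);
	ret = idr_get_new(&example_idr, entry, token);
	write_unlock_irqrestore(&example_idr_lock, flags);

	return ret;
}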

diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 8580f2a0ea57dab2a4168c7c606306cea448cbf8..f1e0db2ff16c25423dfd80bafcfe07fbf3f5adca 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -293,8 +293,8 @@ void ehca_cleanup_av_cache(void);
 int ehca_init_mrmw_cache(void);
 void ehca_cleanup_mrmw_cache(void);
 
-extern spinlock_t ehca_qp_idr_lock;
-extern spinlock_t ehca_cq_idr_lock;
+extern rwlock_t ehca_qp_idr_lock;
+extern rwlock_t ehca_cq_idr_lock;
 extern struct idr ehca_qp_idr;
 extern struct idr ehca_cq_idr;
 
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 3729997457caae4dd1963d840b9d76773afbb249..01d4a148bd719c4cd955c0628034b97cc14543d8 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -163,9 +163,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
                        goto create_cq_exit1;
                }
 
-               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+               write_lock_irqsave(&ehca_cq_idr_lock, flags);
                ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
-               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+               write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
        } while (ret == -EAGAIN);
 
@@ -294,9 +294,9 @@ create_cq_exit3:
                         "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
 
 create_cq_exit2:
-       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+       write_lock_irqsave(&ehca_cq_idr_lock, flags);
        idr_remove(&ehca_cq_idr, my_cq->token);
-       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 create_cq_exit1:
        kmem_cache_free(cq_cache, my_cq);
@@ -334,9 +334,9 @@ int ehca_destroy_cq(struct ib_cq *cq)
         * remove the CQ from the idr first to make sure
         * no more interrupt tasklets will touch this CQ
         */
-       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+       write_lock_irqsave(&ehca_cq_idr_lock, flags);
        idr_remove(&ehca_cq_idr, my_cq->token);
-       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
        /* now wait until all pending events have completed */
        wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3e790a326d97601cef26978f0df2309349b14264..02b73c84c49b78fde6a4788ce7ae3740f632e390 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -180,12 +180,11 @@ static void qp_event_callback(struct ehca_shca *shca,
 {
        struct ib_event event;
        struct ehca_qp *qp;
-       unsigned long flags;
        u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
 
-       spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+       read_lock(&ehca_qp_idr_lock);
        qp = idr_find(&ehca_qp_idr, token);
-       spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+       read_unlock(&ehca_qp_idr_lock);
 
 
        if (!qp)
@@ -209,14 +208,13 @@ static void cq_event_callback(struct ehca_shca *shca,
                              u64 eqe)
 {
        struct ehca_cq *cq;
-       unsigned long flags;
        u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
 
-       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+       read_lock(&ehca_cq_idr_lock);
        cq = idr_find(&ehca_cq_idr, token);
        if (cq)
                atomic_inc(&cq->nr_events);
-       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+       read_unlock(&ehca_cq_idr_lock);
 
        if (!cq)
                return;
@@ -411,7 +409,6 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 {
        u64 eqe_value;
        u32 token;
-       unsigned long flags;
        struct ehca_cq *cq;
 
        eqe_value = eqe->entry;
@@ -419,11 +416,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
        if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                ehca_dbg(&shca->ib_device, "Got completion event");
                token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+               read_lock(&ehca_cq_idr_lock);
                cq = idr_find(&ehca_cq_idr, token);
                if (cq)
                        atomic_inc(&cq->nr_events);
-               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+               read_unlock(&ehca_cq_idr_lock);
                if (cq == NULL) {
                        ehca_err(&shca->ib_device,
                                 "Invalid eqe for non-existing cq token=%x",
@@ -480,11 +477,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                eqe_value = eqe_cache[eqe_cnt].eqe->entry;
                if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                        token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-                       spin_lock(&ehca_cq_idr_lock);
+                       read_lock(&ehca_cq_idr_lock);
                        eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
                        if (eqe_cache[eqe_cnt].cq)
                                atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
-                       spin_unlock(&ehca_cq_idr_lock);
+                       read_unlock(&ehca_cq_idr_lock);
                        if (!eqe_cache[eqe_cnt].cq) {
                                ehca_err(&shca->ib_device,
                                         "Invalid eqe for non-existing cq "
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 32396b203f1413208fb36b6e9b90697e6f2e44f2..28ba2dd242165f626ead5afc46f43a978de18d9b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -96,8 +96,8 @@ MODULE_PARM_DESC(static_rate,
 MODULE_PARM_DESC(scaling_code,
                 "set scaling code (0: disabled/default, 1: enabled)");
 
-DEFINE_SPINLOCK(ehca_qp_idr_lock);
-DEFINE_SPINLOCK(ehca_cq_idr_lock);
+DEFINE_RWLOCK(ehca_qp_idr_lock);
+DEFINE_RWLOCK(ehca_cq_idr_lock);
 DEFINE_IDR(ehca_qp_idr);
 DEFINE_IDR(ehca_cq_idr);
 
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 31d21526df5e15256fdc86cd8422a02be3b3bebb..74671250303f00433247cae49679b89be401a6f8 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -512,9 +512,9 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
                        goto create_qp_exit0;
                }
 
-               spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+               write_lock_irqsave(&ehca_qp_idr_lock, flags);
                ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
-               spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+               write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
        } while (ret == -EAGAIN);
 
@@ -733,9 +733,9 @@ create_qp_exit2:
        hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 
 create_qp_exit1:
-       spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+       write_lock_irqsave(&ehca_qp_idr_lock, flags);
        idr_remove(&ehca_qp_idr, my_qp->token);
-       spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
 create_qp_exit0:
        kmem_cache_free(qp_cache, my_qp);
@@ -1706,9 +1706,9 @@ int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
                }
        }
 
-       spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+       write_lock_irqsave(&ehca_qp_idr_lock, flags);
        idr_remove(&ehca_qp_idr, my_qp->token);
-       spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
        h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
        if (h_ret != H_SUCCESS) {
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index d8fe37d56f1a5e86d9252507d73b533ef35608a7..3031b3bb56f9bc6b7ad3eb24d58d2b6d21f138ac 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -253,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
        u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
        u32 cur_pid = current->tgid;
        u32 ret;
-       unsigned long flags;
        struct ehca_cq *cq;
        struct ehca_qp *qp;
        struct ehca_pd *pd;
@@ -261,9 +260,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
        switch (q_type) {
        case  1: /* CQ */
-               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+               read_lock(&ehca_cq_idr_lock);
                cq = idr_find(&ehca_cq_idr, idr_handle);
-               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+               read_unlock(&ehca_cq_idr_lock);
 
                /* make sure this mmap really belongs to the authorized user */
                if (!cq)
@@ -289,9 +288,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                break;
 
        case 2: /* QP */
-               spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+               read_lock(&ehca_qp_idr_lock);
                qp = idr_find(&ehca_qp_idr, idr_handle);
-               spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+               read_unlock(&ehca_qp_idr_lock);
 
                /* make sure this mmap really belongs to the authorized user */
                if (!qp)
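
One detail worth spelling out (a reading of the hunks above, not text from the
commit message): idr_find() returns only a bare pointer, so the event path pins
the CQ by incrementing nr_events while still holding the read lock, and the
destroy path first unpublishes the token under the write lock, then waits for
in-flight events to drain. Roughly:

	/* event side (IRQ/tasklet context) */
	read_lock(&ehca_cq_idr_lock);
	cq = idr_find(&ehca_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);	/* pin before dropping the lock */
	read_unlock(&ehca_cq_idr_lock);
	/*
	 * ... handle the event; the event path presumably drops the pin with
	 * something like:
	 *	if (atomic_dec_and_test(&cq->nr_events))
	 *		wake_up(&cq->wait_completion);
	 */

	/* destroy side (process context) */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, cq->token);	/* no new lookups can find it */
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
	wait_event(cq->wait_completion, !atomic_read(&cq->nr_events));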