unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };
 
+/*
+ * next_index - wrap-around increment of a qmap ring index.
+ *
+ * Returns (cur_index + 1) modulo @limit without a division: callers use
+ * this in place of the open-coded "(idx + 1) % entries" pattern.
+ * Assumes cur_index < limit (an in-range ring index) — TODO confirm all
+ * callers maintain that invariant.
+ */
+static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
+{
+       unsigned int temp = cur_index + 1;
+       return (temp == limit) ? 0 : temp;
+}
+
 struct ehca_qp {
        union {
                struct ib_qp ib_qp;
 
                return -EFAULT;
        }
 
-       tail_idx = (qmap->tail + 1) % qmap->entries;
+       tail_idx = next_index(qmap->tail, qmap->entries);
        wqe_idx = q_ofs / ipz_queue->qe_size;
 
        /* check all processed wqes, whether a cqe is requested or not */
        while (tail_idx != wqe_idx) {
                if (qmap->map[tail_idx].cqe_req)
                        qmap->left_to_poll++;
-               tail_idx = (tail_idx + 1) % qmap->entries;
+               tail_idx = next_index(tail_idx, qmap->entries);
        }
        /* save index in queue, where we have to start flushing */
        qmap->next_wqe_idx = wqe_idx;
        } else {
                spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
                my_qp->sq_map.left_to_poll = 0;
-               my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
-                                               my_qp->sq_map.entries;
+               my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+                                                       my_qp->sq_map.entries);
                spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
 
                spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
                my_qp->rq_map.left_to_poll = 0;
-               my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
-                                               my_qp->rq_map.entries;
+               my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+                                                       my_qp->rq_map.entries);
                spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
        }
 
 
                 * set left_to_poll to 0 because in error state, we will not
                 * get any additional CQEs
                 */
-               my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
-                                               my_qp->sq_map.entries;
+               my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+                                                       my_qp->sq_map.entries);
                my_qp->sq_map.left_to_poll = 0;
                ehca_add_to_err_list(my_qp, 1);
 
-               my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
-                                               my_qp->rq_map.entries;
+               my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+                                                       my_qp->rq_map.entries);
                my_qp->rq_map.left_to_poll = 0;
                if (HAS_RQ(my_qp))
                        ehca_add_to_err_list(my_qp, 0);
 
                /* mark as reported and advance next_wqe pointer */
                qmap_entry->reported = 1;
-               qmap->next_wqe_idx++;
-               if (qmap->next_wqe_idx == qmap->entries)
-                       qmap->next_wqe_idx = 0;
+               qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
+                                               qmap->entries);
                qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
                wc++; nr++;