/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  Redistributions of source code must retain the above copyright notice, this
 *  list of conditions and the following disclaimer.
 *
 *  Redistributions in binary form must reproduce the above copyright notice,
 *  this list of conditions and the following disclaimer in the documentation
 *  and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"

static struct kmem_cache *qp_cache;
/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
				     IB_QP_MAX_QP_RD_ATOMIC   | \
				     IB_QP_ACCESS_FLAGS       | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)
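
/*
 * ehca_query_qp() below rejects any attribute mask that has one of these
 * bits set; see the check at the top of that function.
 */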
/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};

/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_SQD2SQD,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};
/*
 * ib2ehca_qp_state maps IB to ehca qp_state
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:	return EHCA_QPS_RESET;
	case IB_QPS_INIT:	return EHCA_QPS_INIT;
	case IB_QPS_RTR:	return EHCA_QPS_RTR;
	case IB_QPS_RTS:	return EHCA_QPS_RTS;
	case IB_QPS_SQD:	return EHCA_QPS_SQD;
	case IB_QPS_SQE:	return EHCA_QPS_SQE;
	case IB_QPS_ERR:	return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca2ib_qp_state maps ehca to IB qp_state
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:	return IB_QPS_RESET;
	case EHCA_QPS_INIT:	return IB_QPS_INIT;
	case EHCA_QPS_RTR:	return IB_QPS_RTR;
	case EHCA_QPS_RTS:	return IB_QPS_RTS;
	case EHCA_QPS_SQD:	return IB_QPS_SQD;
	case EHCA_QPS_SQE:	return IB_QPS_SQE;
	case EHCA_QPS_ERR:	return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};
/*
 * ib2ehcaqptype maps IB to ehca qp_type
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;

	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:	index = IB_QPST_RESET2INIT; break;
		case IB_QPS_INIT:	index = IB_QPST_INIT2INIT; break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:	index = IB_QPST_RTR2RTS; break;
		case IB_QPS_RTS:	index = IB_QPST_RTS2RTS; break;
		case IB_QPS_SQD:	index = IB_QPST_SQD2RTS; break;
		case IB_QPS_SQE:	index = IB_QPST_SQE2RTS; break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}
	return index;
}
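
/*
 * Example: a legal INIT -> RTR modify resolves to IB_QPST_INIT2RTR above,
 * while an illegal jump such as RESET -> RTS falls through every case and
 * yields -EINVAL, which internal_modify_qp() reports as an invalid state
 * change.
 */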
/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}
/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%li", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%li", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}
static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}
static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);

	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}
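
/*
 * Worked example for the rounding above: the iteration
 * act_nr_sge = 4 + 2 * act_nr_sge visits 4, 12, 28, 60, 124, 252.
 * Assuming the usual WQE layout of struct ehca_wqe (16-byte SG entries
 * behind a fixed header, so that ehca_calc_wqe_size(4, 0) == 128), these
 * counts map to WQE sizes of 128, 256, 512, 1024, 2048 and 4096 bytes -
 * each a power of two. The small-queue encoding relies on this: page_size 2
 * selects a 128 << 2 = 512 byte mini-page and page_size 3 a 1024 byte one
 * (cf. the ipz_queue_ctor() call in init_qp_queue()); anything larger uses
 * full EHCA_PAGESIZE pages with is_small == 0.
 */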
/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	if (list_empty(node))
		list_add_tail(node, list);
}
static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}
static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	qmap->tail = 0;
	for (i = 0; i < qmap->entries; i++)
		qmap->map[i].reported = 1;
}
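
/*
 * Marking every entry as "reported" makes the flush-CQE machinery treat the
 * whole queue as already delivered, so no bogus flush completions are
 * generated for WQEs that were never posted (see the reset_queue_map()
 * calls in internal_create_qp() below).
 */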
/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", shca->max_num_qps);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}
	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LLQP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;
	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		if (qp_type == IB_QPT_UC) {
			ehca_err(pd->device, "UC with SRQ not supported");
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ pd=%p max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}
	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}
	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			    && init_attr->cap.max_send_sge >= 1
			    && init_attr->cap.max_recv_sge <= 5
			    && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}
	if (pd->uobject && udata)
		context = pd->uobject->context;

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}
	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		write_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	if (my_qp->token > 0x1FFFFFF) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid number of qp");
		goto create_qp_exit1;
	}
	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}
	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}
	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}

		break;

	default:
		break;
	}
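
	/*
	 * The +/-2 SGE adjustment is the other half of the UD_AV
	 * circumvention: two extra SGEs per WQE were requested from the
	 * firmware above (max_send_sge/max_recv_sge += 2) to carry the UD
	 * address vector, and are subtracted again here so the
	 * consumer-visible capacity matches what was asked for.
	 * ehca_query_qp() applies the same -2 correction.
	 */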
	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
			my_qp->ipz_squeue.qe_size;
		my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
					    sizeof(struct ehca_qmap_entry));
		if (!my_qp->sq_map.map) {
			ehca_err(pd->device, "Couldn't allocate squeue "
				 "map ret=%i", ret);
			ret = -ENOMEM;
			goto create_qp_exit3;
		}
		INIT_LIST_HEAD(&my_qp->sq_err_node);
		/* to avoid the generation of bogus flush CQEs */
		reset_queue_map(&my_qp->sq_map);
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}

		my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
			my_qp->ipz_rqueue.qe_size;
		my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
					    sizeof(struct ehca_qmap_entry));
		if (!my_qp->rq_map.map) {
			ehca_err(pd->device, "Couldn't allocate rqueue "
				 "map ret=%i", ret);
			ret = -ENOMEM;
			goto create_qp_exit5;
		}
		INIT_LIST_HEAD(&my_qp->rq_err_node);
		/* to avoid the generation of bogus flush CQEs */
		reset_queue_map(&my_qp->rq_map);
	} else if (init_attr->srq) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}

	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;
	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ret = -ENOMEM;
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				goto create_qp_exit5;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			kfree(my_qp->mod_qp_parm);
			my_qp->mod_qp_parm = NULL;
			/* the QP pointer is no longer valid */
			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
				NULL;
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}
	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}
	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;
create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp))
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp))
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}
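
/*
 * Illustrative only - not part of this driver: a kernel ULP never calls
 * ehca_create_qp() directly but reaches it through the verbs layer.
 * Assuming previously created pd/scq/rcq objects, a minimal RC QP request
 * that ends up here could look like this:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type     = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.send_cq     = scq,
 *		.recv_cq     = rcq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *
 * On return, init_attr.cap holds the actual queue sizes granted by the
 * firmware (see the cap write-back at the end of internal_create_qp()).
 */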
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);
struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}
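
/*
 * Note that, unlike regular QPs whose state is driven by the consumer
 * through modify_qp(), the SRQ above is walked through INIT -> (enable) ->
 * RTR right at create time: the verbs interface offers no SRQ state
 * transitions beyond the limit handling in ehca_modify_srq() below.
 */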
/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	wqe->wqef = 0;

	return 0;
}
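
/*
 * The 0xff sentinel tested by the loop above is planted by
 * internal_modify_qp(), which marks the next free WQE with
 * optype = wqef = 0xff (under spinlock_s) before calling
 * prepare_sqe_rts(). The sentinel terminates the walk but is never
 * executed, hence the "one less" in the flusherr_wqe_cnt debug output.
 */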
static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = abs_to_virt(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	wqe_idx = q_ofs / ipz_queue->qe_size;
	if (wqe_idx < qmap->tail)
		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
	else
		qmap->left_to_poll = wqe_idx - qmap->tail;

	return 0;
}
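
/*
 * Worked example: with entries = 16 and tail = 10, a firmware WQE index of
 * 3 means the ring wrapped, so left_to_poll = (16 - 10) + 3 = 9
 * completions are still outstanding; an index of 13 (no wrap) gives
 * 13 - 10 = 3.
 */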
static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%li",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				     &my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				     &my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					       flags);
		}
	}

	return 0;
}
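
/*
 * When left_to_poll is still nonzero for either queue, the QP is
 * deliberately not queued on the CQs' error lists here; that is deferred
 * until the outstanding genuine completions have been polled, so flush
 * CQEs are only ever generated for WQEs that never completed.
 */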
/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;

	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}
	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = -EINVAL;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);

	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}

	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);

	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}
	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}
	/*
	 * enable RDMA_Atomic_Control if reset->init and reliable con
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}
	/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->alt_pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}

	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}

	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%li ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}
	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}

	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp))
			reset_queue_map(&my_qp->rq_map);
	}

	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);

	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, i.e. we got a PORT_ACTIVE event, we'll replay the
	 * cached modify call sequence, see ehca_recover_sqp() below.
	 * Why that is required:
	 * 1) If one port is connected, older code required port one to be
	 *    connected and the module option nr_ports=1 to be given by the
	 *    user, which is very inconvenient for the end user.
	 * 2) Firmware accepts modify_qp() only if the respective port has
	 *    become active. Older code had a wait loop of 30sec in
	 *    create_qp()/define_aqp1(), which is not appropriate in practice.
	 *    This code removes that wait loop, see define_aqp1(), and always
	 *    reports all ports to ib_mad and its users. Only activated ports
	 *    are then usable.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}
			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

out:
	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}
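
/*
 * How the caching above plays out: during ib_mad bring-up, a
 * modify_qp(SMI/GSI, RESET -> INIT -> RTR -> RTS) sequence may arrive
 * before the port reports PORT_ACTIVE. Each call lands in mod_qp_parm[]
 * instead of being sent to firmware; once the PORT_ACTIVE event fires,
 * ehca_recover_sqp() below replays the saved attr/mask pairs in order and
 * finally re-arms the receive WRs that were posted in the meantime.
 */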
void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free_qp_parm;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free_qp_parm:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
	my_sqp->mod_qp_parm = NULL;
}
int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int cnt, ret = 0;
	u64 h_ret;

	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device, "Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(qp->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
	/* UD_AV CIRCUMVENTION */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);

	qp_attr->port_num =
		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);

	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	qp_attr->alt_pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);

	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag)
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate path */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al)
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	if (qp_init_attr)
		*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct ehca_qp *my_qp =
		container_of(ibsrq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca =
		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 update_mask = 0;
	u64 h_ret;
	int ret = 0;

	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	if (attr_mask & IB_SRQ_LIMIT) {
		attr_mask &= ~IB_SRQ_LIMIT;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
		mqpcb->curr_srq_limit =
			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
		mqpcb->qp_aff_asyn_ev_log_reg =
			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
	}

	/* by now, all bits in attr_mask should have been cleared */
	if (attr_mask) {
		ehca_err(ibsrq->device, "invalid attribute mask bits set "
			 "attr_mask=%x", attr_mask);
		ret = -EINVAL;
		goto modify_srq_exit0;
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
				 NULL, update_mask, mqpcb,
				 my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x",
			 h_ret, my_qp, my_qp->real_qp_num);
	}

modify_srq_exit0:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int ret = 0;
	u64 h_ret;

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(srq->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
				NULL, qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(srq->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, h_ret);
		goto query_srq_exit1;
	}

	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
	srq_attr->max_sge = 3;
	srq_attr->srq_limit = EHCA_BMASK_GET(
		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject)
{
	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
	u32 qp_num = my_qp->real_qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	enum ib_qp_type qp_type;
	unsigned long flags;

	if (uobject) {
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(dev, "Resources still referenced in "
				 "user space qp_num=%x", qp_num);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
		if (ret) {
			ehca_err(dev, "Couldn't unassign qp from "
				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
				 qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/*
	 * SRQs will never get into an error list and do not have a recv_cq,
	 * so we need to skip them here.
	 */
	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

	if (HAS_SQ(my_qp))
		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

	/* now wait until all pending events have completed */
	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type = my_qp->init_attr.qp_type;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		kfree(my_qp->mod_qp_parm);
		my_qp->mod_qp_parm = NULL;
		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		struct ib_event event;
		ehca_info(dev, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	if (HAS_RQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

		vfree(my_qp->rq_map.map);
	}
	if (HAS_SQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

		vfree(my_qp->sq_map.map);
	}
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return 0;
}
int ehca_destroy_qp(struct ib_qp *qp)
{
	return internal_destroy_qp(qp->device,
				   container_of(qp, struct ehca_qp, ib_qp),
				   qp->uobject);
}

int ehca_destroy_srq(struct ib_srq *srq)
{
	return internal_destroy_qp(srq->device,
				   container_of(srq, struct ehca_qp, ib_srq),
				   srq->uobject);
}

int ehca_init_qp_cache(void)
{
	qp_cache = kmem_cache_create("ehca_cache_qp",
				     sizeof(struct ehca_qp), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!qp_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_qp_cache(void)
{
	if (qp_cache)
		kmem_cache_destroy(qp_cache);
}