2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
4 * Firmware Infiniband Interface code for POWER
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 * Waleri Fomin <fomin@de.ibm.com>
12 * Copyright (c) 2005 IBM Corporation
14 * All rights reserved.
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
45 #include <asm/hvcall.h>
46 #include "ehca_tools.h"
50 #include "ipz_pt_fn.h"
/*
 * Bit-field layouts for the H_ALLOC_RESOURCE(QP) hypervisor call.
 * EHCA_BMASK_IBM uses IBM bit numbering (bit 0 = MSB of the 64-bit reg).
 */
/* input register r5: allocation controls */
52 #define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
53 #define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
54 #define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
55 #define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
56 #define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
57 #define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
58 #define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
59 #define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
60 #define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
61 #define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
62 #define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
/* input register r10: requested work-request / SGE maxima */
64 #define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
65 #define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
66 #define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
67 #define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
/* SRQ-related input fields (r11/r12) */
69 #define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
70 #define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
/* NOTE(review): upper bound 64 exceeds the 0..63 bit range -- verify
 * against EHCA_BMASK_IBM's definition before relying on this mask. */
71 #define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
72 #define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
73 #define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
/* output fields: actual (granted) WR/SGE counts and queue sizes */
75 #define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
76 #define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
77 #define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
78 #define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
80 #define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
81 #define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
/* H_MODIFY_PORT attribute bits */
83 #define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
84 #define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
85 #define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
/* printf format helpers for dumping hcall argument registers */
87 #define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
88 #define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
89 #define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
/* serializes selected H_ALLOC_RESOURCE calls (see ehca_plpar_hcall9) */
91 static DEFINE_SPINLOCK(hcall_lock);
/*
 * Map an H_LONG_BUSY_* hcall return code to a retry delay in msecs.
 * NOTE(review): the per-case return statements, the default branch and
 * the closing brace are not visible here -- source appears truncated.
 */
93 static u32 get_longbusy_msecs(int longbusy_rc)
95 switch (longbusy_rc) {
96 case H_LONG_BUSY_ORDER_1_MSEC:
98 case H_LONG_BUSY_ORDER_10_MSEC:
100 case H_LONG_BUSY_ORDER_100_MSEC:
102 case H_LONG_BUSY_ORDER_1_SEC:
104 case H_LONG_BUSY_ORDER_10_SEC:
106 case H_LONG_BUSY_ORDER_100_SEC:
/*
 * Retry wrapper around plpar_hcall_norets(): issues the hcall up to
 * 5 times, sleeping (interruptibly) for the hypervisor-suggested delay
 * whenever a LONG_BUSY code is returned; logs failures.
 * NOTE(review): parameter list, retry-exit logic and closing braces are
 * not fully visible -- source appears truncated.
 */
113 static long ehca_plpar_hcall_norets(unsigned long opcode,
125 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
126 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
128 for (i = 0; i < 5; i++) {
129 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
132 if (H_IS_LONG_BUSY(ret)) {
133 sleep_msecs = get_longbusy_msecs(ret);
134 msleep_interruptible(sleep_msecs);
139 ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
140 opcode, ret, arg1, arg2, arg3,
141 arg4, arg5, arg6, arg7);
143 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
/*
 * Retry wrapper around plpar_hcall9() (nine output registers in outs[]).
 * Same 5-attempt LONG_BUSY backoff as ehca_plpar_hcall_norets().
 * Certain H_ALLOC_RESOURCE calls (arg2 == 5; presumably a specific
 * resource type -- TODO confirm) are serialized under hcall_lock.
 * NOTE(review): lock_is_set bookkeeping and loop exits are only
 * partially visible -- source appears truncated.
 */
151 static long ehca_plpar_hcall9(unsigned long opcode,
152 unsigned long *outs, /* array of 9 outputs */
164 int i, sleep_msecs, lock_is_set = 0;
165 unsigned long flags = 0;
167 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
168 arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
170 for (i = 0; i < 5; i++) {
171 if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
172 spin_lock_irqsave(&hcall_lock, flags);
176 ret = plpar_hcall9(opcode, outs,
177 arg1, arg2, arg3, arg4, arg5,
178 arg6, arg7, arg8, arg9);
181 spin_unlock_irqrestore(&hcall_lock, flags);
183 if (H_IS_LONG_BUSY(ret)) {
184 sleep_msecs = get_longbusy_msecs(ret);
185 msleep_interruptible(sleep_msecs);
189 if (ret < H_SUCCESS) {
190 ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
191 opcode, arg1, arg2, arg3, arg4, arg5,
192 arg6, arg7, arg8, arg9);
193 ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
194 ret, outs[0], outs[1], outs[2], outs[3],
195 outs[4], outs[5], outs[6], outs[7],
198 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
199 ret, outs[0], outs[1], outs[2], outs[3],
200 outs[4], outs[5], outs[6], outs[7],
/*
 * Allocate an event queue (EQ) via H_ALLOC_RESOURCE and return the
 * handle, granted entry count, page count and interrupt source (IST).
 * neq_control == 1 selects the notification event queue variant.
 * NOTE(review): tail of the argument list and the return statement are
 * not visible -- source appears truncated.
 */
208 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
209 struct ehca_pfeq *pfeq,
210 const u32 neq_control,
211 const u32 number_of_entries,
212 struct ipz_eq_handle *eq_handle,
213 u32 *act_nr_of_entries,
218 u64 outs[PLPAR_HCALL9_BUFSIZE];
219 u64 allocate_controls;
/* resource type EQ */
222 allocate_controls = 3ULL;
224 /* ISN is associated */
225 if (neq_control != 1)
226 allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
227 else /* notification event queue */
228 allocate_controls = (1ULL << 63) | allocate_controls;
230 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
231 adapter_handle.handle, /* r4 */
232 allocate_controls, /* r5 */
233 number_of_entries, /* r6 */
/* unpack granted resources from the output registers */
235 eq_handle->handle = outs[0];
236 *act_nr_of_entries = (u32)outs[3];
237 *act_pages = (u32)outs[4];
238 *eq_ist = (u32)outs[5];
240 if (ret == H_NOT_ENOUGH_RESOURCES)
241 ehca_gen_err("Not enough resource - ret=%lx ", ret);
/*
 * Re-arm/reset event delivery for the given EQ by forwarding
 * H_RESET_EVENTS to the hypervisor (event_mask in a trailing register
 * not visible here -- source appears truncated).
 */
246 u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
247 struct ipz_eq_handle eq_handle,
248 const u64 event_mask)
250 return ehca_plpar_hcall_norets(H_RESET_EVENTS,
251 adapter_handle.handle, /* r4 */
252 eq_handle.handle, /* r5 */
/*
 * Allocate a completion queue (CQ) via H_ALLOC_RESOURCE, store the
 * handle/granted sizes into cq/param and, on success, construct the
 * galpa (hardware register access) pages from outs[5]/outs[6].
 */
257 u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
259 struct ehca_alloc_cq_parms *param)
262 u64 outs[PLPAR_HCALL9_BUFSIZE];
264 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
265 adapter_handle.handle, /* r4 */
267 param->eq_handle.handle, /* r6 */
269 param->nr_cqe, /* r8 */
271 cq->ipz_cq_handle.handle = outs[0];
272 param->act_nr_of_entries = (u32)outs[3];
273 param->act_pages = (u32)outs[4];
275 if (ret == H_SUCCESS)
276 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
278 if (ret == H_NOT_ENOUGH_RESOURCES)
279 ehca_gen_err("Not enough resources. ret=%lx", ret);
/*
 * Allocate a queue pair (QP/SRQ) via H_ALLOC_RESOURCE.  Packs the
 * requested attributes into the control registers using the
 * H_ALL_RES_QP_* bit masks, then unpacks the granted WQE/SGE counts
 * and queue sizes from the output registers into parms.
 */
284 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
285 struct ehca_alloc_qp_parms *parms)
288 u64 allocate_controls, max_r10_reg, r11, r12;
289 u64 outs[PLPAR_HCALL9_BUFSIZE];
/* r5: allocation controls assembled from the request */
292 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
293 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
294 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
295 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
296 | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
297 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
298 parms->squeue.page_size)
299 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
300 parms->rqueue.page_size)
301 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
302 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
303 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
304 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
305 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
306 parms->ud_av_l_key_ctl)
307 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
/* r10: requested maxima; +1 reserves one extra WQE per queue */
310 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
311 parms->squeue.max_wr + 1)
312 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
313 parms->rqueue.max_wr + 1)
314 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
315 parms->squeue.max_sge)
316 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
317 parms->rqueue.max_sge);
319 r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
/* r12: SRQ limit when creating an SRQ, else the SRQ's QP number */
321 if (parms->ext_type == EQPT_SRQ)
322 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
324 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
326 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
327 adapter_handle.handle, /* r4 */
328 allocate_controls, /* r5 */
329 parms->send_cq_handle.handle,
330 parms->recv_cq_handle.handle,
331 parms->eq_handle.handle,
332 ((u64)parms->token << 32) | parms->pd.value,
333 max_r10_reg, r11, r12);
/* unpack the granted attributes */
335 parms->qp_handle.handle = outs[0];
336 parms->real_qp_num = (u32)outs[1];
337 parms->squeue.act_nr_wqes =
338 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
339 parms->rqueue.act_nr_wqes =
340 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
341 parms->squeue.act_nr_sges =
342 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
343 parms->rqueue.act_nr_sges =
344 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
345 parms->squeue.queue_size =
346 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
347 parms->rqueue.queue_size =
348 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
/* presumably kernel and user galpa share outs[6] here (unlike the CQ
 * path, which uses outs[5]/outs[6]) -- TODO confirm against firmware doc */
350 if (ret == H_SUCCESS)
351 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
353 if (ret == H_NOT_ENOUGH_RESOURCES)
354 ehca_gen_err("Not enough resources. ret=%lx", ret);
/*
 * Query port attributes via H_QUERY_PORT.  The response block must be
 * page aligned because its absolute address is handed to firmware.
 */
359 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
361 struct hipz_query_port *query_port_response_block)
364 u64 r_cb = virt_to_abs(query_port_response_block);
366 if (r_cb & (EHCA_PAGESIZE-1)) {
367 ehca_gen_err("response block not page aligned");
371 ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
372 adapter_handle.handle, /* r4 */
/* dump the first 64 bytes of the response when debugging is on */
377 if (ehca_debug_level)
378 ehca_dmp(query_port_response_block, 64, "response_block");
/*
 * Modify port attributes via H_MODIFY_PORT.  Translates the IB
 * modify_mask bits into the hypervisor's H_MP_* attribute bits,
 * starting from the raw port capability word.
 */
383 u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
384 const u8 port_id, const u32 port_cap,
385 const u8 init_type, const int modify_mask)
387 u64 port_attributes = port_cap;
389 if (modify_mask & IB_PORT_SHUTDOWN)
390 port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
391 if (modify_mask & IB_PORT_INIT_TYPE)
392 port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
393 if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
394 port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
396 return ehca_plpar_hcall_norets(H_MODIFY_PORT,
397 adapter_handle.handle, /* r4 */
399 port_attributes, /* r6 */
/*
 * Query HCA attributes via H_QUERY_HCA into a page-aligned response
 * block (alignment required for the firmware interface).
 */
403 u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
404 struct hipz_query_hca *query_hca_rblock)
406 u64 r_cb = virt_to_abs(query_hca_rblock);
408 if (r_cb & (EHCA_PAGESIZE-1)) {
409 ehca_gen_err("response_block=%p not page aligned",
414 return ehca_plpar_hcall_norets(H_QUERY_HCA,
415 adapter_handle.handle, /* r4 */
/*
 * Generic H_REGISTER_RPAGES wrapper: registers queue pages with the
 * hypervisor.  r5 packs queue_type in the low byte and pagesize in the
 * next byte.  Used by the EQ/CQ/QP/MR specific helpers below.
 */
420 u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
423 const u64 resource_handle,
424 const u64 logical_address_of_page,
427 return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
428 adapter_handle.handle, /* r4 */
429 (u64)queue_type | ((u64)pagesize) << 8,
431 resource_handle, /* r6 */
432 logical_address_of_page, /* r7 */
/*
 * Register event-queue pages; rejects multi-page batches (the guard
 * condition is not visible here -- source appears truncated) before
 * delegating to hipz_h_register_rpage().
 * NOTE(review): "Ppage" in the log string looks like a typo for "page".
 */
437 u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
438 const struct ipz_eq_handle eq_handle,
439 struct ehca_pfeq *pfeq,
442 const u64 logical_address_of_page,
446 ehca_gen_err("Ppage counter=%lx", count);
449 return hipz_h_register_rpage(adapter_handle,
453 logical_address_of_page, count);
/*
 * Query interrupt state via H_QUERY_INT_STATE; H_BUSY is tolerated as
 * a non-error outcome, anything else unexpected is logged.
 */
456 u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
460 ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
461 adapter_handle.handle, /* r4 */
465 if (ret != H_SUCCESS && ret != H_BUSY)
466 ehca_gen_err("Could not query interrupt state.");
/*
 * Register completion-queue pages; guards against an unexpected page
 * count (guard condition not visible -- source appears truncated),
 * then delegates to hipz_h_register_rpage().
 */
471 u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
472 const struct ipz_cq_handle cq_handle,
473 struct ehca_pfcq *pfcq,
476 const u64 logical_address_of_page,
478 const struct h_galpa gal)
481 ehca_gen_err("Page counter=%lx", count);
485 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
486 cq_handle.handle, logical_address_of_page,
/*
 * Register queue-pair pages; same page-count guard pattern as the CQ
 * variant (guard condition not visible -- source appears truncated),
 * then delegates to hipz_h_register_rpage().
 */
490 u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
491 const struct ipz_qp_handle qp_handle,
492 struct ehca_pfqp *pfqp,
495 const u64 logical_address_of_page,
497 const struct h_galpa galpa)
500 ehca_gen_err("Page counter=%lx", count);
504 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
505 qp_handle.handle, logical_address_of_page,
/*
 * H_DISABLE_AND_GETC: disable the QP and retrieve the logical addresses
 * of the next send/receive WQEs to be processed.  Either output pointer
 * may be NULL if the caller does not need that address.
 */
509 u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
510 const struct ipz_qp_handle qp_handle,
511 struct ehca_pfqp *pfqp,
512 void **log_addr_next_sq_wqe2processed,
513 void **log_addr_next_rq_wqe2processed,
514 int dis_and_get_function_code)
517 u64 outs[PLPAR_HCALL9_BUFSIZE];
519 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
520 adapter_handle.handle, /* r4 */
521 dis_and_get_function_code, /* r5 */
522 qp_handle.handle, /* r6 */
524 if (log_addr_next_sq_wqe2processed)
525 *log_addr_next_sq_wqe2processed = (void *)outs[0];
526 if (log_addr_next_rq_wqe2processed)
527 *log_addr_next_rq_wqe2processed = (void *)outs[1];
/*
 * Modify QP attributes via H_MODIFY_QP.  update_mask selects which
 * fields of the control block (passed by absolute address) apply.
 */
532 u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
533 const struct ipz_qp_handle qp_handle,
534 struct ehca_pfqp *pfqp,
535 const u64 update_mask,
536 struct hcp_modify_qp_control_block *mqpcb,
540 u64 outs[PLPAR_HCALL9_BUFSIZE];
541 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
542 adapter_handle.handle, /* r4 */
543 qp_handle.handle, /* r5 */
544 update_mask, /* r6 */
545 virt_to_abs(mqpcb), /* r7 */
548 if (ret == H_NOT_ENOUGH_RESOURCES)
549 ehca_gen_err("Insufficient resources ret=%lx", ret);
/*
 * Query QP attributes via H_QUERY_QP into the caller-supplied control
 * block (passed by absolute address).
 */
554 u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
555 const struct ipz_qp_handle qp_handle,
556 struct ehca_pfqp *pfqp,
557 struct hcp_modify_qp_control_block *qqpcb,
560 return ehca_plpar_hcall_norets(H_QUERY_QP,
561 adapter_handle.handle, /* r4 */
562 qp_handle.handle, /* r5 */
563 virt_to_abs(qqpcb), /* r6 */
/*
 * Destroy a QP: tear down its galpas, drain it with H_DISABLE_AND_GETC,
 * then free the resource with H_FREE_RESOURCE.  Error paths between the
 * steps are only partially visible -- source appears truncated.
 */
567 u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
571 u64 outs[PLPAR_HCALL9_BUFSIZE];
573 ret = hcp_galpas_dtor(&qp->galpas);
575 ehca_gen_err("Could not destruct qp->galpas");
578 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
579 adapter_handle.handle, /* r4 */
582 qp->ipz_qp_handle.handle, /* r6 */
584 if (ret == H_HARDWARE)
585 ehca_gen_err("HCA not operational. ret=%lx", ret);
587 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
588 adapter_handle.handle, /* r4 */
589 qp->ipz_qp_handle.handle, /* r5 */
592 if (ret == H_RESOURCE)
593 ehca_gen_err("Resource still in use. ret=%lx", ret);
/*
 * Designate a QP as alias QP0 (SMI) for a port via H_DEFINE_AQP0.
 */
598 u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
599 const struct ipz_qp_handle qp_handle,
603 return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
604 adapter_handle.handle, /* r4 */
605 qp_handle.handle, /* r5 */
/*
 * Designate a QP as alias QP1 (GSI) via H_DEFINE_AQP1 and return the
 * firmware-assigned PMA and BMA QP numbers.
 */
610 u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
611 const struct ipz_qp_handle qp_handle,
613 u32 port, u32 * pma_qp_nr,
617 u64 outs[PLPAR_HCALL9_BUFSIZE];
619 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
620 adapter_handle.handle, /* r4 */
621 qp_handle.handle, /* r5 */
624 *pma_qp_nr = (u32)outs[0];
625 *bma_qp_nr = (u32)outs[1];
627 if (ret == H_ALIAS_EXIST)
628 ehca_gen_err("AQP1 already exists. ret=%lx", ret);
/*
 * Attach a QP to a multicast group (GID = subnet_prefix:interface_id)
 * via H_ATTACH_MCQP.
 */
633 u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
634 const struct ipz_qp_handle qp_handle,
637 u64 subnet_prefix, u64 interface_id)
641 ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
642 adapter_handle.handle, /* r4 */
643 qp_handle.handle, /* r5 */
645 interface_id, /* r7 */
646 subnet_prefix, /* r8 */
649 if (ret == H_NOT_ENOUGH_RESOURCES)
650 ehca_gen_err("Not enough resources. ret=%lx", ret);
/*
 * Detach a QP from a multicast group via H_DETACH_MCQP (mirror of
 * hipz_h_attach_mcqp).
 */
655 u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
656 const struct ipz_qp_handle qp_handle,
659 u64 subnet_prefix, u64 interface_id)
661 return ehca_plpar_hcall_norets(H_DETACH_MCQP,
662 adapter_handle.handle, /* r4 */
663 qp_handle.handle, /* r5 */
665 interface_id, /* r7 */
666 subnet_prefix, /* r8 */
/*
 * Destroy a CQ: tear down its galpas, then free the resource with
 * H_FREE_RESOURCE (force_flag requests a forced free).
 * NOTE(review): "cp->galpas" in the log string looks like a typo for
 * "cq->galpas".
 */
670 u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
676 ret = hcp_galpas_dtor(&cq->galpas);
678 ehca_gen_err("Could not destruct cp->galpas");
682 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
683 adapter_handle.handle, /* r4 */
684 cq->ipz_cq_handle.handle, /* r5 */
685 force_flag != 0 ? 1L : 0L, /* r6 */
688 if (ret == H_RESOURCE)
689 ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret);
/*
 * Destroy an EQ: tear down its galpas, then free the resource with
 * H_FREE_RESOURCE.
 */
694 u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
699 ret = hcp_galpas_dtor(&eq->galpas);
701 ehca_gen_err("Could not destruct eq->galpas");
705 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
706 adapter_handle.handle, /* r4 */
707 eq->ipz_eq_handle.handle, /* r5 */
710 if (ret == H_RESOURCE)
711 ehca_gen_err("Resource in use. ret=%lx ", ret);
/*
 * Allocate a memory region (MR) via H_ALLOC_RESOURCE; returns the MR
 * handle plus local/remote keys in outparms.  access_ctrl travels in
 * the upper 32 bits of r8.
 */
716 u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
717 const struct ehca_mr *mr,
720 const u32 access_ctrl,
721 const struct ipz_pd pd,
722 struct ehca_mr_hipzout_parms *outparms)
725 u64 outs[PLPAR_HCALL9_BUFSIZE];
727 ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
728 "vaddr=%lx length=%lx",
729 (u32)PAGE_SIZE, access_ctrl, vaddr, length);
730 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
731 adapter_handle.handle, /* r4 */
735 (((u64)access_ctrl) << 32ULL), /* r8 */
738 outparms->handle.handle = outs[0];
739 outparms->lkey = (u32)outs[2];
740 outparms->rkey = (u32)outs[3];
/*
 * Register MR pages.  At high debug levels, dumps each kernel page
 * address being registered.  Multi-page registrations require the page
 * list itself to be 4k aligned (EHCA_PAGESIZE) because firmware reads
 * the list from that address.
 */
745 u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
746 const struct ehca_mr *mr,
749 const u64 logical_address_of_page,
754 if (unlikely(ehca_debug_level >= 2)) {
/* count > 1: logical_address_of_page points at a list of pages */
758 kpage = (u64 *)abs_to_virt(logical_address_of_page);
759 for (i = 0; i < count; i++)
760 ehca_gen_dbg("kpage[%d]=%p",
761 i, (void *)kpage[i]);
763 ehca_gen_dbg("kpage=%p",
764 (void *)logical_address_of_page);
767 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
768 ehca_gen_err("logical_address_of_page not on a 4k boundary "
769 "adapter_handle=%lx mr=%p mr_handle=%lx "
770 "pagesize=%x queue_type=%x "
771 "logical_address_of_page=%lx count=%lx",
772 adapter_handle.handle, mr,
773 mr->ipz_mr_handle.handle, pagesize, queue_type,
774 logical_address_of_page, count);
777 ret = hipz_h_register_rpage(adapter_handle, pagesize,
779 mr->ipz_mr_handle.handle,
780 logical_address_of_page, count);
/*
 * Query an MR via H_QUERY_MR: length in outs[0], virtual address in
 * outs[1], ACL in the upper half of outs[4], lkey/rkey packed into the
 * upper/lower halves of outs[5].
 */
784 u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
785 const struct ehca_mr *mr,
786 struct ehca_mr_hipzout_parms *outparms)
789 u64 outs[PLPAR_HCALL9_BUFSIZE];
791 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
792 adapter_handle.handle, /* r4 */
793 mr->ipz_mr_handle.handle, /* r5 */
794 0, 0, 0, 0, 0, 0, 0);
795 outparms->len = outs[0];
796 outparms->vaddr = outs[1];
797 outparms->acl = outs[4] >> 32;
798 outparms->lkey = (u32)(outs[5] >> 32);
799 outparms->rkey = (u32)(outs[5] & (0xffffffff));
/*
 * Free an MR via H_FREE_RESOURCE.
 */
804 u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
805 const struct ehca_mr *mr)
807 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
808 adapter_handle.handle, /* r4 */
809 mr->ipz_mr_handle.handle, /* r5 */
/*
 * Re-register a physical MR via H_REREGISTER_PMR with new address
 * range, access rights and PD; updated vaddr/lkey/rkey come back in
 * the output registers.
 */
813 u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
814 const struct ehca_mr *mr,
817 const u32 access_ctrl,
818 const struct ipz_pd pd,
819 const u64 mr_addr_cb,
820 struct ehca_mr_hipzout_parms *outparms)
823 u64 outs[PLPAR_HCALL9_BUFSIZE];
825 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
826 adapter_handle.handle, /* r4 */
827 mr->ipz_mr_handle.handle, /* r5 */
/* access_ctrl in the upper half, PD in the lower half */
831 ((((u64)access_ctrl) << 32ULL) | pd.value),
834 outparms->vaddr = outs[1];
835 outparms->lkey = (u32)outs[2];
836 outparms->rkey = (u32)outs[3];
/*
 * Register a shared MR via H_REGISTER_SMR: creates a new MR (mr) that
 * shares the physical pages of orig_mr, with its own access rights and
 * PD; new handle/lkey/rkey are returned in outparms.
 */
841 u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
842 const struct ehca_mr *mr,
843 const struct ehca_mr *orig_mr,
845 const u32 access_ctrl,
846 const struct ipz_pd pd,
847 struct ehca_mr_hipzout_parms *outparms)
850 u64 outs[PLPAR_HCALL9_BUFSIZE];
852 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
853 adapter_handle.handle, /* r4 */
854 orig_mr->ipz_mr_handle.handle, /* r5 */
856 (((u64)access_ctrl) << 32ULL), /* r7 */
859 outparms->handle.handle = outs[0];
860 outparms->lkey = (u32)outs[2];
861 outparms->rkey = (u32)outs[3];
/*
 * Allocate a memory window (MW) via H_ALLOC_RESOURCE; returns the MW
 * handle and rkey in outparms.
 */
866 u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
867 const struct ehca_mw *mw,
868 const struct ipz_pd pd,
869 struct ehca_mw_hipzout_parms *outparms)
872 u64 outs[PLPAR_HCALL9_BUFSIZE];
874 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
875 adapter_handle.handle, /* r4 */
879 outparms->handle.handle = outs[0];
880 outparms->rkey = (u32)outs[3];
/*
 * Query a memory window via H_QUERY_MW; only the rkey is extracted.
 */
885 u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
886 const struct ehca_mw *mw,
887 struct ehca_mw_hipzout_parms *outparms)
890 u64 outs[PLPAR_HCALL9_BUFSIZE];
892 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
893 adapter_handle.handle, /* r4 */
894 mw->ipz_mw_handle.handle, /* r5 */
895 0, 0, 0, 0, 0, 0, 0);
896 outparms->rkey = (u32)outs[3];
/*
 * Free a memory window via H_FREE_RESOURCE.
 */
901 u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
902 const struct ehca_mw *mw)
904 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
905 adapter_handle.handle, /* r4 */
906 mw->ipz_mw_handle.handle, /* r5 */
910 u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
911 const u64 ressource_handle,
913 unsigned long *byte_count)
915 u64 r_cb = virt_to_abs(rblock);
917 if (r_cb & (EHCA_PAGESIZE-1)) {
918 ehca_gen_err("rblock not page aligned.");
922 return ehca_plpar_hcall_norets(H_ERROR_DATA,
923 adapter_handle.handle,