2 * Copyright (C) 2005 - 2008 ServerEngines
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@serverengines.com
14 * 209 N. Fair Oaks Ave
/*
 * Initialise a multi-purpose ring descriptor over the caller-supplied
 * buffer 'va', holding 'num' items of 'size' bytes each.
 *
 * NOTE(review): this listing appears to be missing lines throughout
 * (e.g. the assignments of ring->num and ring->va, and most braces).
 * Compare against the complete source before relying on any block.
 */
21 inline void mp_ring_create(struct mp_ring *ring, u32 num, u32 size, void *va)
24 memset(ring, 0, sizeof(struct mp_ring));
/* Number of PAGE_SIZE pages needed to back num*size bytes. */
26 ring->pages = DIV_ROUND_UP(num * size, PAGE_SIZE);
27 ring->itemSize = size;
32 * -----------------------------------------------------------------------
33 * Interface for 2 index rings. i.e. consumer/producer rings
34 * --------------------------------------------------------------------------
37 /* Returns number items pending on ring. */
38 static inline u32 mp_ring_num_pending(struct mp_ring *ring)
/* pidx - cidx with wrap-around; be_subc presumably subtracts mod ring->num. */
43 return be_subc(ring->pidx, ring->cidx, ring->num);
46 /* Returns number items free on ring. */
47 static inline u32 mp_ring_num_empty(struct mp_ring *ring)
/* One slot is reserved so a full ring can be distinguished from an empty one. */
50 return ring->num - 1 - mp_ring_num_pending(ring);
/* Consume one item: advance the consumer index with wrap-around. */
54 static inline void mp_ring_consume(struct mp_ring *ring)
/* Must not consume from an empty ring. */
57 ASSERT(ring->pidx != ring->cidx);
59 ring->cidx = be_addc(ring->cidx, 1, ring->num);
/* Produce one item: advance the producer index with wrap-around. */
63 static inline void mp_ring_produce(struct mp_ring *ring)
66 ring->pidx = be_addc(ring->pidx, 1, ring->num);
69 /* Consume count items */
70 static inline void mp_ring_consume_multiple(struct mp_ring *ring, u32 count)
/* Caller must guarantee at least 'count' items are pending. */
73 ASSERT(mp_ring_num_pending(ring) >= count);
74 ring->cidx = be_addc(ring->cidx, count, ring->num);
/* Return a pointer to item 'index' within the ring's backing buffer. */
77 static inline void *mp_ring_item(struct mp_ring *ring, u32 index)
80 ASSERT(index < ring->num);
81 ASSERT(ring->itemSize > 0);
/* Byte-offset arithmetic: va + index * itemSize. */
82 return (u8 *) ring->va + index * ring->itemSize;
85 /* Ptr to produce item */
/* Returns the address of the slot at the current producer index. */
86 static inline void *mp_ring_producer_ptr(struct mp_ring *ring)
89 return mp_ring_item(ring, ring->pidx);
93 * Returns a pointer to the current location in the ring.
94 * This is used for rings with 1 index.
96 static inline void *mp_ring_current(struct mp_ring *ring)
/* Single-index rings track position in cidx only; pidx stays 0. */
99 ASSERT(ring->pidx == 0);	/* not used */
101 return mp_ring_item(ring, ring->cidx);
105 * Increment index for rings with only 1 index.
106 * This is used for rings with 1 index.
/* Advances cidx (with wrap) and returns the new current item. */
108 static inline void *mp_ring_next(struct mp_ring *ring)
111 ASSERT(ring->num > 0);
112 ASSERT(ring->pidx == 0);	/* not used */
114 ring->cidx = be_addc(ring->cidx, 1, ring->num);
115 return mp_ring_current(ring);
119 This routine waits for a previously posted mailbox WRB to be completed.
120 Specifically it waits for the mailbox to say that it's ready to accept
121 more data by setting the LSB of the mailbox pd register to 1.
123 pcontroller - The function object to post this data to
125 IRQL < DISPATCH_LEVEL
/*
 * Busy-polls the mcc_bootstrap doorbell until the 'ready' bit is set.
 * NOTE(review): listing is missing lines here (loop body/braces and the
 * TRACE argument) -- verify against the complete source.
 */
127 static void be_mcc_mailbox_wait(struct be_function_object *pfob)
129 struct MPU_MAILBOX_DB_AMAP mailbox_db;
134 /* No waiting for mailbox in emulated mode. */
138 mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
139 ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
141 while (ready == false) {
/* Emit a progress warning roughly every 0x40000 polls. */
142 if ((++i & 0x3FFFF) == 0) {
143 TRACE(DL_WARN, "Waiting for mailbox ready - %dk polls",
147 mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
148 ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
153 This routine tells the MCC mailbox that there is data to be processed
154 in the mailbox. It does this by setting the physical address for the
155 mailbox location and clearing the LSB. This routine returns immediately
156 and does not wait for the WRB to be processed.
158 pcontroller - The function object to post this data to
160 IRQL < DISPATCH_LEVEL
/*
 * Two-phase doorbell write: first the high address bits (hi=1), then the
 * low address bits (hi=0), each preceded by a wait for MPU readiness.
 */
163 static void be_mcc_mailbox_notify(struct be_function_object *pfob)
165 struct MPU_MAILBOX_DB_AMAP mailbox_db;
168 ASSERT(pfob->mailbox.pa);
169 ASSERT(pfob->mailbox.va);
171 /* If emulated, do not ring the mailbox */
173 TRACE(DL_WARN, "MPU disabled. Skipping mailbox notify.");
177 /* form the higher bits in the address */
178 mailbox_db.dw[0] = 0;	/* init */
179 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 1);
180 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
/* Bits above bit 33 of the 64-bit physical address. */
183 pa = (u32) (pfob->mailbox.pa >> 34);
184 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
186 /* Wait for the MPU to be ready */
187 be_mcc_mailbox_wait(pfob);
189 /* Ring doorbell 1st time */
190 PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
192 /* Wait for 1st write to be acknowledged. */
193 be_mcc_mailbox_wait(pfob);
195 /* lower bits 30 bits from 4th bit (bits 4 to 33)*/
196 pa = (u32) (pfob->mailbox.pa >> 4) & 0x3FFFFFFF;
198 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 0);
199 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
200 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
202 /* Ring doorbell 2nd time */
203 PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
207 This routine tells the MCC mailbox that there is data to be processed
208 in the mailbox. It does this by setting the physical address for the
209 mailbox location and clearing the LSB. This routine spins until the
210 MPU writes a 1 into the LSB indicating that the data has been received
211 and is ready to be processed.
213 pcontroller - The function object to post this data to
215 IRQL < DISPATCH_LEVEL
/* Synchronous variant: notify, then block until the MPU signals ready. */
218 be_mcc_mailbox_notify_and_wait(struct be_function_object *pfob)
223 be_mcc_mailbox_notify(pfob);
225 * Now wait for completion of WRB
227 be_mcc_mailbox_wait(pfob);
/*
 * Process a single MCC completion-queue entry: recover the WRB context
 * from the CQE tag, optionally copy the response payload to the caller's
 * buffer, invoke internal and user callbacks, then free the context.
 */
231 be_mcc_process_cqe(struct be_function_object *pfob,
232 struct MCC_CQ_ENTRY_AMAP *cqe)
234 struct be_mcc_wrb_context *wrb_context = NULL;
240 * A command completed. Commands complete out-of-order.
241 * Determine which command completed from the TAG.
243 offset = AMAP_BYTE_OFFSET(MCC_CQ_ENTRY, mcc_tag);
/* The tag field holds the wrb_context pointer, stored as a 64-bit value. */
244 p = (u8 *) cqe + offset;
245 wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
249 * Perform a response copy if requested.
250 * Only copy data if the FWCMD is successful.
252 status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, cqe);
253 if (status == MGMT_STATUS_SUCCESS && wrb_context->copy.length > 0) {
254 ASSERT(wrb_context->wrb);
255 ASSERT(wrb_context->copy.va);
/* Copy from the embedded payload at the requested fwcmd offset. */
256 p = (u8 *)wrb_context->wrb + AMAP_BYTE_OFFSET(MCC_WRB, payload);
257 memcpy(wrb_context->copy.va,
258 (u8 *)p + wrb_context->copy.fwcmd_offset,
259 wrb_context->copy.length);
264 /* internal callback */
265 if (wrb_context->internal_cb) {
266 wrb_context->internal_cb(wrb_context->internal_cb_context,
267 status, wrb_context->wrb);
/* User (external) callback, if registered. */
271 if (wrb_context->cb) {
272 wrb_context->cb(wrb_context->cb_context,
273 status, wrb_context->wrb);
275 /* Free the context structure */
276 _be_mcc_free_wrb_context(pfob, wrb_context);
/*
 * Drain the software backlog of queued WRBs into the hardware MCC ring.
 * Only one thread drives the backlog at a time (driving_backlog flag);
 * the post_lock is dropped around callback invocation to avoid holding
 * a spinlock across OS callbacks.
 * NOTE(review): this listing is missing lines (braces, TRACE/callback
 * arguments) -- verify control flow against the complete source.
 */
279 void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc)
281 struct be_function_object *pfob = NULL;
282 int status = BE_PENDING;
283 struct be_generic_q_ctxt *q_ctxt;
284 struct MCC_WRB_AMAP *wrb;
285 struct MCC_WRB_AMAP *queue_wrb;
286 u32 length, payload_length, sge_count, embedded;
/* Compile-time check: a generic queue context must hold a full WRB. */
289 BUILD_BUG_ON((sizeof(struct be_generic_q_ctxt) <
290 sizeof(struct be_queue_driver_context) +
291 sizeof(struct MCC_WRB_AMAP)));
292 pfob = mcc->parent_function;
294 spin_lock_irqsave(&pfob->post_lock, irql);
296 if (mcc->driving_backlog) {
/* Another thread is already draining; hand off any pending redrive. */
297 spin_unlock_irqrestore(&pfob->post_lock, irql);
298 if (pfob->pend_queue_driving && pfob->mcc) {
299 pfob->pend_queue_driving = 0;
300 be_drive_mcc_wrb_queue(pfob->mcc);
304 /* Acquire the flag to limit 1 thread to redrive posts. */
305 mcc->driving_backlog = 1;
307 while (!list_empty(&mcc->backlog)) {
308 wrb = _be_mpu_peek_ring_wrb(mcc, true);	/* Driving the queue */
310 break;	/* No space in the ring yet. */
311 /* Get the next queued entry to process. */
312 q_ctxt = list_first_entry(&mcc->backlog,
313 struct be_generic_q_ctxt, context.list);
314 list_del(&q_ctxt->context.list);
315 pfob->mcc->backlog_length--;
317 * Compute the required length of the WRB.
318 * Since the queue element may be smaller than
319 * the complete WRB, copy only the required number of bytes.
321 queue_wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
322 embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, queue_wrb);
/* Embedded commands carry the payload in the WRB itself. */
324 payload_length = AMAP_GET_BITS_PTR(MCC_WRB,
325 payload_length, queue_wrb);
326 length = sizeof(struct be_mcc_wrb_header) +
/* Non-embedded: payload described by SGEs (only one fragment supported). */
329 sge_count = AMAP_GET_BITS_PTR(MCC_WRB, sge_count,
331 ASSERT(sge_count == 1); /* only 1 frag. */
332 length = sizeof(struct be_mcc_wrb_header) +
333 sge_count * sizeof(struct MCC_SGE_AMAP);
337 * Truncate the length based on the size of the
338 * queue element. Some elements that have output parameters
339 * can be smaller than the payload_length field would
340 * indicate. We really only need to copy the request
341 * parameters, not the response.
343 length = min(length, (u32) (q_ctxt->context.bytes -
344 offsetof(struct be_generic_q_ctxt, wrb_header)));
346 /* Copy the queue element WRB into the ring. */
347 memcpy(wrb, &q_ctxt->wrb_header, length);
349 /* Post the wrb.  This should not fail assuming we have
350 * enough context structs. */
351 status = be_function_post_mcc_wrb(pfob, wrb, NULL,
352 q_ctxt->context.cb, q_ctxt->context.cb_context,
353 q_ctxt->context.internal_cb,
354 q_ctxt->context.internal_cb_context,
355 q_ctxt->context.optional_fwcmd_va,
356 &q_ctxt->context.copy);
358 if (status == BE_SUCCESS) {
360 * Synchronous completion. Since it was queued,
361 * we will invoke the callback.
362 * To the user, this is an asynchronous request.
/* Drop the lock before calling back into the OS/user code. */
364 spin_unlock_irqrestore(&pfob->post_lock, irql);
365 if (pfob->pend_queue_driving && pfob->mcc) {
366 pfob->pend_queue_driving = 0;
367 be_drive_mcc_wrb_queue(pfob->mcc);
370 ASSERT(q_ctxt->context.cb);
373 q_ctxt->context.cb_context,
/* Re-acquire the lock before continuing the drain loop. */
376 spin_lock_irqsave(&pfob->post_lock, irql);
378 } else if (status != BE_PENDING) {
380 * Another resource failed.  Should never happen
381 * if we have sufficient MCC_WRB_CONTEXT structs.
382 * Return to head of the queue.
384 TRACE(DL_WARN, "Failed to post a queued WRB. 0x%x",
386 list_add(&q_ctxt->context.list, &mcc->backlog);
387 pfob->mcc->backlog_length++;
392 /* Free the flag to limit 1 thread to redrive posts. */
393 mcc->driving_backlog = 0;
394 spin_unlock_irqrestore(&pfob->post_lock, irql);
397 /* This function asserts that the WRB was consumed in order. */
/*
 * Returns 1 if the WRB referenced by this CQE is the one at the tracked
 * consumed_index, 0 otherwise; advances consumed_index either way.
 */
399 u32 be_mcc_wrb_consumed_in_order(struct be_mcc_object *mcc,
400 struct MCC_CQ_ENTRY_AMAP *cqe)
402 struct be_mcc_wrb_context *wrb_context = NULL;
404 u32 wrb_consumed_in_order;
410 * A command completed. Commands complete out-of-order.
411 * Determine which command completed from the TAG.
413 offset = AMAP_BYTE_OFFSET(MCC_CQ_ENTRY, mcc_tag);
414 p = (u8 *) cqe + offset;
415 wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
/* Derive the ring index of this WRB from its address within the ring. */
419 wrb_index = (u32) (((u64)(size_t)wrb_context->ring_wrb -
420 (u64)(size_t)mcc->sq.ring.va) / sizeof(struct MCC_WRB_AMAP));
422 ASSERT(wrb_index < mcc->sq.ring.num);
424 wrb_consumed_in_order = (u32) (wrb_index == mcc->consumed_index);
425 mcc->consumed_index = be_addc(mcc->consumed_index, 1, mcc->sq.ring.num);
426 return wrb_consumed_in_order;
/*
 * Drain the MCC completion queue: handle async events and command
 * completions, coalesce doorbell writes, then rearm the CQ and update
 * the SQ consumer index.  Single-threaded via the 'processing' flag;
 * callbacks run without the cq_lock held.
 * NOTE(review): this listing is missing lines (braces, loop terminators,
 * some statement continuations) -- verify against the complete source.
 */
430 int be_mcc_process_cq(struct be_mcc_object *mcc, bool rearm)
432 struct be_function_object *pfob = NULL;
433 struct MCC_CQ_ENTRY_AMAP *cqe;
434 struct CQ_DB_AMAP db;
435 struct mp_ring *cq_ring = &mcc->cq.ring;
436 struct mp_ring *mp_ring = &mcc->sq.ring;
437 u32 num_processed = 0;
438 u32 consumed = 0, valid, completed, cqe_consumed, async_event;
440 pfob = mcc->parent_function;
442 spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
445 * Verify that only one thread is processing the CQ at once.
446 * We cannot hold the lock while processing the CQ due to
447 * the callbacks into the OS.  Therefore, this flag is used
448 * to control it.  If any of the threads want to
449 * rearm the CQ, we need to honor that.
451 if (mcc->processing != 0) {
/* Another thread owns the CQ; record our rearm request and bail. */
452 mcc->rearm = mcc->rearm || rearm;
455 mcc->processing = 1;	/* lock processing for this thread. */
456 mcc->rearm = rearm;	/* set our rearm setting */
459 spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
461 cqe = mp_ring_current(cq_ring);
462 valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
/* Coalesce: flush a non-rearming doorbell every 8 processed entries. */
465 if (num_processed >= 8) {
466 /* coalesce doorbells, but free space in cq
467 * ring while processing. */
468 db.dw[0] = 0;	/* clear */
469 AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
470 AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, false);
471 AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
472 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db,
476 PD_WRITE(pfob, cq_db, db.dw[0]);
479 async_event = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, async_event, cqe);
481 /* This is an asynchronous event. */
/* The async trailer occupies the tail of the CQE. */
482 struct ASYNC_EVENT_TRAILER_AMAP *async_trailer =
483 (struct ASYNC_EVENT_TRAILER_AMAP *)
484 ((u8 *) cqe + sizeof(struct MCC_CQ_ENTRY_AMAP) -
485 sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
487 async_event = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
488 async_event, async_trailer);
489 ASSERT(async_event == 1);
492 valid = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
493 valid, async_trailer);
496 /* Call the async event handler if it is installed. */
499 AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
500 event_code, async_trailer);
501 mcc->async_cb(mcc->async_context,
502 (u32) event_code, (void *) cqe);
506 /* This is a completion entry. */
508 /* No vm forwarding in this driver. */
510 cqe_consumed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
514 * A command on the MCC ring was consumed.
515 * Update the consumer index.
516 * These occur in order.
518 ASSERT(be_mcc_wrb_consumed_in_order(mcc, cqe));
522 completed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
525 /* A command completed.  Use tag to
526 * determine which command.  */
527 be_mcc_process_cqe(pfob, cqe);
/* Invalidate the entry so it is not reprocessed on the next pass. */
532 AMAP_SET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe, false);
535 /* Update our tracking for the CQ ring. */
536 cqe = mp_ring_next(cq_ring);
537 valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
540 TRACE(DL_INFO, "num_processed:0x%x, and consumed:0x%x",
541 num_processed, consumed);
543 * Grab the CQ lock to synchronize the "rearm" setting for
544 * the doorbell, and for clearing the "processing" flag.
546 spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
549 * Rearm the cq.  This is done based on the global mcc->rearm
550 * flag which combines the rearm parameter from the current
551 * call to process_cq and any other threads
552 * that tried to process the CQ while this one was active.
553 * This handles the situation where a sync. fwcmd was processing
554 * the CQ while the interrupt/dpc tries to process it.
555 * The sync process gets to continue -- but it is now
556 * responsible for the rearming.
558 if (num_processed > 0 || mcc->rearm == true) {
559 db.dw[0] = 0;	/* clear */
560 AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
561 AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, mcc->rearm);
562 AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
563 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_processed);
565 PD_WRITE(pfob, cq_db, db.dw[0]);
568 * Update the consumer index after ringing the CQ doorbell.
569 * We don't want another thread to post more WRBs before we
570 * have CQ space available.
572 mp_ring_consume_multiple(mp_ring, consumed);
574 /* Clear the processing flag. */
578 spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
580 * Use the local variable to detect if the current thread
581 * holds the WRB post lock.  If rearm is false, this is
582 * either a synchronous command, or the upper layer driver is polling
583 * from a thread.  We do not drive the queue from that
584 * context since the driver may hold the
585 * wrb post lock already.
588 be_drive_mcc_wrb_queue(mcc);
/* Defer the queue drive to whoever releases the post lock. */
590 pfob->pend_queue_driving = 1;
596 *============================================================================
597 * P U B L I C R O U T I N E S
598 *============================================================================
602 This routine creates an MCC object.  This object contains an MCC send queue
603 and a CQ private to the MCC.
605 pcontroller - Handle to a function object
607 EqObject - EQ object that will be used to dispatch this MCC
609 ppMccObject - Pointer to an internal Mcc Object returned.
611 Returns BE_SUCCESS if successful, otherwise a useful error code
614 IRQL < DISPATCH_LEVEL
/*
 * NOTE(review): listing is missing lines here (error paths, 'va'
 * initialisation, closing braces) -- verify against complete source.
 */
618 be_mcc_ring_create(struct be_function_object *pfob,
619 struct ring_desc *rd, u32 length,
620 struct be_mcc_wrb_context *context_array,
621 u32 num_context_entries,
622 struct be_cq_object *cq, struct be_mcc_object *mcc)
626 struct FWCMD_COMMON_MCC_CREATE *fwcmd = NULL;
627 struct MCC_WRB_AMAP *wrb = NULL;
628 u32 num_entries_encoded, n, i;
/* Ring must hold at least two WRBs to be usable. */
632 if (length < sizeof(struct MCC_WRB_AMAP) * 2) {
633 TRACE(DL_ERR, "Invalid MCC ring length:%d", length);
637 * Reduce the actual ring size to be less than the number
638 * of context entries.  This ensures that we run out of
639 * ring WRBs first so the queuing works correctly.  We never
640 * queue based on context structs.
642 if (num_context_entries + 1 <
643 length / sizeof(struct MCC_WRB_AMAP) - 1) {
646 (num_context_entries + 2) * sizeof(struct MCC_WRB_AMAP);
/* Ring length must be a power of two; round down from max_length. */
648 if (is_power_of_2(max_length))
649 length = __roundup_pow_of_two(max_length+1) / 2;
651 length = __roundup_pow_of_two(max_length) / 2;
653 ASSERT(length <= max_length);
656 "MCC ring length reduced based on context entries."
657 " length:%d wrbs:%d context_entries:%d", length,
658 (int) (length / sizeof(struct MCC_WRB_AMAP)),
659 num_context_entries);
662 spin_lock_irqsave(&pfob->post_lock, irql);
664 num_entries_encoded =
665 be_ring_length_to_encoding(length, sizeof(struct MCC_WRB_AMAP));
667 /* Init MCC object. */
668 memset(mcc, 0, sizeof(*mcc));
669 mcc->parent_function = pfob;
672 INIT_LIST_HEAD(&mcc->backlog);
674 wrb = be_function_peek_mcc_wrb(pfob);
677 TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
678 status = BE_STATUS_NO_MCC_WRB;
681 /* Prepares an embedded fwcmd, including request/response sizes. */
682 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE);
684 fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
686 * Program MCC ring context
688 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, pdid,
689 &fwcmd->params.request.context, 0);
690 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, invalid,
691 &fwcmd->params.request.context, false);
692 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size,
693 &fwcmd->params.request.context, num_entries_encoded);
696 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT,
697 cq_id, &fwcmd->params.request.context, n);
698 be_rd_to_pa_list(rd, fwcmd->params.request.pages,
699 ARRAY_SIZE(fwcmd->params.request.pages));
700 /* Post the f/w command */
701 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
702 NULL, NULL, fwcmd, NULL);
703 if (status != BE_SUCCESS) {
704 TRACE(DL_ERR, "MCC to create CQ failed.");
708 * Create a linked list of context structures
710 mcc->wrb_context.base = context_array;
711 mcc->wrb_context.num = num_context_entries;
712 INIT_LIST_HEAD(&mcc->wrb_context.list_head);
713 memset(context_array, 0,
714 sizeof(struct be_mcc_wrb_context) * num_context_entries);
715 for (i = 0; i < mcc->wrb_context.num; i++) {
716 list_add_tail(&context_array[i].next,
717 &mcc->wrb_context.list_head);
722 * Create an mcc_ring for tracking WRB hw ring
726 mp_ring_create(&mcc->sq.ring, length / sizeof(struct MCC_WRB_AMAP),
727 sizeof(struct MCC_WRB_AMAP), va);
728 mcc->sq.ring.id = fwcmd->params.response.id;
730 * Init a mcc_ring for tracking the MCC CQ.
733 mp_ring_create(&mcc->cq.ring, cq->num_entries,
734 sizeof(struct MCC_CQ_ENTRY_AMAP), cq->va);
735 mcc->cq.ring.id = cq->cq_id;
737 /* Force zeroing of CQ. */
738 memset(cq->va, 0, cq->num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP));
740 /* Initialize debug index. */
741 mcc->consumed_index = 0;
/* The MCC SQ holds a reference on its CQ. */
743 atomic_inc(&cq->ref_count);
746 TRACE(DL_INFO, "MCC ring created. id:%d bytes:%d cq_id:%d cq_entries:%d"
747 " num_context:%d", mcc->sq.ring.id, length,
748 cq->cq_id, cq->num_entries, num_context_entries);
751 spin_unlock_irqrestore(&pfob->post_lock, irql);
752 if (pfob->pend_queue_driving && pfob->mcc) {
753 pfob->pend_queue_driving = 0;
754 be_drive_mcc_wrb_queue(pfob->mcc);
760 This routine destroys an MCC send queue
762 MccObject - Internal Mcc Object to be destroyed.
764 Returns BE_SUCCESS if successful, otherwise an error code is returned.
766 IRQL < DISPATCH_LEVEL
768 The caller of this routine must ensure that no other WRB may be posted
769 until this routine returns.
772 int be_mcc_ring_destroy(struct be_mcc_object *mcc)
775 struct be_function_object *pfob = mcc->parent_function;
/* The CQ must be quiescent before tearing the ring down. */
778 ASSERT(mcc->processing == 0);
781 * Remove the ring from the function object.
782 * This transitions back to mailbox mode.
786 /* Send fwcmd to destroy the queue.  (Using the mailbox.) */
787 status = be_function_ring_destroy(mcc->parent_function, mcc->sq.ring.id,
788 FWCMD_RING_TYPE_MCC, NULL, NULL, NULL, NULL);
791 /* Release the SQ reference to the CQ */
792 atomic_dec(&mcc->cq_object->ref_count);
/*
 * Internal completion callback used for synchronous MCC WRB posts.
 * Stores the completion status into the caller's stack variable
 * (users_final_status) so the polling loop in _be_mpu_post_wrb_ring
 * can observe completion.  The 'wrb' argument is unused here.
 * Fix: renamed misspelled parameter 'staus' -> 'status' (internal name
 * only; invoked through a function pointer, so callers are unaffected).
 */
798 mcc_wrb_sync_cb(void *context, int status, struct MCC_WRB_AMAP *wrb)
800 struct be_mcc_wrb_context *wrb_context =
801 (struct be_mcc_wrb_context *) context;
803 *wrb_context->users_final_status = status;
807 This routine posts a command to the MCC send queue
809 mcc - Internal Mcc Object to be destroyed.
813 Returns BE_SUCCESS if successful, otherwise an error code is returned.
815 IRQL < DISPATCH_LEVEL if CompletionCallback is not NULL
816 IRQL <=DISPATCH_LEVEL if CompletionCallback is NULL
818 If this routine is called with CompletionCallback != NULL the
819 call is considered to be asynchronous and will return as soon
820 as the WRB is posted to the MCC with BE_PENDING.
822 If CompletionCallback is NULL, then this routine will not return until
823 a completion for this MCC command has been processed.
824 If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
826 This routine should only be called if the MPU has been bootstrapped past
/*
 * NOTE(review): listing is missing lines here (braces, udelay in the
 * poll loop, return path) -- verify against the complete source.
 */
832 _be_mpu_post_wrb_ring(struct be_mcc_object *mcc, struct MCC_WRB_AMAP *wrb,
833 struct be_mcc_wrb_context *wrb_context)
836 struct MCC_WRB_AMAP *ring_wrb = NULL;
837 int status = BE_PENDING;
838 int final_status = BE_PENDING;
839 mcc_wrb_cqe_callback cb = NULL;
840 struct MCC_DB_AMAP mcc_db;
843 ASSERT(mp_ring_num_empty(&mcc->sq.ring) > 0);
845 * Input wrb is most likely the next wrb in the ring, since the client
846 * can peek at the address.
848 ring_wrb = mp_ring_producer_ptr(&mcc->sq.ring);
849 if (wrb != ring_wrb) {
850 /* If not equal, copy it into the ring. */
851 memcpy(ring_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
854 wrb_context->ring_wrb = ring_wrb;
856 embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, ring_wrb);
858 /* embedded commands will have the response within the WRB. */
859 wrb_context->wrb = ring_wrb;
862 * non-embedded commands will not have the response
863 * within the WRB, and they may complete out-of-order.
864 * The WRB will not be valid to inspect
865 * during the completion.
867 wrb_context->wrb = NULL;
/* Save the user's callback; NULL means a synchronous (polled) post. */
869 cb = wrb_context->cb;
872 /* Assign our internal callback if this is a
873 * synchronous call. */
874 wrb_context->cb = mcc_wrb_sync_cb;
875 wrb_context->cb_context = wrb_context;
876 wrb_context->users_final_status = &final_status;
878 /* Increment producer index */
880 mcc_db.dw[0] = 0;		/* initialize */
881 AMAP_SET_BITS_PTR(MCC_DB, rid, &mcc_db, mcc->sq.ring.id);
882 AMAP_SET_BITS_PTR(MCC_DB, numPosted, &mcc_db, 1);
884 mp_ring_produce(&mcc->sq.ring);
885 PD_WRITE(mcc->parent_function, mpu_mcc_db, mcc_db.dw[0]);
886 TRACE(DL_INFO, "pidx: %x and cidx: %x.", mcc->sq.ring.pidx,
890 int polls = 0;	/* At >= 1 us per poll   */
891 /* Wait until this command completes, polling the CQ. */
893 TRACE(DL_INFO, "FWCMD submitted in the poll mode.");
894 /* Do not rearm CQ in this context. */
895 be_mcc_process_cq(mcc, false);
897 if (final_status == BE_PENDING) {
/* Warn roughly every 0x80000 polls while still pending. */
898 if ((++polls & 0x7FFFF) == 0) {
900 "Warning : polling MCC CQ for %d"
901 "ms.", polls / 1000);
907 /* final_status changed when the command completes */
908 } while (final_status == BE_PENDING);
910 status = final_status;
/*
 * Return the next free WRB slot in the MCC send ring, or (presumably)
 * NULL when posts must go through the backlog or the ring is full.
 * 'driving_queue' is true only when called from the backlog drainer,
 * which is allowed to bypass the queue-ordering check.
 */
916 struct MCC_WRB_AMAP *
917 _be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue)
919 /* If we have queued items, do not allow a post to bypass the queue. */
920 if (!driving_queue && !list_empty(&mcc->backlog))
923 if (mp_ring_num_empty(&mcc->sq.ring) <= 0)
925 return (struct MCC_WRB_AMAP *) mp_ring_producer_ptr(&mcc->sq.ring);
/*
 * Record the bootstrap mailbox location in the function object and issue
 * the endianness-detection pattern so the MPU learns host byte order.
 * Both the virtual and physical mailbox addresses must be 16-byte aligned.
 */
929 be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *mailbox)
932 pfob->mailbox.va = mailbox->va;
933 pfob->mailbox.pa = cpu_to_le64(mailbox->pa);
934 pfob->mailbox.length = mailbox->length;
936 ASSERT(((u32)(size_t)pfob->mailbox.va & 0xf) == 0);
937 ASSERT(((u32)(size_t)pfob->mailbox.pa & 0xf) == 0);
939 * Issue the WRB to set MPU endianness
/* Well-known marker pattern written into the WRB area. */
942 u64 *endian_check = (u64 *) (pfob->mailbox.va +
943 AMAP_BYTE_OFFSET(MCC_MAILBOX, wrb));
944 *endian_check = 0xFF1234FFFF5678FFULL;
947 be_mcc_mailbox_notify_and_wait(pfob);
954 This routine posts a command to the MCC mailbox.
956 FuncObj - Function Object to post the WRB on behalf of.
958 CompletionCallback - Address of a callback routine to invoke once the WRB
960 CompletionCallbackContext - Opaque context to be passed during the call to
961 the CompletionCallback.
962 Returns BE_SUCCESS if successful, otherwise an error code is returned.
964 IRQL <=DISPATCH_LEVEL if CompletionCallback is NULL
966 This routine will block until a completion for this MCC command has been
967 processed. If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
969 This routine should only be called if the MPU has not been bootstrapped past
973 _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
974 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context)
976 struct MCC_MAILBOX_AMAP *mailbox = NULL;
977 struct MCC_WRB_AMAP *mb_wrb;
978 struct MCC_CQ_ENTRY_AMAP *mb_cq;
/* Mailbox mode is only valid before the MCC ring exists. */
981 ASSERT(pfob->mcc == NULL);
982 mailbox = pfob->mailbox.va;
985 offset = AMAP_BYTE_OFFSET(MCC_MAILBOX, wrb);
986 mb_wrb = (struct MCC_WRB_AMAP *) (u8 *)mailbox + offset;
988 memset(mailbox, 0, sizeof(*mailbox));
989 memcpy(mb_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
991 /* The callback can inspect the final WRB to get output parameters. */
992 wrb_context->wrb = mb_wrb;
/* Synchronous: blocks until the MPU has consumed the mailbox. */
994 be_mcc_mailbox_notify_and_wait(pfob);
996 /* A command completed.  Use tag to determine which command.  */
997 offset = AMAP_BYTE_OFFSET(MCC_MAILBOX, cq);
998 mb_cq = (struct MCC_CQ_ENTRY_AMAP *) ((u8 *)mailbox + offset);
999 be_mcc_process_cqe(pfob, mb_cq);
1001 status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, mb_cq);
/*
 * Allocate a WRB context: hand out the single default (mailbox) context
 * first, otherwise pull one from the MCC free list.  Returns NULL
 * (presumably) when none is available.  Serialized by mcc_context_lock.
 */
1007 struct be_mcc_wrb_context *
1008 _be_mcc_allocate_wrb_context(struct be_function_object *pfob)
1010 struct be_mcc_wrb_context *context = NULL;
1013 spin_lock_irqsave(&pfob->mcc_context_lock, irq);
1015 if (!pfob->mailbox.default_context_allocated) {
1016 /* Use the single default context that we
1017 * always have allocated. */
1018 pfob->mailbox.default_context_allocated = true;
1019 context = &pfob->mailbox.default_context;
1020 } else if (pfob->mcc) {
1021 /* Get a context from the free list. If any are available. */
1022 if (!list_empty(&pfob->mcc->wrb_context.list_head)) {
1023 context = list_first_entry(
1024 &pfob->mcc->wrb_context.list_head,
1025 struct be_mcc_wrb_context, next);
1029 spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
/*
 * Return a WRB context to the free pool, or release the mailbox's
 * single default context.  Serialized by mcc_context_lock.
 */
1035 _be_mcc_free_wrb_context(struct be_function_object *pfob,
1036 			struct be_mcc_wrb_context *context)
1042  * Zero during free to try and catch any bugs where the context
1043  * is accessed after a free.
/*
 * BUGFIX: the original used sizeof(context), which is the size of the
 * *pointer* (4/8 bytes), so only the first few bytes of the structure
 * were cleared and the stated use-after-free canary never worked.
 * sizeof(*context) zeroes the whole structure as intended (CWE-467).
 */
1045 memset(context, 0, sizeof(*context));
1047 spin_lock_irqsave(&pfob->mcc_context_lock, irq);
1049 if (context == &pfob->mailbox.default_context) {
1050 /* Free the default context. */
1051 ASSERT(pfob->mailbox.default_context_allocated);
1052 pfob->mailbox.default_context_allocated = false;
1054 /* Add to free list. */
1056 list_add_tail(&context->next,
1057 &pfob->mcc->wrb_context.list_head);
1060 spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
/*
 * Install (or replace) the asynchronous-event callback and its opaque
 * context.  Takes the parent function's cq_lock so the pair is never
 * observed half-updated by the CQ processing path.
 */
1064 be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
1065 mcc_async_event_callback cb, void *cb_context)
1067 /* Lock against anyone trying to change the callback/context pointers
1068 * while being used. */
1069 spin_lock_irqsave(&mcc_object->parent_function->cq_lock,
1070 mcc_object->parent_function->cq_irq);
1072 /* Assign the async callback. */
1073 mcc_object->async_context = cb_context;
1074 mcc_object->async_cb = cb;
1076 spin_unlock_irqrestore(&mcc_object->parent_function->cq_lock,
1077 mcc_object->parent_function->cq_irq);
1082 #define MPU_EP_CONTROL 0
1083 #define MPU_EP_SEMAPHORE 0xac
1086 *-------------------------------------------------------------------
1087 * Function: be_wait_for_POST_complete
1088 * Waits until the BladeEngine POST completes (either in error or success).
1090 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1091 *-------------------------------------------------------------------
/*
 * Polls the EP semaphore register every ~1ms for up to ~60s, logging
 * once per second, until firmware reports ready or a POST error.
 * NOTE(review): listing is missing lines (loop braces, return
 * statements) -- verify against the complete source.
 */
1093 static int be_wait_for_POST_complete(struct be_function_object *pfob)
1095 struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
1097 u32 post_error, post_stage;
1099 const u32 us_per_loop = 1000;	/* 1000us */
1100 const u32 print_frequency_loops = 1000000 / us_per_loop;
1101 const u32 max_loops = 60 * print_frequency_loops;
1105 * Wait for arm fw indicating it is done or a fatal error happened.
1106 * Note: POST can take some time to complete depending on configuration
1107 * settings (consider ARM attempts to acquire an IP address
1112 status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
1113 post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1115 post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1117 if (0 == (loops % print_frequency_loops)) {
1118 /* Print current status */
1119 TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
1120 status.dw[0], post_stage);
1122 udelay(us_per_loop);
1123 } while ((post_error != 1) &&
1124 (post_stage != POST_STAGE_ARMFW_READY) &&
1125 (++loops < max_loops));
1127 if (post_error == 1) {
1128 TRACE(DL_ERR, "POST error! Status = 0x%x (stage = 0x%x)",
1129 status.dw[0], post_stage);
/* Not ready after max_loops iterations: treat as a time-out. */
1131 } else if (post_stage != POST_STAGE_ARMFW_READY) {
1132 TRACE(DL_ERR, "POST time-out! Status = 0x%x (stage = 0x%x)",
1133 status.dw[0], post_stage);
1142 *-------------------------------------------------------------------
1143 * Function: be_kickoff_and_wait_for_POST
1144 * Interacts with the BladeEngine management processor to initiate POST, and
1145 * subsequently waits until POST completes (either in error or success).
1146 * The caller must acquire the reset semaphore before initiating POST
1147 * to prevent multiple drivers interacting with the management processor.
1148 * Once POST is complete the caller must release the reset semaphore.
1149 * Callers who only want to wait for POST complete may call
1150 * be_wait_for_POST_complete.
1152 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1153 *-------------------------------------------------------------------
/*
 * Polls (~1ms period, up to ~5s) until firmware reaches the
 * AWAITING_HOST_RDY stage, then writes HOST_RDY to kick off POST and
 * delegates the remaining wait to be_wait_for_POST_complete().
 */
1156 be_kickoff_and_wait_for_POST(struct be_function_object *pfob)
1158 struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
1161 const u32 us_per_loop = 1000;	/* 1000us */
1162 const u32 print_frequency_loops = 1000000 / us_per_loop;
1163 const u32 max_loops = 5 * print_frequency_loops;
1165 u32 post_error, post_stage;
1167 /* Wait for arm fw awaiting host ready or a fatal error happened. */
1168 TRACE(DL_INFO, "Wait for BladeEngine ready to POST");
1170 status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
1171 post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1173 post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1175 if (0 == (loops % print_frequency_loops)) {
1176 /* Print current status */
1177 TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
1178 status.dw[0], post_stage);
1180 udelay(us_per_loop);
1181 } while ((post_error != 1) &&
1182 (post_stage < POST_STAGE_AWAITING_HOST_RDY) &&
1183 (++loops < max_loops));
1185 if (post_error == 1) {
1186 TRACE(DL_ERR, "Pre-POST error! Status = 0x%x (stage = 0x%x)",
1187 status.dw[0], post_stage);
1189 } else if (post_stage == POST_STAGE_AWAITING_HOST_RDY) {
/* Signal host readiness to let firmware proceed with POST. */
1190 iowrite32(POST_STAGE_HOST_RDY, pfob->csr_va + MPU_EP_SEMAPHORE);
1192 /* Wait for POST to complete */
1193 s = be_wait_for_POST_complete(pfob);
1196 * Either a timeout waiting for host ready signal or POST has
1197 * moved ahead without requiring a host ready signal.
1198 * Might as well give POST a chance to complete
1199 * (or timeout again).
1201 s = be_wait_for_POST_complete(pfob);
1207 *-------------------------------------------------------------------
1208 * Function: be_pci_soft_reset
1209 * This function is called to issue a BladeEngine soft reset.
1210 * Callers should acquire the soft reset semaphore before calling this
1211 function. Additionally, callers should ensure they cannot be pre-empted
1212 * while the routine executes. Upon completion of this routine, callers
1213 * should release the reset semaphore. This routine implicitly waits
1214 * for BladeEngine POST to complete.
1216 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1217 *-------------------------------------------------------------------
1219 int be_pci_soft_reset(struct be_function_object *pfob)
1221 struct PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
1222 struct PCICFG_ONLINE0_CSR_AMAP pciOnline0;
1223 struct PCICFG_ONLINE1_CSR_AMAP pciOnline1;
1224 struct EP_CONTROL_CSR_AMAP epControlCsr;
1225 int status = BE_SUCCESS;
1226 u32 i, soft_reset_bit;
1228 TRACE(DL_NOTE, "PCI reset...");
1230 /* Issue soft reset #1 to get BladeEngine into a known state. */
1231 soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
1232 AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
/* NOTE(review): the soft-reset value is written to host_timer_int_ctrl,
 * not to the soft_reset register it was read from — verify this offset
 * against the PCICFG register map before changing it. */
1233 PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]);
1235 * wait til soft reset is deasserted - hardware
1236 * deasserts after some time.
1241 soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
1242 soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
1243 softreset, soft_reset.dw);
/* Poll up to 1024 iterations for hardware to clear the reset bit. */
1244 } while (soft_reset_bit && (i++ < 1024));
1245 if (soft_reset_bit != 0) {
1246 TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected.");
1250 /* Mask everything */
1251 PCICFG0_WRITE(pfob, ue_status_low_mask, 0xFFFFFFFF);
1252 PCICFG0_WRITE(pfob, ue_status_hi_mask, 0xFFFFFFFF);
1254 * Set everything offline except MPU IRAM (it is offline with
1255 * the soft-reset, but soft-reset does not reset the PCICFG registers!)
1257 pciOnline0.dw[0] = 0;
1258 pciOnline1.dw[0] = 0;
1259 AMAP_SET_BITS_PTR(PCICFG_ONLINE1_CSR, mpu_iram_online,
1261 PCICFG0_WRITE(pfob, online0, pciOnline0.dw[0]);
1262 PCICFG0_WRITE(pfob, online1, pciOnline1.dw[0]);
1266 /* Issue soft reset #2. */
1267 AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
1268 PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]);
1270 * wait til soft reset is deasserted - hardware
1271 * deasserts after some time.
1276 soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
1277 soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
1278 softreset, soft_reset.dw);
1279 } while (soft_reset_bit && (i++ < 1024));
1280 if (soft_reset_bit != 0) {
/* Fix: this path checks the SECOND soft reset; the message previously
 * said "#1" (copy-paste error), making the two failures
 * indistinguishable in the log. */
1281 TRACE(DL_ERR, "Soft-reset #2 did not deassert as expected.");
1289 /* Take MPU out of reset. */
1291 epControlCsr.dw[0] = ioread32(pfob->csr_va + MPU_EP_CONTROL);
1292 AMAP_SET_BITS_PTR(EP_CONTROL_CSR, CPU_reset, &epControlCsr, 0);
1293 iowrite32((u32)epControlCsr.dw[0], pfob->csr_va + MPU_EP_CONTROL);
1295 /* Kickoff BE POST and wait for completion */
1296 status = be_kickoff_and_wait_for_POST(pfob);
1304 *-------------------------------------------------------------------
1305 * Function: be_pci_reset_required
1306 * This private function is called to detect if a host entity is
1307 * required to issue a PCI soft reset and subsequently drive
1308 * BladeEngine POST. Scenarios where this is required:
1309 * 1) BIOS-less configuration
1310 * 2) Hot-swap/plug/power-on
1312 * return true if a reset is required, false otherwise
1313 *-------------------------------------------------------------------
/* Decides whether this host must drive a PCI soft reset by inspecting
 * the current POST stage in the MPU end-point semaphore register. */
1315 static bool be_pci_reset_required(struct be_function_object *pfob)
1317 struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
1318 bool do_reset = false;
/* post_error is decoded below; its consumer is not visible in this
 * chunk — presumably checked before returning. TODO confirm. */
1319 u32 post_error, post_stage;
1322 * Read the POST status register
1324 status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
1325 post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, error,
1327 post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, stage,
/* POST has not advanced past the awaiting-host-ready stage, i.e. the
 * adapter came up without BIOS/other driver assistance. */
1329 if (post_stage <= POST_STAGE_AWAITING_HOST_RDY) {
1331 * If BladeEngine is waiting for host ready indication,
1332 * we want to do a PCI reset.
1341 *-------------------------------------------------------------------
1342 * Function: be_drive_POST
1343 * This function is called to drive BladeEngine POST. The
1344 * caller should ensure they cannot be pre-empted while this routine executes.
1346 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1347 *-------------------------------------------------------------------
1349 int be_drive_POST(struct be_function_object *pfob)
1353 if (false != be_pci_reset_required(pfob)) {
1354 /* PCI reset is needed (implicitly starts and waits for POST) */
1355 status = be_pci_soft_reset(pfob);
1357 /* No PCI reset is needed, start POST */
1358 status = be_kickoff_and_wait_for_POST(pfob);