www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge with Greg's USB tree at kernel.org:/pub/scm/linux/kernel/git/gregkh/usb-2.6...
author Linus Torvalds <torvalds@ppc970.osdl.org.(none)>
Tue, 19 Apr 2005 14:28:57 +0000 (07:28 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org.(none)>
Tue, 19 Apr 2005 14:28:57 +0000 (07:28 -0700)
Yah, it does work to merge. Knock wood.

72 files changed:
Documentation/DMA-mapping.txt
Documentation/scsi/ChangeLog.lpfc [new file with mode: 0644]
Documentation/scsi/lpfc.txt [new file with mode: 0644]
Documentation/scsi/qla2xxx.revision.notes [deleted file]
Documentation/scsi/scsi_mid_low_api.txt
drivers/block/ll_rw_blk.c
drivers/s390/scsi/zfcp_aux.c
drivers/scsi/53c700.c
drivers/scsi/53c700.h
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR_D700.c
drivers/scsi/aic7xxx/Kconfig.aic7xxx
drivers/scsi/aic7xxx/aic79xx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm.h
drivers/scsi/aic7xxx/cam.h
drivers/scsi/aic7xxx_old.c
drivers/scsi/arm/fas216.c
drivers/scsi/cpqfcTSinit.c
drivers/scsi/cpqfcTSworker.c
drivers/scsi/gdth.c
drivers/scsi/gdth.h
drivers/scsi/ips.c
drivers/scsi/lasi700.c
drivers/scsi/libata-scsi.c
drivers/scsi/lpfc/Makefile [new file with mode: 0644]
drivers/scsi/lpfc/lpfc.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_attr.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_compat.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_crtn.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_ct.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_disc.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_els.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_hbadisc.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_hw.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_init.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_logmsg.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_mbox.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_mem.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_nportdisc.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_scsi.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_scsi.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_sli.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_sli.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_version.h [new file with mode: 0644]
drivers/scsi/pci2000.c
drivers/scsi/qla2xxx/Makefile
drivers/scsi/qla2xxx/qla_attr.c [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_listops.h [deleted file]
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qlogicfc.c
drivers/scsi/qlogicisp.c
drivers/scsi/scsi.h
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sg.c
drivers/scsi/sim710.c
include/linux/blkdev.h
include/scsi/scsi_device.h
include/scsi/scsi_host.h

index f4ac37f157ea905e06e132c6ee5a1587cb05cbae..684557474c156210114243f89b2f79667bdb179e 100644 (file)
@@ -443,15 +443,9 @@ Only streaming mappings specify a direction, consistent mappings
 implicitly have a direction attribute setting of
 PCI_DMA_BIDIRECTIONAL.
 
-The SCSI subsystem provides mechanisms for you to easily obtain
-the direction to use, in the SCSI command:
-
-       scsi_to_pci_dma_dir(SCSI_DIRECTION)
-
-Where SCSI_DIRECTION is obtained from the 'sc_data_direction'
-member of the SCSI command your driver is working on.  The
-mentioned interface above returns a value suitable for passing
-into the streaming DMA mapping interfaces below.
+The SCSI subsystem tells you the direction to use in the
+'sc_data_direction' member of the SCSI command your driver is
+working on.
 
 For Networking drivers, it's a rather simple affair.  For transmit
 packets, map/unmap them with the PCI_DMA_TODEVICE direction
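
A minimal sketch (not part of this patch) of how a low-level driver might
feed the command's sc_data_direction, as described in the hunk above,
straight into a streaming DMA mapping; the device pointer and buffer
fields are illustrative:

        dma_addr_t busaddr;

        busaddr = dma_map_single(&pdev->dev, cmd->request_buffer,
                                 cmd->request_bufflen,
                                 cmd->sc_data_direction);
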
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
new file mode 100644 (file)
index 0000000..ae3f962
--- /dev/null
@@ -0,0 +1,1865 @@
+Known issues :
+       * Please read the associated RELEASE-NOTES file !!!
+       * This source release is intended for upstream kernel releases only!
+
+Changes from 20050323 to 20050413
+
+       * Changed version number to 8.0.28
+       * Fixed build warning for 2.6.12-rc2 kernels: mempool_alloc now
+         requires a function which takes an unsigned int for gfp_flags
+         (see the sketch at the end of this section).
+       * Removed pci dma sync calls to coherent/consistent pci memory.
+       * Merged patch from Christoph Hellwig <hch@lst.de>: split helpers
+         for fabric and nport logins out of lpfc_cmpl_els_flogi.
+       * Removed sysfs attributes that are used to dump the various
+         discovery lists.
+       * Fix for issue where not all luns are seen.  Search all lists
+         other than unmap list in lpfc_find_target().  Otherwise INQUIRYs
+         to luns on nodes in NPR or other relevant states (PLOGI,
+         PRLI...) are errored back and scan() terminates.
+       * Removed FC_TRANSPORT_PATCHESxxx defines.  They're in 2.6.12-rc1.
+       * Compare return value of lpfc_scsi_tgt_reset against SCSI
+         midlayer codes SUCCESS/FAILED which that function returns rather
+         than SLI return code.
+       * Removed extraneous calls to lpfc_sli_next_iotag which should
+         only be called from lpfc_sli_submit_iocb.  Also make
+         lpfc_sli_next_iotag static.
+       * Added PCI ID for LP10000-S.
+       * Changes in lpfc_abort_handler(): Return SUCCESS if we did not
+         find command in both TX and TX completion queues.  Return ERROR
+         if we timed out waiting for command to complete after abort was
+         issued.
+       * Zero-out response sense length in lpfc_scsi_prep_cmnd to prevent
+         interpretation of stale sense length when the command completes
+         - was causing spurious 0710 messages.
+       * Moved clearing of host_scribble inside host_lock in IO
+         completion path.
+       * Fixed a bunch of mixed tab/space indentation.
+       * Allow hex format numbers in sysfs attribute setting.  Fix
+         application hang when invalid numbers are used in sysfs
+         settings.
+       * Removed extra iotag allocation by lpfc_abort_handler.
+       * Clear host_scribble in the scsi_cmnd structure when failing in
+         queuecommand.
+       * Changed logic at top of lpfc_abort_handler so that if the
+         command's host_scribble field is NULL, return SUCCESS because the
+         driver has already returned the command to the midlayer.
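+
+         A minimal sketch (not part of this release) of the allocator
+         signature referred to above; the function name and size are
+         illustrative:
+
+               static void *lpfc_pool_alloc(unsigned int gfp_flags, void *data)
+               {
+                       return kmalloc(1024, gfp_flags);  /* size illustrative */
+               }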
+
+Changes from 20050308 to 20050323
+
+       * Changed version number to 8.0.27
+       * Changed a few lines from patch submitted by Christoph Hellwig
+         (3/19). MAILBOX_CMD_WSIZE * sizeof(uint32_t) is replaced with an
+         equivalent MAILBOX_CMD_SIZE macro.
+       * Merged patch from Christoph Hellwig (3/19): some misc patches
+         against the latest drivers:
+         - stop using volatile.  if you need special ordering use memory
+           barriers but that doesn't seem to be the case here
+         - switch lpfc_sli_pcimem_bcopy to take void * arguments.
+         - remove typecast for constants - a U postfix marks them
+           unsigned int in C
+         - add a MAILBOX_CMD_SIZE macro, as most users of
+           MAILBOX_CMD_WSIZE didn't really want the word count
+         - kill struct lpfc_scsi_dma_buf and embedded the two members
+           directly in struct lpfc_scsi_buf
+         - don't call dma_sync function on allocations from
+           pci_pool_alloc - it's only for streaming mappings (pci_map_*)
+       * Merged patch from Christoph Hellwig (3/19) - nlp_failMask isn't
+         ever used by the driver, just reported to userspace (and that in
+         a multi-value file which is against the sysfs guidelines).
+       * Change pci_module_init to pci_register_driver() with appropriate
+         ifdefs.
+       * Added #include <linux/dma-mapping.h> as required by the DMA
+         32bit and 64bit defines on some archs.
+       * Merged patch from Christoph Hellwig (03/19) - fix initialization
+         order - scsi_add_host must happen last from scsi POV. Also some
+         minor style/comment fixups.
+       * Fixed use of TRANSPORT_PATCHES_V2 by changing to
+         FC_TRANSPORT_PATCHES_V2.
+
+Changes from 20050223 to 20050308
+
+       * Changed version number to 8.0.26
+       * Revise TRANSPORT_PATCHES_V2 so that lpfc_target is removed and
+         rport data is used instead. Removed device_queue_hash[].
+       * Changed RW attributes of scan_down, max_luns and fcp_bind_method
+         to R only.
+       * Fixed RSCN handling during initial link initialization.
+       * Fixed issue with receiving PLOGI handling when node is on NPR
+         list and marked for ADISC.
+       * Fixed RSCN timeout issues.
+       * Reduced severity of "SCSI layer issued abort device" message to
+         KERN_WARNING.
+       * Feedback from Christoph Hellwig (on 2/5) - In the LPFC_EVT_SCAN
+         case the caller already has the target ID handy, so pass that
+         one in evt_arg1.
+       * Fix compile warning/resultant panic in
+         lpfc_register_remote_port().
+
+Changes from 20050215 to 20050223
+
+       * Changed version number to 8.0.25
+       * Add appropriate comments to lpfc_sli.c.
+       * Use DMA_64BIT_MASK and DMA_32BIT_MASK defines instead of
+         0xffffffffffffffffULL and 0xffffffffULL respectively.  Use the
+         pci equivalents instead of dma_set_mask, and also modify the
+         condition clause to actually exit on the error condition (see the
+         sketch at the end of this section).
+       * Restart the els timeout handler only if txcmplq_cnt is non-zero.
+         On submission, mod_timer the els_tmofunc.  This prevents the
+         worker thread from waking up the els_tmo handler unnecessarily.
+         The thread was
+         being woken up even when there were no pending els commands.
+       * Added new typedefs for abort and reset functions.
+       * Collapsed lpfc_sli_abort_iocb_xxx into a single function.
+       * Collapsed lpfc_sli_sum_iocb_xxx into a single function.
+       * Removed TXQ from all abort and reset handlers since it is never
+         used.
+       * Fixed Oops panic in 8.0.23 (reported on SourceForge).  The
+         driver was not handling LPFC_IO_POLL cases correctly in
+         fast_ring_event and was setting the tgt_reset timeout to 0 in
+         lpfc_reset_bus_handler.  This 0 timeout would not allow the FW
+         to timeout ABTS's on bad targets and allow the driver to have an
+         iocb on two lists.  Also split the lpfc_sli_ringtxcmpl_get
+         function into two routines to match the fast and slow completion
+         semantics - ELS completions worked for the wrong reasons.  Also
+         provided new log message number - had two 0326 entries.
+       * Removed unused #define LPFC_SCSI_INITIAL_BPL_SIZE.
+       * Removed unused struct lpfc_node_farp_pend definition.
+       * Removed unused #define LPFC_SLIM2_PAGE_AREA.
+       * Changed zeros used as pointers to NULL.
+       * Removed unneeded braces around single line in lpfc_do_work.
+       * Close humongous memory leak in lpfc_sli.c - driver was losing 13
+         iocbq structures per LIP.
+       * Removed last of GFP_ATOMIC allocations.
+       * Locks are not taken outside of nportdisc, hbadisc, els and most
+         of the init, sli, mbox and ct groups of functions
+       * Fix comment for lpfc_sli_iocb_cmd_type to fit within 80 columns.
+       * Replaced wait_event() with wait_event_interruptible().
+         wait_event() puts the worker thread in an UNINTERRUPTIBLE state
+         causing it to figure in load average calculations. Also add a
+         BUG_ON to the ret code of wait_event_interruptible() since the
+         premise is that the worker thread is signal-immune.
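+
+         A minimal sketch (not part of this release) of the dual-mask
+         fallback described earlier in this section, assuming a struct
+         pci_dev *pdev:
+
+               if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+                       if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+                               /* neither mask was accepted: fail the probe */
+                               return -EIO;
+                       }
+               }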
+
+Changes from 20050208 to 20050215
+
+       * Changed version number to 8.0.24
+       * Fixed a memory leak of iocbq structure.  For ELS solicited iocbs
+         sli layer now frees the response iocbs after processing it.
+       * Closed large memory leak -- we were losing 13 iocbq structures
+         per LIP.
+       * Changing EIO and ENOMEM to -EIO and -ENOMEM respectively.
+       * Cleanup of lpfc_sli_iocb_cmd_type array and typing of iocb type.
+       * Implemented Christoph Hellwig's feedback from 02/05: Remove
+         macros putLunHigh, putLunLow. Use lpfc_put_lun() inline instead.
+       * Integrated Christoph Hellwig's feedback from 02/05: Instead of
+         cpu_to_be32(), use swab16((uint16_t)lun). This is the same as
+         "swab16() on LE" and "<<16 on BE".
+       * Added updates for revised FC remote port patch (dev_loss_tmo
+         moved to rport, hostdata renamed dd_data, add fc_remove_host()
+         on shutdown).
+       * Removed unnecessary function prototype.
+       * Added code to prevent waking up worker thread after the exit of
+         worker thread.  Fixes panic seen with insmod/rmmod testing with
+         70 disks.
+       * Integrated Christoph Hellwig's patch from 1/30: Make some
+         variables/code static (namely lpfcAlpaArray and
+         process_nodev_timeout()).
+       * Integrated Christoph Hellwig's patch from 1/30: Use
+         switch...case instead of if...else if...else if while decoding
+         JEDEC id.
+
+Changes from 20050201 to 20050208
+
+       * Changed version number to 8.0.23
+       * Make lpfc_work_done, lpfc_get_scsi_buf,
+         lpfc_mbx_process_link_up, lpfc_mbx_issue_link_down and
+         lpfc_sli_chipset_init static.
+       * Cleaned up references to list_head->next field in the driver.
+       * Replaced lpfc_discq_post_event with lpfc_workq_post_event.
+       * Implemented Christoph Hellwig's review from 2/5: Check for return
+         values of kmalloc.
+       * Integrated Christoph Hellwig's patch from 1/30: Protecting
+         scan_tmo and friends in !FC_TRANSPORT_PATCHES_V2 &&
+         !USE_SCAN_TARGET.
+       * Integrated Christoph Hellwig's patch from 1/30: Some fixes in
+         the evt handling area.
+       * Integrated Christoph Hellwig's patch from 1/30: Remove usage of
+         intr_inited variable. The interrupt initialization from OS side
+         now happens in lpfc_probe_one().
+       * Integrated Christoph Hellwig's patch from 1/30: remove shim
+         lpfc_alloc_transport_attr - remove shim lpfc_alloc_shost_attrs -
+         remove shim lpfc_scsi_host_init - allocate phba mem in scsi's
+         hostdata - readjust code so that there are no use-after-frees
+         (don't use after scsi_host_put) - make lpfc_alloc_sysfs_attr
+         return errors
+       * Fixed panic in lpfc_probe_one(). Do not delete entries inside a
+         list iterator that is not the _safe variant.
+       * Clean up fast lookup array of the fcp_ring when aborting iocbs.
+       * Following timeout handlers moved to the lpfc worker thread:
+         lpfc_disc_timeout, lpfc_els_timeout, lpfc_mbox, lpfc_fdmi_tmo,
+         lpfc_nodev_timeout, lpfc_els_retry_delay.
+       * Removed unused NLP_NS_NODE #define.
+       * Integrated Christoph Hellwig's patch from 1/30: remove unused
+         lpfc_hba_list; remove unused lpfc_rdrev_wd30; remove
+         lpfc_get_brd_no and use the Linux provided IDR (see the sketch
+         at the end of this section).
+       * Changed board reset procedure so that lpfc_sli_send_reset()
+         writes the INITFF bit and leaves lpfc_sli_brdreset() to clear
+         the bit.
+       * Removed outfcpio sysfs device attribute.
+       * VPD changes: 1) Modify driver to use the model name and
+         description from the VPD data if it exists 2) Rework use of DUMP
+         mailbox command to support HBAs with 256 bytes of SLIM.
+       * Fixed compile error for implicit definition of struct
+         scsi_target
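+
+         A minimal sketch (not part of this release) of IDR-based board
+         numbering as mentioned earlier in this section; names are
+         illustrative and <linux/idr.h> is assumed:
+
+               static DEFINE_IDR(lpfc_hba_index);
+
+               int error, brd_no;
+
+               if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
+                       return -ENOMEM;
+               error = idr_get_new(&lpfc_hba_index, NULL, &brd_no);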
+
+Changes from 20050124 to 20050201
+
+       * Changed version number to 8.0.22
+       * Moved discovery timeout handler to worker thread. There are
+         function calls in this function which are not safe to call from
+         HW interrupt context.
+       * Removed free_irq from the error path of HBA initialization.
+         This will fix the free of uninitialised IRQ when config_port
+         fails.
+       * Make sure function which processes unsolicited IOCBs on ELS ring
+         still is called with the lock held.
+       * Clear LA bit from work_ha when we are not supposed to handle LA.
+       * Fix double locking bug in the error handling part of
+         lpfc_mbx_cmpl_read_la.
+       * Implemented fast IOCB processing for FCP ring.
+       * Since mboxes are now unconditionally allocated outside of the
+         lock, free them in cases where they are not used.
+       * Moved a couple of GFP_ATOMIC allocations in lpfc_disc_timeout to
+         before the locks are taken so that they can use GFP_KERNEL
+         instead (see the sketch at the end of this section). Also cleaned
+         up code.
+       * Collapsed interrupt handling code into one function.
+       * Removed event posting and handling of solicited and unsolicited
+         iocbs.
+       * Remove ELS ring handling leftovers from the lpfc_sli_inter().
+       * ELS ring (any slow ring) moved from the lpfc_sli_inter() into a
+         worker thread.  Link Attention, Mbox Attention, and Error
+         Attention, as well as slow rings' attention is passed to the
+         worker thread via worker thread copy of Host Attention
+         register. Corresponding events are removed from the event queue
+         handling.
+       * Add entries to hba structure to delegate some functionality from
+         the lpfc_sli_inter() to a worker thread.
+       * Reduced use of GFP_ATOMIC for memory allocations.
+       * Moved locks deeper in order to change GFP_ATOMIC to GFP_KERNEL.
+       * IOCB initialization fix for Raw IO.
+       * Removed qcmdcnt, iodonecnt, errcnt from lpfc_target and from
+         driver.
+       * Added call to lpfc_els_abort in lpfc_free_node.  Modified
+         lpfc_els_abort to reset txq and txcmplq iterator after an
+         iocb_cmpl call.
+       * Fixed a use after free issue in lpfc_init.c.
+       * Defined default mailbox completion routine and removed code in
+         the sli layer which checks the mbox_cmpl == 0 to free mail box
+         resources.
+       * In lpfc_workq_post_event, clean up comment formatting and remove
+         unneeded cast of kmalloc's return.
+       * Removed loop which calls fc_remote_port_unblock and
+         fc_remote_port_delete for every target as this same effect is
+         accomplished by the scsi_remove_host call.
+       * Minor cleanup of header files.  Stop header files including
+         other header files.  Removed sentinels which hide multiple
+         inclusions.  Removed unneeded #include directives.
+       * Fixed memory leaks in mailbox error paths.
+       * Moved lock from around of lpfc_work_done to lpfc_work_done
+         itself.
+       * Removed typedef for LPFC_WORK_EVT_t and left just struct
+         lpfc_work_evt to comply with linux_scsi review coding style.
+       * Fixed some trailing whitespaces, spaces used for indentation and
+         ill-formatting multiline comments.
+       * Bug fix for Raw IO errors.  Reuse of IOCBs now mandates setting
+         of ulpPU and fcpi_parm to avoid incorrect read check of Write IO
+         and incorrect read length.
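+
+         A minimal sketch (not part of this release) of the
+         allocate-before-locking pattern behind the GFP_ATOMIC reductions
+         noted in this section; the pool and lock names are illustrative:
+
+               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               spin_lock_irq(phba->host->host_lock);
+               /* ... use mbox under the lock; free it on paths that
+                  do not consume it ... */
+               spin_unlock_irq(phba->host->host_lock);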
+
+Changes from 20050110 to 20050124
+
+       * Changed version number to 8.0.21
+       * Removed unpleasant casting in the definition and use of
+         lpfc_disc_action function pointer array.
+       * Makefile cleanup.  Use ?= operator for setting default
+         KERNELVERSION and BASEINCLUDE values.  Use $(PWD) consistently.
+       * Removed call to lpfc_sli_intr from lpfc_config_port_post.  All
+         Linux systems will service hardware interrupts while bringing up
+         the driver.
+       * Christoph Hellwig change request: Reorg of contents of
+         lpfc_hbadisc.c, lpfc_scsi.h, lpfc_init.c, lpfc_sli.c,
+         lpfc_attr.c, lpfc_scsi.c.
+       * Renamed discovery thread to lpfc_worker thread.  Moved handling
+         of error attention and link attention and mbox event handler to
+         lpfc_worker thread.
+       * Removed .proc_info and .proc_name from the driver template and
+         associated code.
+       * Removed check of FC_UNLOADING flag in lpfc_queuecommand to
+         determine what result to return.
+       * Move modification of FC_UNLOADING flag under host_lock.
+       * Fix IOERR_RCV_BUFFER_WAITING handling for CT and ELS subsystem.
+       * Workaround firmware bug for IOERR_RCV_BUFFER_WAITING on ELS
+         ring.
+       * Fixed a couple lpfc_post_buffer problems in lpfc_init.c.
+       * Add missing spaces to the parameter descriptions for
+         lpfc_cr_delay, lpfc_cr_count and lpfc_discovery_threads.
+       * Lock before calling lpfc_sli_hba_down().
+       * Fix leak of "host" in the error path in the remove_one() path.
+       * Fix comment for lpfc_cr_count.  It defaults to 1.
+       * Fix issue where we are calling lpfc_disc_done() recursively from
+         lpfc_linkdown(), but list_for_each_entry_safe() is not safe for
+         such use.
+       * Bump lpfc_discovery_threads (count of outstanding ELS commands in
+         discovery) to 32
+       * If the SCSI midlayer tries to recover from an error on a lun
+         while the corresponding target is in the NPR state, lpfc driver
+         will reject all the resets. This will cause the target to be
+         moved to offline state and block all the I/Os. The fix for this
+         is to delay the lun reset to a target which is not in MAPPED
+         state until the target is rediscovered or nodev timeout is
+         fired.
+
+Changes from 20041229 to 20050110
+
+       * Changed version number to 8.0.20
+       * rport fix: use new fc_remote_port_rolechg() function instead of
+         direct structure change
+       * rport fix: last null pointer check
+       * Phase II of GFP_ATOMIC effort.  Replaced iocb_mem_pool and
+         scsibuf_mem_pool with kmalloc and linked list.  Inserted list
+         operations for mempool_alloc calls.  General code cleanup.  All
+         abort and reset routines converted.  Handle_ring_event
+         converted.
+       * If the mbox_cmpl == lpfc_sli_wake_mbox_wait in
+         lpfc_sli_handle_mb_event, pmb->context1 points to a waitq. Do
+         not free the structure.
+       * rport fixes: fix for rmmod crash
+       * rport fixes: when receiving PRLI's, set node/rport role values
+       * rport fixes: fix for unload and for fabric port deletes
+       * VPD info bug fix.
+       * lpfc_linkdown() should be able to process all outstanding events
+         by calling lpfc_disc_done() even if it is called from
+         lpfc_disc_done().  Moving all events from phba->dpc_disc to a
+         local local_dpc_disc prevents those events from being processed.
+         Removing that queue. From now on we should not see "Illegal
+         State Transition" messages.
+       * Release host lock and enable interrupts when calling
+         del_timer_sync()
+       * All related to rports: Clean up issues with rport deletion
+         Convert to using block/unblock on list remove (was del/add)
+         Moved rport delete to freenode - so rport tracks node.
+       * rport fixes: for fport, get maxframe and class support
+         information
+       * Added use of wait_event to work with kthread interface.
+       * Ensure that scsi_transport_fc.h is always pulled in by
+         lpfc_scsiport.c
+       * In remote port changes: no longer nulling target->pnode when
+         removing from mapped list. Pnode gets nulled when the node is
+         freed (after nodev tmo). This bug was causing i/o received in
+         the small window while the device was blocked to be errored w/
+         did_no_connect. With the fix, it returns host_busy
+         (per the pre-remote port changes).
+       * Merge in support for fc transport remote port use. This removes
+         any consistent bindings within the driver. All scanning is now
+         on a per-target basis driven by the discovery engine.
+
+Changes from 20041220 to 20041229
+
+       * Changed version number to 8.0.19
+       * Fixed bug for handling RSCN type 3.  Terminate RSCN mode
+         properly after ADISC handling completes.
+       * Add list_remove_head macro (sketched at the end of this section).
+         The macro cleans up memory allocation list handling.  Also clean
+         up lpfc_reset_bus_handler - the routine
+         does not need to allocate its own scsi_cmnd and scsi_device
+         structures.
+       * Fixed a potential discovery bug (nlp list corruption) and a
+         potential memory leak
+       * Part 1 of the memory allocation rework request by linux-scsi.
+         This effort fixes the number of bdes per scsi_buf to 64, makes
+         the scatter-gather count a module parameter, builds a linked
+         list of scsi_bufs, and removes all dependencies on lpfc_mem.h.
+       * Reverted lpfc_do_dpc, probe_one, remove_one to original
+         implementation.  Too many problems (driver not completing
+         initial discovery, and IO not starting to disks).  Backs out
+         kthread patch.
+       * Fix race condition in lpfc_do_dpc.  If wake_up interrupt occurs
+         while lpfc_do_dpc is running disc_done and the dpc list is
+         empty, the latest insertion is missed and the schedule_timeout
+         does not wakeup.  The sleep interval is MAX_SCHEDULE_TIMEOUT
+         defined as ~0UL >> 1, a very large number.  Hacked it to 5*HZ
+         for now.
+       * Fixed bug introduced when discovery thread implementation was
+         moved to kthread. kthread_stop() is not able to wake up a thread
+         waiting on a semaphore and "modprobe -r lpfc" is not always
+         (most of the time) able to complete. The fix is to not use a
+         semaphore for the interruptible sleep.
+       * Small Makefile cleanup - Remove remnants of 2.4 vs. 2.6
+         determination.
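+
+         A minimal sketch (not the exact macro added) of a
+         list_remove_head-style helper as described earlier in this
+         section:
+
+               #define list_remove_head(list, entry, type, member)           \
+                       do {                                                  \
+                               entry = NULL;                                 \
+                               if (!list_empty(list)) {                      \
+                                       entry = list_entry((list)->next,      \
+                                                          type, member);     \
+                                       list_del_init(&(entry)->member);      \
+                               }                                             \
+                       } while (0)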
+
+Changes from 20041213 to 20041220
+
+       * Changed version number to 8.0.18
+       * Janitorial cleanup after removal of sliinit and ringinit[]: ring
+         statistics are owned by the ring and SLI stats are in the sli
+         structure.
+       * Integrated patch from Christoph Hellwig <hch@lst.de> Kill
+         compile warnings on 64 bit platforms: variables for %llx format
+         specifiers must be cast to long long because (u)int64_t can
+         just be long on 64-bit platforms.
+       * Integrated patch from Christoph Hellwig <hch@lst.de> Removes
+         dead code.
+       * Integrated patch from Christoph Hellwig <hch@lst.de>: use
+         kthread interface.
+       * Print LPFC_MODULE_DESC banner in module init routine.
+       * Removed sliinit structure and ringinit[] array.
+       * Changed log message number from 324 to 326 in lpfc_sli.c.
+       * Wait longer for commands to complete in lpfc_reset_lun_handler
+         and lpfc_reset_bus_handler.  Also use schedule_timeout() instead
+         of msleep() and add error message in lpfc_abort_handler()
+       * When setting lpfc_nodev_tmo, from dev_loss set routine, make 1
+         sec minimum value.
+       * Functions which assume the lock is held were called without the
+         lock, and the kernel complained about unlocking a lock which is
+         not locked.
+       * Added code in linkdown to unreg if we know login session will be
+         terminated.
+       * Removed automap config parameter and fixed up use_adisc logic to
+         include FCP2 devices.
+
+Changes from 20041207 to 20041213
+
+       * Changed version number to 8.0.17
+       * Fix sparse warnings by adding __iomem markers to lpfc_compat.h.
+       * Fix some sparse warnings -- 0 used as NULL pointer.
+       * Make sure there's a space between every if and its (.
+       * Fix some overly long lines and make sure hard tabs are used for
+         indentation.
+       * Remove all trailing whitespace.
+       * Integrate Christoph Hellwig's patch for 8.0.14: if
+         pci_module_init fails we need to release the transport template.
+         (also don't print the driver name at startup, linux drivers can
+         be loaded without hardware present, and noise in the log for
+         that case is considered unpolite, better print messages only for
+         hardware actually found).
+       * Integrate Christoph Hellwig's patch for 8.0.14: Add missing
+         __iomem annotations, remove broken casts, mark functions static.
+         The only major change is changing some offsets from word-based to
+         byte-based so we can simply do void pointer arithmetic (gcc
+         extension) instead of casting to uint32_t.
+       * Integrate Christoph Hellwig's patch for 8.0.14: flag is always
+         LPFC_SLI_ABORT_IMED, aka 0 - remove dead code.
+       * Modified preprocessor #ifdef, #if, #ifndef to reflect upstream
+         kernel submission.  Clean build with make clean;make and make
+         clean;make ADVANCED=1 on SMP x86, 2.6.10-rc2 on RHEL 4 Beta
+         1. IO with a few LIPs and a long cable pull behaved accordingly.
+       * Implement full VPD support.
+       * Abort handler will try to wait for abort completion before
+         returning.  Fixes some panics in iocb completion code path.
+
+Changes from 20041130 to 20041207
+       
+       * Changed version number to 8.0.16
+       * Hung dt session fix.  When the midlayer calls to abort a scsi
+         command, make sure the driver does not complete post-abort
+         handler.  Just NULL the iocb_cmpl callback handler and let SLI
+         take over.
+       * Add Read check that uses SLI option to validate all READ data
+         actually received.
+
+
+Changes from 20041123 to 20041130
+
+       * Changed version number to 8.0.15
+       * Ifdef'd unused "binary" attributes by DFC_DEBUG for clean
+         compiles
+       * Stop DID_ERROR from showing up along with QUEUE_FULL set by the
+         Clarion array (SCSI error ret. val.  0x70028).  There is no need
+         for the driver to hard fail a command which was failed by the target
+         device.
+       * Fix for Scsi device scan bug reported on SourceForge.  Driver
+         was returning a DID_ERROR in lpfc_handle_fcp_error causing
+         midlayer to mark report luns as failing even though it
+         succeeded.
+       * Don't ignore SCSI status on underrun conditions for inquiries,
+         test unit ready's, etc.  This was causing us to lose
+         reservation conflicts, etc
+
+Changes from 20041018 to 20041123
+       
+       * Changed version number to 8.0.14
+       * Added new function "iterator" lpfc_sli_next_iocb_slot() which
+         returns pointer to iocb entry at cmdidx if queue is not full.
+         It also updates next_cmdidx, and local_getidx (but not cmdidx)
+       * lpfc_sli_submit_iocb() copies next_cmdidx into cmdidx. Now it is
+         the only place were we are updating cmdidx.
+       * lpfc_sli_update_ring() is split in to two --
+         lpfc_sli_update_ring() and lpfc_sli_update_full_ring().
+       * lpfc_sli_update_ring() doesn't have to read back the correct
+         value of cmdidx.
+       * Simplified lpfc_sli_resume_iocb() and its use.
+       * New static function lpfc_sli_next_iocb(phba, pring, &piocb) to
+         iterate through commands in the TX queue and new command (at the
+         end).
+       * Reduced max_lun to 256 (due to issues reported to some arrays).
+         Fixed comment, and macro values so def=256, min=1, max=32768.
+       * Fix an obvious typo/bug: kfree was used to free lpfc_scsi_buf
+         instead of mempool_free in lpfc_scsiport.c.
+       * Suppress nodev_tmo message for FABRIC nodes.
+       * Fixed some usage of plain integer as NULL pointer.
+       * Bug fix for FLOGI cmpl, lpfc_els_chk_latt error path code
+         cleanup.
+       * Fixup lpfc_els_chk_latt() to have Fabric NPorts go thru
+         discovery state machine as well.
+       * Fixes to lpfc_els_chk_latt().
+       * Use DID not SCSI target id as a port_id and add some missing
+         locks in lpfc_fcp.c.
+       * Changed eh_abort_handler to return FAILED if command is not
+         found in driver.
+       * Fix crash: paging request at virtual address 0000000000100108 -
+         a result of removing from the txcmpl list an item which was
+         already removed (100100 is the LIST_POISON1 value from the next
+         pointer and 8 is the offset of "prev").  The driver runs out of
+         iotags and does not handle that case well. The root of the problem
+         is in the initialization code in lpfc_sli.c
+       * Changes to work with proposed linux kernel patch to support
+         hotplug.
+       * Zero out seg_cnt in prep_io failure path to prevent double sg
+         unmap calls.
+       * Fix setting of upper 32 bits for Host Group Ring Pointers if in
+         SLIM. Old code was inappropriately masking off low order bits.
+       * Use the scsi_[activate|deactivate]_tcq calls provided in
+         scsi_tcq.h (see the sketch at the end of this section).
+       * Integrated patch from Christoph Hellwig (hch@lst.de): don't call
+         pci_dma_sync_* on coherent memory. pci_dma_sync_* is need and
+         pci_dma_sync_* on coherent memory. pci_dma_sync_* is needed and
+         coherent mappings.  Note: There are more consistent mappings
+         that are using pci_dma_sync calls. Probably these should be
+         removed as well.
+       * Modified lpfc_free_scsi_buf to accommodate all three scsi_buf
+         free types to alleviate miscellaneous panics with cable pull
+         testing.
+       * Set hotplug to default 0 and lpfc_target_remove to not remove
+         devices unless hotplug is enabled.
+       * Fixed discovery bug: plogi cmpl uses ndlp after its freed.
+       * Fixed discovery bug: rnid acc cmpl, can potentially use ndlp
+         after its freed.
+       * Modularize code path in lpfc_target_remove().
+       * Changes to support SCSI hotplug (ifdef'ed out because they need
+         kernel support: USE_SCAN_TARGET requires kernel support to export
+         the interface to scsi_scan_target and to move the SCAN_WILD_CARD
+         define to a general scsi header file.  USE_RESCAN_HOST requires
+         kernel support to export an interface to scan_scsi_host() with
+         the rescan flag turned on).
+       * Removed redundant variable declaration of lpfc_linkdown_tmo.
+       * Fix for large port count remove test.
+       * Added check to see if BAR1 register is valid before using BAR1
+         register for programming config_port mail box command.
+       * Added lpfc_scsi_hotplug to enable/disable driver support of SCSI
+         hotplug.
+       * Changed lpfc_disc_neverdev() to lpfc_disc_illegal() and changed
+         lpfc_disc_nodev() to lpfc_disc_noop().  Adjusted appropriate
+         events to use these routines.
+       * Add support for SCSI device hotplug.
+       * Take dummy lpfc_target's into account for lpfc_slave_destroy().
+       * Bug fix to store WWPN / WWNN in NameServer / FDMI lpfc_nodelist
+         entries.
+       * Added slavecnt in lpfc_target for diagnostic purposes.
+       * Added lpfc_hba load/unload flags to take care of special cases
+         for add/remove device.
+       * Have target add/remove delay before scanning.
+       * Have rmmod path cleanup blocked devices before scsi_remove_host.
+       * Added a #define for msleep for 2.6.5 kernels.
+       * In reset bus handler if memory allocation fails, return FAILED
+         and not SUCCESS.
+       * Have lpfc eh handlers, bus_reset and lun_reset, wait for all
+         associated I/Os to complete before returning.
+       * Fix memset byte count in lpfc_hba_init so that
+         LP1050 would initialize correctly.
+       * Background nodev_timeout processing to the DPC.  This enables us
+         to unblock (stop dev_loss_tmo) when appropriate.
+       * Fix array discovery with multiple luns.  The max_luns was 0 at
+         the time the host structure was initialized.  lpfc_cfg_params
+         then set the max_luns to the correct value afterwards.
+       * Remove unused define LPFC_MAX_LUN and set the default value of
+         lpfc_max_lun parameter to 512.
+       * Reduced stack usage of lpfc_hba_init.
+       * Cleaned up the following warning generated by
+         scripts/checkincludes.pl lpfc_fcp.c: scsi/scsi_cmnd.h is
+         included more than once.
+       * Replaced "set_current_state(TASK_UNINTERRUPTIBLE);
+         schedule_timeout(timeout)" with "msleep(timeout)".
+       * Fix: node was losing its starget when rediscovered. We saw messages
+         like: lpfc 0000:04:02.0: 0:0263 Cannot block scsi target as a
+         result.  Moved starget field into struct lpfc_target which is
+         referenced from the node.
+       * Add additional SLI layer logging in lpfc_sli.c.
+       * Ignore more unexpected completions in lpfc_nportdisc.c.
+       * Can not call lpfc_target_unblock from the soft interrupt
+         context.  It does not seem necessary to unblock the target from
+         nodev timeout.
+       * Introduce and use less lethal event handler for unexpected
+         events in lpfc_nportdisc.c.
+       * Can not call fc_target_(un)block() functions with interrupts
+         disabled in lpfc_scsiport.c.
+       * Added new configuration parameter, lpfc_max_luns range 1-32768,
+         default 32768.
+       * Allow lpfc_fcp.c to call lpfc_get_hba_sym_node_name().
+       * Increase nodev timeout from 20 seconds to 30 seconds.
+       * Replace some kfree((void*)ptr) with kfree(ptr).
+       * Make 3 functions static: lpfc_get_hba_sym_node_name,
+         lpfc_intr_prep and lpfc_setup_slim_access.  Move lpfc_intr_prep
+         and lpfc_setup_slim_access so they're defined before being used.
+       * Remove an unnecessary list_del() in lpfc_hbadisc.c.
+       * Set nlp_state before calling lpfc_nlp_list() since this will
+         potentially call fc_target_unblock which may cause a race in
+         queuecommand by releasing host_lock.
+       * Since lpfc_nodev_tmo < dev_loss_tmo remove queuecommand
+         DID_BAD_TARGET return for now.
+       * Fix a problem with rcv logo.
+       * Remove unused portstatistics_t structure.
+       * Remove #if 0 and unnecessary checks in lpfc_fcp.c.
+       * Simplify lpfc_issue_lip: Extra layer of protection removed.
+       * Grab lock before calling lpfc_sli_issue_mbox(phba, pmb,
+         MBX_NOWAIT) in lpfc_sli_issue_mbox_wait().
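+
+         A minimal sketch (not the driver's actual code) of using the
+         scsi_tcq.h helpers mentioned earlier in this section from a
+         slave_configure hook:
+
+               #include <scsi/scsi_tcq.h>
+
+               static int lpfc_slave_configure(struct scsi_device *sdev)
+               {
+                       if (sdev->tagged_supported)
+                               scsi_activate_tcq(sdev, sdev->host->cmd_per_lun);
+                       else
+                               scsi_deactivate_tcq(sdev, sdev->host->cmd_per_lun);
+                       return 0;
+               }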
+
+Changes from 20040920 to 20041018
+
+       * Changed version number to 8.0.13
+       * Hide some attributes using #ifndef DFC_DEBUG ... #endif.
+       * Modify Makefile to (1) make BUILD_NO_DEBUG=1 will hide some
+         (binary) attributes (2) make BUILD_FC_TRANS=0 will build driver
+         for 2.6.5 kernel with block/unblock patch.
+       * Modified #ifdef names.
+       * Added support for proposed FC transport host attributes (which
+         replaces some of the attributes we had local to the driver).
+         Removed the binary statistics sysfs attribute.
+       * Added extra ELS verbose logging for ELS responses.
+       * Added recognition for BUILD_FC_TRANS=2 to Makefile to define
+         FC_TRANS_VER2.
+       * Add a pointer for link stats allocation.
+       * Exported lpfc_get_hba_sym_node_name for use by FC_TRANS_VER2
+         sysfs routines.
+       * Fix discovery problem in lip testing: if device sends an ELS cmd
+         (i.e. LOGO) before our FLOGI completes it should be LS_RJT'ed.
+       * Moved #defines around to provide target_add/remove for upstream
+         kernel deliverables only not SLES9.  Provided ifdefs to #include
+         target_block/unblock only if FC_TRANS_VER1.
+       * Add sanity check in lpfc_nlp_list; move setting nlp_Target
+         outside the #ifdef.
+       * Added a blocked member to the lpfc_target structure for
+         block/unblock.  This member allows the driver to know when to
+         unblock for pci_remove_one or pci_add_one.  #ifdef'd some more
+         block/unblock stuff and removed some defensive checks from
+         target_block/unblock.
+       * Moved + 5 second window to dev_loss_tmo setting and updated
+         comments.
+       * Removed NULL target check from target_block/unblock and fixed up
+         a few comments.
+       * Enable sysfs attributes on 2.6.5 kernels and remove extra
+         compatibility code.
+       * Remove any and all trailing whitespace.
+       * Added message 0718 and return error when dma_map_single fails.
+       * Changed the fcpCntl2 commands to include an FCP_ prefix to get
+         rid of build warnings on later 2.6.9-rc kernels.  Build
+         conflicts with scsi/scsi.h.  Remove inclusions of scsi/scsi.h
+         from hbadisc.c, sli.c, and fcp.c since these modules had no
+         dependencies on scsi.h.
+       * Fixed a bug with RSCN handling. An RSCN received on one device
+         shouldn't affect other devices not referenced by the RSCN.
+       * Moved #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,6) to include
+         lpfc_jedec_to_ascii to prevent warning in SLES 9.
+       * Update Makefile to account for SLES 9 and scsi-target upstream
+         kernel.
+       * This checkin provides block/unblock hooks for the upstream scsi
+         target kernel and 2.6.5 on SLES9 SP1 with the block/unblock
+         patch.
+       * Discovery changes regarding setting targetp->pnode and
+         ndlp->nlp_Target Ensure fc_target_* routines are called properly
+         ndlp->nlp_Target.  Ensure fc_target_* routines are called properly
+         all the lpfc_consistent_bind_* routines don't set any driver
+         structure objects.
+       * Fix for timeout of READ_LA or READ_SPARAM mailbox command
+         causing panic.
+       * Cleanup list_del()'s for Discovery ndlp lists.
+       * Bug fixes for some insmod/rmmod crashes, link down crashes and
+         device loss crashes.
+       * Removed NLP_SEARCH_DEQUE.
+       * Call lpfc_target_unblock only if the targetp is nonNull and with
+         the host_lock held.
+       * Added qcmdcnt back along with misc bug fixes to discovery.
+       * Changed tgt_io to outfcpio lpfc_fcp.c.
+       * Fixed errors caused by LIP and cable pulls both with and without
+         block/unblock patch.
+       * For now we have to call fc_target_unblock and fc_target_block
+         with interrupts enabled.
+       * Save seg_cnt from dma_map_sg.  Save scatter-gather start address
+         and pass back to dma_unmap_sg in error with seg_cnt.
+       * Incorporating block/unblock calls into driver with ifdefs.  This
+         change is supported by scsi-target-2.6 kernel and forward only.
+       * Merged in some discovery bug fixes and added tgt io counters.
+       * Added sysfs attributes/interfaces: read only attribute
+         "management_version" and write only attribute "issue_lip".
+       * Fix build on big endian machines: while #if was OK with
+         __BIG_ENDIAN, which is defined as 4321, __BIG_ENDIAN_BITFIELD has
+         to be tested with #ifdef because it does not have any value; it is
+         either defined or not (see the sketch at the end of this section).
+       * Add fabric_name and port_type attributes.
+       * Change mdelay to msleep.  mdelay works, but wastefully uses cpu
+         resources without a lock held. Revert to msleep.  Tested with
+         sg_reset for bus and three attached targets.
+       * Added the customary #ifndef...#define...#endif to
+         lpfc_version.h.
+       * Integrate patches from Christoph Hellwig: two new helpers common
+         to lpfc_sli_resume_iocb and lpfc_sli_issue_iocb - significant
+         cleanup of those two functions - the unused SLI_IOCB_USE_TXQ is
+         gone - lpfc_sli_issue_iocb_wait loses its flags argument
+         totally.
+       * Fix in lpfc_sli.c: we can not store a 5 bit value in a 4-bit
+         field.
+       * Moved some routines out of lpfc_fcp.c into more appropriate
+         files.
+       * Whitespace cleanup: remove all trailing whitespace.
+       * Make lpfc_disc_ndlp_show static to lpfc_fcp.c.
+       * Remove leftover printk and replace some with
+         printk(KERN_WARNING)
+       * Trivial: fix a few long lines and a soft tab.
+       * Remove warnings generated by Sparse against driver (make
+         C=1). Mostly these are "using integer as pointer warnings"
+         i.e. use NULL instead of 0.
+       * Integrated patch from Christoph Hellwig: Quite a lot of changes
+         here, the most notable is that the phba->slim2p lpfc_dmabuf goes
+         away in favour of a typede pointer and a dma_addr_t.  Due to the
+         away in favour of a typed pointer and a dma_addr_t.  Due to the
+         I also replaced the messy SLI2_SLIM_t with a simple struct
+         lpfc2_sli2_slim that only contains the part of the union we care
+         about while using SLI2_SLIM_SIZE for all size calculations
+         directly.
+       * Integrated patch from Christoph Hellwig: This streamlines the
+         I/O completion path a little more, especially taking care of
+         fast-pathing the non-error case.  Also removes tons of dead
+         members and defines from lpfc_scsi.h - e.g. lpfc_target is down
+         to nothing more then the lpfc_nodelist pointer.
+       * Added binary sysfs file to issue mbox commands
+       * Replaced #if __BIG_ENDIAN with #if __BIG_ENDIAN_BITFIELD for
+         compatibility with the user space applications.
+       * Decrease the amount of data in proc_info.
+       * Condense nodelist flag members.
+       * Expand INFO for discovery sysfs shost entries.
+       * Notify user if information exceeds 4k sysfs limit.
+       * Removed a bunch of unused #defines.
+       * Added initial sysfs discovery shost attributes.
+       * Remove unused #defines lpfc_disc.h.
+       * Fixed failMask nodelist settings.
+       * Cleanup some old comments / unused variables.
+       * Add LP101 to list of recognized adapters.
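+
+         A minimal illustration (not from the driver) of the endianness
+         test distinction noted earlier in this section:
+
+               #include <asm/byteorder.h>
+
+               #ifdef __BIG_ENDIAN_BITFIELD    /* defined, but with no value */
+                       /* big-endian bitfield layout */
+               #else
+                       /* little-endian bitfield layout */
+               #endif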
+
+Changes from 20040908 to 20040920
+
+       * Changed version number to 8.0.12
+       * Removed unused #defines: DEFAULT_PCI_LATENCY_CLOCKS and
+         PCI_LATENCY_VALUE from lpfc_hw.h.
+       * Changes to accommodate rnid.
+       * Fix RSCN handling so RSCN NS queries only affect NPorts found in
+         RSCN data.
+       * If we rcv a plogi on a NPort queued up for discovery, clear the
+         NLP_NPR_2B_DISC bit since rcv plogi logic will force NPort thru
+         discovery.
+       * Ensure lpfc_target is also cleaned up in lpfc_cleanup().
+       * Preliminary changes for block/unblock kernel API extensions in
+         progress with linux-scsi list.  These are name changes and
+         prototype changes only.
+       * Added send_abts flag to lpfc_els_abort. For rcv LOGO when ADISC
+         sent, the XRI of the LOGO rcv'ed is the same as the ADISC
+         sent. Thus we cannot ABTS the ADISC before sending the LOGO ACC.
+       * Weed out some unused fc_flags.  Add FC_DISC_TMO.
+       * board_online sysfs attribute added to support libdfc functions
+         InitDiagEnv and SetBrdEnv.
+       * Streamline code in lpfc_els_retry; fix up the abort case in
+         lpfc_els_timeout_handler().
+       * Flush discovery/ELS events when we bring SLI layer down.
+       * ctlreg and slimem binary attributes added to support libdfc
+         read/write mem/ctl functions.
+       * Integrated Christoph Hellwig's patch: Cleanup
+         lpfc_sli_ringpostbuf_get.
+       * Modified lpfc_slave_alloc and lpfc_slave_destroy to allocate and
+         free a dummy target pointer.  This allows queuecommand to skip
+         the NULL target pointer check and avoid the console spam when
+         slave_alloc fails.
+       * Fix cfg_scan_down logic, it was reversed.
+       * Init list head ctrspbuflist.
+       * Change name of lpfc_driver_abort to lpfc_els_abort since it is
+         only valid for ELS ring.
+       * Remove unused third argument for lpfc_consistent_bind_get().
+       * Fix up iotag fields in lpfc_prep_els_iocb().
+       * Remove log message on code path triggered by lpfc_els_abort().
+       * Set host->unique_id in lpfc_fcp.c.
+       * Removed deadwood: lpfc_target.pHba not necessary anymore.
+       * Integrated patch from Christoph Hellwig: remove dead
+         SLI_IOCB_POLL handling.
+       * Integrated patch from Christoph Hellwig: Streamline I/O
+         submission and completion path a little.
+       * Remove unnecessary lpfc_brd_no.  Ensure brd_no assignment is
+         unique.
+       * Removed unused MAX_FCP_LUN.
+       * Use mod_timer instead of add_timer for fdmi in lpfc_ct.c.
+       * Fixed misc discovery problems.
+       * Move stopping timers till just before lpfc_mem_free() call.
+       * Fix up NameServer reglogin error path.
+       * Cleanup possible outstanding discovery timers on rmmod.
+       * Fix discovery NPort to NPort pt2pt problem.
+       * Get rid of ip_tmofunc / scsi_tmofunc.
+       * Integrated patch from Christoph Hellwig:
+         lpfc_disc_done/lpfc_do_dpc cleanup - lpfc_disc_done can return
+         void - move lpfc_do_dpc and lpfc_disc_done to lpfc_hbadisc.c -
+         remove checking of list emptiness before calling lpfc_disc_done,
+         it handles the empty list case just fine and the additional
+         instructions cost less than the bus-locked spinlock operations.
+       * Integrated patch from Christoph Hellwig: This adds a new 64bit
+         counter instead, brd_no isn't reused anymore.  Also some tiny
+         whitespace cleanups in surrounding code.
+       * Reorder functions in lpfc_els.c to remove need for prototypes.
+       * Removed unused prototypes from lpfc_crtn.h -
+         lpfc_ip_timeout_handler, lpfc_read_pci and lpfc_revoke.
+       * Removed some unused prototypes from lpfc_crtn.h -
+         lpfc_scsi_hba_reset, lpfc_scsi_issue_inqsn,
+         lpfc_scsi_issue_inqp0, lpfc_scsi_timeout_handler.
+       * Integrated patch from Christoph Hellwig: remove TRUE/FALSE
+         usage.
+       * Integrated patch from Christoph Hellwig: Remove unused function
+         prototypes lpfc_set_pkt_len and lpfc_get_pkt_data from
+         lpfc_crtn.h - fixes build warnings.
+       * Removed unused struct lpfc_dmabufip definition from lpfc_mem.h.
+       * Removed pre-2.6.5 MODULE_VERSION macro from lpfc_compat.h.
+       * Fixing missing static and removing dead code.
+       * Adding nodewwn, portwwn and portfcid shost attributes.
+       * Initial support for CT via sysfs. Request payloads of size less
+         than PAGE_SIZE and rsp payloads of size PAGE_SIZE are supported.
+         Driver maintains a list of rsp's and passes back rsp's
+         corresponding to the pid of the calling process.
+       * Support for RefreshInformation, GetAdapterAttributes,
+         GetPortStatistics.
+       * Make nodev-tmo default to 20 seconds.
+       * Fix up some DSM error cases, unreg_login rpi where needed.
+       * Fix up comments for fc_target_block / fc_target_unblock.
+       * Fix up code for scsi_block_requests / scsi_unblock_requests.
+       * Add NLP_FCP_TARGET for nodeinfo support.
+       * Move suspend/resume in lpfc_nlp_list under appropriate case -
+         Used host_lock for DPC to avoid race (remove dpc_lock)
+       * Fix some corner cases for PLOGI receive - simplify error case
+         for cmpl_reglogin_reglogin_issue.
+       * Bug fix for ppc64 EEH MMIO panic - always do readl after
+         writel's of HBA registers to force a flush (see the sketch at
+         the end of this section).
+       * Get rid of initial static routine declarations in lpfc_hbadisc.c
+         and lpfc_els.c.
+       * Updates to discovery processing.
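+
+         A minimal sketch (register name illustrative) of the posted-write
+         flush idiom described earlier in this section:
+
+               writel(val, phba->HAregaddr);
+               readl(phba->HAregaddr);   /* flush the write to the HBA */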
+
+Changes from 20040823 to 20040908
+
+       * Changed version number to 8.0.11
+       * Removed persistent binding code.
+       * Display both ASC and ASCQ info.
+       * Fixed link down->up transitions when linkdown tmo expires. Fix
+         was in the defensive error checking at the start of
+         queuecommand.
+       * Removed lpfc_scsi_timeout_handler as this timer is no longer
+         required.  The midlayer will exhaust retries and then call
+         lpfc_abort_handler, lpfc_reset_lun_handler, and
+         lpfc_reset_target_handler.
+       * Minimal support for SCSI flat space addressing/volume set
+         addressing.  Use 16 bits of LUN address so that flat
+         addressing/VSA will work.
+       * Changed 2 occurrences of if( 1 != f(x)) to if(f(x) != 1)
+       * Drop include of lpfc_cfgparm.h.
+       * Reduce stack usage of lpfc_fdmi_cmd in lpfc_ct.c.
+       * Add minimum range checking property to /sys write/store
+         functions.
+       * Fix display of node_name and port_name via fc transport
+         attr.
+       * Removed biosparam code.
+       * Removed range checking. phba->config[] array elements are now
+         embedded into the hba struct. lpfc_config_setup() has been
+         removed.
+       * Collapsed lpfc_scsi_cmd_start into lpfc_queuecommand and cleaned
+         up combined routines.
+       * Removed unused prototypes myprint and
+         lpfc_sched_service_high_priority_queue.
+       * Removed unused function lpfc_nodev.
+       * Removed scsi_cmnd->timeout_per_command cancelation. SCSI midlayer
+         now times out all commands - FW is instructed to not timeout.
+       * Removed polling code from lpfc_scsi_cmd_start. Reorganized
+         queuecommand and cmd_start some.
+
+Changes from 20040810 to 20040823
+
+       * Changed version number to 8.0.10
+       * Additional timer changes as per Arjan / Christoph's comments.
+       * Used mod_timer() instead of del_timer_sync() where appropriate.
+       * Fixed a use after free case (panic on 2.6.8.1 with
+         CONFIG_DEBUG_SLAB set).
+       * Fix compile warning in lpfc_fcp.c.
+       * Minor fix for log message, that prints unassigned brdno which is
+         zero.
+       * Move scsi_host_alloc() to the beginning of probe_one(). This
+         ensures that host_lock is available at later stages and also
+         avoids tons of unnecessary initializing if host_alloc()
+         fails.
+       * Removed else clause from lpfc_slave_configure that set
+         sdev->queue_depth.  The driver informs the midlayer of its
+         setting in the template and only overrides if queue tagging is
+         enabled.
+       * Added PCI_DEVICE_ID_ZEPHYR and PCI_DEVICE_ID_ZFLY (Junior
+         Zephyr) support
+
+Changes from 20040730 to 20040810
+       
+       * Changed version number to 8.0.9
+       * Removed per HBA driver lock.  Driver now uses the host->host_lock
+       * Restored support for the 2.6.5 kernel for those linux distributions
+         shipped with the 2.6.5 kernel.
+       * Applied patch from Christoph Hellwig (hch@infradead.org) as follows
+         "[PATCH] use scsi host private data in ->proc_info.  
+       * Applied patch from Christoph Hellwig (hch@infradead.org) as follows
+         "Re: [Emulex] Ready for next round.  This patch cleans up the memory 
+         allocation routines a little and fixes a missing mempool_destroy and
+         some missing error handling."
+       * Changed pointers assignments from 0 to NULL.
+       * Added fixes to the lpfc_reset_lun_handler and lpfc_reset_bus_handler
+         entry points that caused kernel to Oops or hang.
+       * Added fixes to targetless hosts that caused modprobe and insmod to hang.
+       * Ongoing cleanup to many files
+         
+Changes from 20040723 to 20040730
+
+       * Changed version number to 8.0.8
+       * Removed unused LPFN_DRIVER_VERSION #define.
+       * Folded lpfc_findnode_scsiid into lpfc_find_target, its only
+         caller.
+       * Removed 2 unneeded arguments to lpfc_find_target (lun and
+         create_flag).
+       * Make lpfc_sli_reset_on_init = 1
+       * Minor cleanup to quieten sparse.
+       * Removed missing function = 0 in tmo routine in lpfc_els.c.
+       * Moved additional binding parameters into lpfc_defaults.c:
+         lpfc_automap / lpfc_fcp_bind_method
+       * Use msecs_to_jiffies() where applicable.
+       * Use the queue depth attribute only after SLI HBA setup was
+         completed.
+       * Put in memory barriers for PPC
+       * Added PCI_DEVICE_ID_HELIOS and PCI_DEVICE_ID_JFLY (Junior
+         Helios) support
+       * Added 4&10 gigabit choices in user option link_speed
+       * Updated timer logic: set the timer data after init_timer, and use
+         timer_pending() instead of checking expires (see the sketch at the
+         end of this section).
+       * Removed some remnants of IP over FC support from Kconfig and
+         Makefile.
+       * Remove redundant prototypes for lpfc_handle_eratt,
+         lpfc_handle_latt and lpfc_read_pci.
+       * Ongoing cleanup of lpfc_init.c.
+       * Changed LPFC_CFG_DFT_HBA_Q_DEPTH -> LPFC_CFG_HBA_Q_DEPTH.
+       * Another cleanup stab at lpfc_ct.c. Remove castings, structure
+         code sanely, remove redundant code, reorganize code so that
+         functions are invoked after definition.
+
+Changes from 20040716 to 20040723
+
+       * Changed version number to 8.0.7
+       * Cleanup of lpfc_ct.c. Removed a number of casts, removed tons of
+         dead/redundant code, cleaned up badly and poorly written code,
+         cleaned up return values.
+       * Fixed Persistent binding implementation
+       * Removed all references to lpfc_scsi_req_tmo
+       * Removed last references to lun_skip config parameter.
+       * Removed LPFC_DEV_RPTLUN node failure bit because we don't issue
+         REPORT_LUNS from the driver anymore.
+       * Removed LUN-tracking in driver.  Removed lpfc_lun struct and
+         moved any functionality we still need to lpfc_target.
+       * Added new lpfc_jedec_to_ascii() call and replace two instances
+         of duplicate code with calls to this function.
+       * Removed Volume Set Addressing handling on LUN IDs.
+       * Applied patch from Christoph Hellwig (hch@infradead.org) that
+         removes dead code belonging to lpfc_build_scsi_cmnd() and its
+         call path. This is related to the recently removed report_lun
+         code.
+
+Changes from 20040709 to 20040716
+
+       * Changed version number to 8.0.6
+       * Removed internal report LUNs usage.  Removed functions:
+         lpfc_disc_issue_rptlun, lpfc_disc_cmpl_rptlun,
+         lpfc_disc_retry_rptlun and their use.
+       * Removed unused scheduler prototypes in lpfc_crtn.h
+       * Replace lpfc_geportname() with generic memcmp().
+       * Rearrange code in lpfc_rcv_plogi_plogi_issue() to make it a
+         little more readable.
+       * Remove redundant port_cmp != 2 check in if
+         (!port_cmp) { .... if (port_cmp != 2).... }
+       * Clock changes: removed struct clk_data and timerList.
+       * Clock changes: separate nodev_tmo and els_retry_delay into 2
+         separate timers and convert to 1 argument; changed
+         LPFC_NODE_FARP_PEND_t to struct lpfc_node_farp_pend; convert
+         ipfarp_tmo to 1 argument; convert target struct tmofunc and
+         rtplunfunc to 1 argument.
+       * cr_count, cr_delay and discovery_threads only need to be
+         module_params and not visible via sysfs.
+
+Changes from 20040614 to 20040709
+
+       * Changed version number to 8.0.5
+       * Make lpfc_info static.
+       * Make lpfc_get_scsi_buf static.
+       * Print a warning if pci_set_mwi returns an error.
+       * Changed SERV_PARM to struct serv_parm.
+       * Changed LS_RJT to struct ls_rjt.
+       * Changed CSP to struct csp.
+       * Changed CLASS_PARMS to struct class_parms.
+       * Some cosmetic coding style cleanups to lpfc_fcp.c.
+       * Providing a sysfs interface that dumps the last 32
+         LINK_[UP|DOWN] and RSCN events.
+       * Get rid of delay_iodone timer.
+       * Remove qfull timers and qfull logic.
+       * Convert mbox_tmo, nlp_xri_tmo to 1 argument clock handler
+       * Removed duplicate extern defs of the bind variables.
+       * Streamline usage of the defines CLASS2 and CLASS3, removing
+         unnecessary checks on config[LPFC_CFG_FCP_CLASS].
+       * Moving the persistent binding variables to new file
+         lpfc_defaults.c
+       * Changed LPFC_SCSI_BUF_t to struct lpfc_scsi_buf.
+       * Moved config specific code from probe_one() into
+         config_setup(). Removing a redundant check on scandown value
+         from bind_setup() as this is already done in config_setup().
+       * Changed LPFC_SLI_t to struct lpfc_sli.
+       * Changed FCP_CMND to struct fcp_cmnd.
+       * Changed FCP_RSP to struct fcp_rsp.
+       * Remove the need for buf_tmo.
+       * Changed ULP_BDE64 to struct ulp_bde64.
+       * Changed ULP_BDE to struct ulp_bde.
+       * Cleanup lpfc_os_return_scsi_cmd() and its call path.
+       * Removed lpfc_no_device_delay.
+       * Consolidating lpfc_hba_put_event() into lpfc_put_event().
+       * Removed following attributes and their functionality:
+         lpfc_extra_io_tmo, lpfc_nodev_holdio, lpfc_delay_rsp_err,
+         lpfc_tgt_queue_depth and lpfc_check_cond_err.
+       * Clock changes consolidating timers, just in the struct lpfc_hba,
+         to get rid of clkData and pass only one argument to timeout
+         routine. Also, removing need for outstanding clock linked list
+         to stop these timers at rmmod.
+       * Move lpfc.conf contents into lpfc_fcp.c. Removing per adapter
+         attributes in favor of global attributes.
+       * Fix a potential null pointer reference of pmbuf in lpfc_ct.c.
+       * On reset_lun, issue LUN_RESET as opposed to ABORT_TASK_SET.
+       * Removed SCSI_REQ_TMO related code.
+       * Introducing two new defines LPFC_ATTR_R and LPFC_ATTR_RW that do
+         a module_param, MODULE_PARM_DESC, lpfc_param_show,
+         [lpfc_param_store] and CLASS_DEVICE_ATTRIBUTE.
+       * Properly clean up when allocation of a linked BDE fails in the
+         SCSI queuecommand path.
+       * Fail SCSI command if dma_map_sg call fails.
+       * Remove unused macros SWAP_ALWAYS and SWAP_ALWAYS16.
+       * Reset context2 to 0 on exit in
+         lpfc_sli_issue_iocb_wait_high_priority() and
+         lpfc_sli_issue_iocb_wait().
+       * Arranging lpfc_scsiport.c to follow style of use after
+         definition. This removes the need for the cruft of forward
+         declarations. Also removing a redundant #define ScsiResult as it
+         is already available elsewhere.
+       * Applying "Streamline lpfc error handling" patch from Christoph
+         Hellwig (hch@infradead.org) with following modifications: fix
+         mem leaks, remove some misplaced code that need not be there,
+         print a message on exit (old code prints two (entry/exit)), make
+         ret values consistent (either 1/0 or SUCCESS/FAILURE), keep all
+         eh routines in a single file (lpfc_scsiport.c).
+       * Move contents of lpfc_module_param.h into lpfc_fcp.c.
+       * Changed sysfs attributes to CLASS_DEVICE_ATTRIBUTES (previously
+         DEVICE_ATTRIBUTES). They now appear in
+         /sys/class/scsi_host/hostx (previously in
+         /sys/bus/pci/drivers/lpfc/devx).
+       * Removed lpfc_syfs.h and lpfc_sysfs.c.
+       * Cleanup of config params.  Throttle params have been removed.
+         max_lun has been removed. max_target is replaced with a #define,
+         lun_skip is removed.  Remove ipfc config params and related
+         code.
+       * Changed DMABUF_t usage to struct lpfc_dmabuf.
+       * Downsizing iCfgParam structure to include a_string, a_low, a_hi
+         and a_default values only.
+       * Free SCSI buf safety memory pool on shutdown to eliminate memory
+         leak.
+       * Change lpfc_printf_log to a #define. Also include phba->brd_no
+         and newline in the print string rather than in the #define.
+       * Remove code that optionally locates Host Group Pointers in host
+         memory SLIM since this is no longer needed for PPC64, once
+         CONFIG_PORT uses HBA's view of its BAR0.
+       * Removed the forward declarations of the sli functions and
+         rearranging the code in lpfc_sli.c.
+       * Removed the preamble functionality from logging.
+       * Make lpfc_sli_hba_setup() return negative error codes on error
+         and correct the comment left over in lpfc_fcp.c
+       * Removed the lpfc_loadtime variable.
+       * Put a space between all ifs and their open parens '('.
+       * Change Studly_Caps LPFC_SCSI_BUF_t to struct lpfc_scsi_buf.
+       * Fixed insmod hang after hardware error.
+       * Relocated scsi_host alloc to before we enable the interrupt
+         handler
+       * Add .tmp_versions directory to Makefile clean target.  This
+         directory is created in the 2.6.5+ build process (with Red Hat
+         kernels at least).
+       * Changing phba->config to kmalloc lpfc_icfgparam and not
+         *phba->config. This is manifesting itself as a panic in
+         pci_release_region().
+       * Fix for firmware download / board reset problem.
+       * Integrated patch from Christoph Hellwig (hch@infradead.org) to
+         reorganize and cleanup lpfc_fcp.c
+       * Don't abort commands immediately when there is an RSCN event to
+         give driver time to rediscover targets before the midlayer
+         retries the SCSI commands.
+
+Changes from 20040604 to 20040614
+
+       * Changed version number to 8.0.4
+       * Removed lpfc_valid_lun function.
+       * Added scsi_buf safety pool to address scsi_buf failures in
+         queuecommand under low memory conditions.  Allocations now come
+         from kmalloc initially, but if kmalloc fails, the allocation
+         comes from the safety pool.
+       * Modified lpfc_slave_alloc to only set the scsi_device->hostdata
+         pointer if the driver has discovered the target.  This routine
+         always returns success now as well since no error ever occurs in
+         the alloc routine.
+       * Mask only info and warning messages.  Print all error messages
+         irrespective of mask.
+       * Removing lpfc_log_chk_msg_disabled()
+       * Changed lpfc_printf_log to take struct lpfc_hba * directly
+         instead of a "board number".
+       * Convert dma_sync_single to pci_dma_sync_single_for_{device/cpu}.
+       * Implemented new style log messages. The message strings are now
+         embedded in the call to lpfc_printf_log.
+       * Decreased FLOGI discovery timeout to 20 seconds.
+       * On error in lpfc_pci_probe_one() return -1 and not 1.
+       * Allow for board numbers that are not sequential, paving the way
+         for hotplug support.
+       * scsi_add_host() can fail, so wrap it around in an if(). Also
+         initiate scsi_scan_host() after attaching the sysfs attributes.
+       * lpfc_release_version is used only in lpfc_ct.c, so move it there
+         and mark it as static.
+       * Removed lpfc_sleep_ms and replaced with mdelay or schedule calls
+         directly
+       * Removed all (struct list_head *) casts from clkData-related list
+         handling in list_add, list_del macros.
+       * Removed EXPORT_SYMBOLs.
+       * Removed LPFC_MIN_QFULL and lpfc_qthrottle_up.
+       * Replace LPFCSCSITARGET_t with struct lpfc_target.
+       * Replace LPFCSCSILUN_t with struct lpfc_lun.
+       * Remove unused struct declarations (fcPathId and fcRouteId) from
+         lpfc_scsi.h.
+       * Rewrite use of FC transport attributes.
+       * Fix crash when link is lost.  This was due to lpfc_delay_iodone
+         calling list_del on an object that was never put on a list.
+       * Remove trailing spaces at the end of all lines.
+       * Set MAX_FCP_TARGET to 256 from 0xff.  Set MAX_FCP_LUN and
+         MAX_FCP_CMDS to their decimal equivalents and updated
+         documentation.
+
+Changes from 20040526 to 20040604
+
+       * Changed version number to 8.0.3
+       * Completed sysfs FC transport support.
+       * Removed unused fields in SCSI LUN and SCSI Target structures:
+         void *pTargetProto; void *pTargetOSEnv; void *pLunOSEnv;
+       * Modified list_for_each to list_for_each_entry. Modified
+         list_for_each_safe to list_for_each_entry_safe.
+       * Remove lpfc_dfc.h file.
+       * Changed pHba->phba, pCommand->pcmd
+       * Changed plogi_ndlp -> plogindlp, pos_tmp->postmp, pRsp->prsp,
+         pCmd->pcmd
+       * Changed pText -> ptext
+       * Changed p_tmp_buff -> ptmpbuff
+       * Changed pBufList -> pbuflist, pRsp -> prsp, pCmd -> pcmd
+       * Changed *pos_tmp -> *postmp, *p_mbuf -> *pmbuf
+       * Following changes are made to the SCSI fast path: Added
+         DMA_BUF_t member to the lpfc_scsi_buf_t.  This will reduce a
+         memory allocation in the scsi fast path.  Added check for
+         targetp == NULL in the scsi fast path.  Increased number of
+         scatter gather entries in lpfc_scsi_dma_ext to 4 from 3 and
+         changed the size of lpfc_scsi_dma_ext to 264
+       * Fixing some missing statics in lpfc_nportdisc.c.
+       * Reordered #include lines so that lpfc.h doesn't have to #include
+         other header files.
+       * Remove lpfc_get_hba_sym_node_name() as a global EXPORT and make
+         it static.
+       * Move struct clk_data definition from lpfc_hw.h to lpfc_sli.h.
+       * Changed LPFC_IOCBQ_t to struct lpfc_iocbq.
+       * Changed LPFC_SLI_RING_t to struct lpfc_sli_ring.
+       * Changed LPFC_NODELIST_t to struct lpfc_nodelist.
+       * Rearranged lpfc_nportdisc.c by moving state machine array
+         (lpfc_disc_action) and the one function that uses it,
+         lpfc_disc_state_machine, to the end of the file, removing the
+         need for the raft of prototypes at the top.
+       * Changed LPFC_BINDLIST_t to struct lpfc_bindlist.
+       * Removed lpfc_issue_ct_rsp(), lpfc_sleep(), lpfc_add_bind(),
+         lpfc_del_bind(), lpfc_sli_wake_mbox_wait() and
+         lpfc_sli_issue_mbox_wait().
+       * Fixed a large number of overly-long lines.
+       * Fixed some discovery problems: Introduced deferred ndlp removal
+         when in DSM to avoid panic when in nested DSMs; fixed NportId
+         fffc01 handling to not relogin after LOGO; fixed handling of
+         LOGO on PLOGI issue.
+       * Changed SLI_CT_REQUEST to lpfc_sli_ct_request.
+       * Changed NAME_TYPE to struct lpfc_name.
+       * Changed lpfcCfgParam_t to struct lpfc_cfgparam.
+       * Changed LPFC_STAT_t to struct lpfc_stats.
+       * Changed HBAEVT_t to struct lpfc_hba_event.
+       * Changed Studly_Caps lpfcHBA_t to struct lpfc_hba.
+       * Removed no longer used tasklet_running flag.
+       * Removing *PSOME_VAR typedefs and using SOME_VAR* directly.
+       * Changing .use_clustering to ENABLE_CLUSTERING.
+       * Modify lpfc_queuecommand to return SCSI_MLQUEUE_HOST_BUSY when
+         it can't queue a SCSI command.  Also, remove cmnds_in_flight
+         member of struct lpfcHBA for 2.6 kernels as it was only needed
+         to determine what to return from queuecommand.
+       * Change return type of lpfc_evt_iocb_free to void as it doesn't
+         return anything.
+       * Remove unused cmnd_retry_list and in_retry members in struct
+         lpfcHBA.
+       * Remove some instances of unneeded casting of kmalloc's return in
+         lpfc_scsiport.c
+       * Remove lpfc_linux_attach() and lpfc_linux_detach(). Integrate
+         them into lpfc_probe_one() and lpfc_release_one() respectively.
+       * Remove lpfc_num_iocbs, lpfc_num_bufs module parameters
+       * Remove #defines for NUM_NODES, NUM_BUFS and NUM_IOCBS
+
+Changes from 20040515 to 20040526
+
+       * Changing version number to 8.0.2.
+       * Including dma-mapping.h as one of the include headers.  Also
+         rearrange the #include order.
+       * Make functions static as appropriate.
+       * queuecommand() will now return SCSI_MLQUEUE_HOST_BUSY instead of
+         1 to backpressure midlayer.
+       * Removed function prototypes for lpfc_start_timer() and
+         lpfc_stop_timer()
+       * Changed timer support to be inline.  Clk_data is now declared
+         right next to the corresponding timer_list entry so we don't
+         have to allocate these clk_data dynamically.
+       * Add readls after writels to PCI space to flush the writes.
+       * Fix misspelled word "safety" in function names.
+       * Fix up comments in lpfc.conf for per HBA parameters to reflect
+         new implementation.
+       * Change lpfc_proc_info handler to get the Nodename from
+         fc_nodename and not fc_portname.
+       * Fix up some comments and whitespace in lpfc_fcp.c.
+       * Formatting changes: get rid of leading spaces in code
+       * Move discovery processing from tasklet to a kernel thread.
+       * Move ndlp node from unmap list to map list if ADISC completed
+         successfully.
+       * Flush all the ELS IOCBs when there is a link event.
+       * LP9802 qdepth is twice the LP9802DC qdepth.  Delay
+         elx_sched_init after READ_CONFIG to get max_xri from the
+         firmware.  Reset ELX_CFG_DFT_HBA_Q_DEPTH to max_xri after
+         READ_CONFIG
+       * Fix fc_get_cfg_parm() to be more robust and support embedded hex
+         values.  The lpfc_param's are now defined as:
+         lpfc_log_verbose="lpfc:0,lpfc0:0x10,lpfc1:4,lpfc100:0xffff".  The
+         "," delimiter does not matter. It can be anything or not exist
+         at all, e.g. param = "lpfc:0lpfc0:0x10.lpfc1:4txtlpfc100:0xffff"
+         will also work.  Additionally the string is treated as case
+         insensitive.
+       * Changed all usage of lpfc_find_lun_device() to lpfc_find_lun().
+       * Removed unnecessary wrappers lpfc_find_lun_device() and
+         lpfc_tran_find_lun().
+       * Switch from using internal bus/id/lun to similar data from
+         scsi_device structure.
+       * Eliminate one-line function lpfc_find_target()
+       * Added slave_alloc, slave_destroy
+       * lpfc_scsi_cmd_start can now acquire lun pointer from
+         scsi_device->hostdata, which is setup in slave_alloc.
+       * Eliminate unnecessary checking on every cmd just to see if we
+         are accessing the device the first time.
+       * Remove assumption in lpfc_reset_lun_handler that a valid
+         lpfc_scsi_buf is hung off of linux's scsi_cmnd->host_scribble
+         when our reset is called.
+
+Changes from 20040507 to 20040515
+
+       * Changed version to 8.0.1
+       * Fixed crash on driver rmmod after error injection tests and
+         lpfc_tasklet deadlock.
+       * Modified lpfc.conf to remove limit on number of supported hosts
+       * Removed HBAAPI 
+       * Removed duplication of SCSI opcodes from lpfc_fcp.h that are
+         available in scsi/scsi.h
+       * Rework module_param usage
+       * Added MODULE_PARAM_DESC for various module_params
+       * Removed #define EXPORT_SYMTAB
+       * Removed #includes of if_arp.h and rtnetlink.h
+       * Removed string "Open Source" from MODULE_DESC
+       * Cleanup duplicated string definitions used by MODULE_DESC
+       * Renamed lpfc_pci_[detect|release] to lpfc_pci_[probe|remove]_one
+       * Fix formatting of lpfc_driver
+       * Remove unnecessary memset to 0 of lpfcDRVR
+       * Attach driver attributes always unless pci_module_init failed
+       * Remove all one-line wrappers from lpfc_mem.
+       * Remove lpfc_sysfs_set_[show|store] as it is no longer needed
+       * Redo lpfc_sysfs_params_[show|store] to one value per attribute rule
+       * Breakdown lpfc_sysfs_info_show into smaller one value per attribute
+       * Use device attributes instead of driver attributes where appropriate
+       * Remove no longer needed EXPORT_SYMBOLs
+       * Remove some unused code (1600 msg's related)
+
+Changes from 20040429 to 20040507
+
+       * Change version to 8.0.0
+       * Fix the number of cmd / rsp ring entries in lpfc_fcp.c to match
+         the divisions setup in lpfc_hw.h.
+       * Remove phba->iflag reference.
+       * Several locking improvements.
+       * Remove functions lpfc_drvr_init_lock, lpfc_drvr_lock,
+         lpfc_drvr_unlock and lpfc_hipri_*.
+       * Remove LPFC_DRVR_LOCK and LPFC_DRVR_UNLOCK macros.
+       * Make lpfc_info() use lpfc_get_hba_model_desc() instead of
+         rewriting almost identical code.
+       * Fix 1 overly long line in each of lpfc_cfgparm.h, lpfc_fcp.c and
+         lpfc_sli.c.
+       * Fix build for Red Hat 2.6.3 kernel by #defining MODULE_VERSION
+         only if it isn't already defined.
+       * Change elx_sli_issue_mbox_wait to return correct error code to
+         the caller.
+       * In some of the els completion routines, after calling
+         lpfc_els_chk_latt, the driver ignores its return code. This
+         prevents the discovery state machine from restarting correctly
+         when there are link events while the discovery state machine is
+         running. Fix this by exiting the discovery state machine if
+         lpfc_els_chk_latt returns a non-zero value.
+       * Removed MAX_LPFC_BRDS from lpfc_diag.h
+       * Removed unused first_check.
+       * Remove some unused fields and defines.
+       * Change lpfc-param names to lpfc_param.
+       * Add use of MODULE_VERSION macro for 2.6 kernels.
+       * Shorten length of some of the comment lines to make them more
+         readable.
+       * Move FCP_* definitions to their own header file, lpfc_fcp.h.
+       * Remove unused prototypes from lpfc_crtn.h: fcptst, iptst,
+         lpfc_DELAYMS.
+       * Remove duplicated prototypes from lpfc_crtn.h:
+         lpfc_config_port_prep, lpfc_config_port_post,
+         lpfc_hba_down_prep.
+       * Removed some unused export_symbols.
+       * Install driver files into */drivers/scsi/lpfc instead of
+         */drivers/scsi.
+
+Changes from 20040426 to 20040429
+
+       * Declared export symbol lpfc_page_alloc and lpfc_page_free
+       * Changed lpfc version number to 6.98.3
+       * Move the definition of MAX_LPFC_BRDS to the only header file
+         that uses it (lpfc_diag.h).
+       * Change lpfc_sli_wake_iocb_wait to do a regular wake_up since
+         lpfc_sli_issue_iocb_wait now sleeps uninterruptible.
+       * Replace list_for_each() with list_for_each_safe() when a list
+         element could be deleted.
+       * Fix IOCB memory leak
+
+Changes from 20040416 to 20040426
+
+       * Change lpfc_config_port_prep() to interpret word 4 of the DUMP
+         mbox response as a byte-count
+       * Add info attribute to sysfs
+       * Minor formatting (spaces to tabs) cleanup in lpfc_sched.h
+       * Remove unused log message number 732
+       * Completing MODULE_PARM -> module_param changes
+       * Removed unused targetenable module parameter
+       * Removed locks from lpfc_sli_issue_mbox_wait routine
+       * Removed code that retries the 29,00 check condition
+       * Removed code that manipulates rspSnsLen.
+       * Fix use of lun-q-depth config param
+       * Fix severity inconsistency with log message 249
+       * Removed lpfc_max_target from lpfc_linux_attach
+       * Replace references to lpfcDRVR.pHba[] with lpfc_get_phba_by_inst()
+       * Change lpfc_param to lpfc-param
+       * Partially removed 32 HBA restriction within driver.  Incorporated
+         lpfc_instcnt, lpfc_instance[], and pHba[] into the lpfcDRVR
+         structure.  Added routines lpfc_get_phba_by_inst(),
+         lpfc_get_inst_by_phba() and lpfc_check_valid_phba()
+       * Turn on attributes "set" & "params" by default.
+       * Further formatting/whitespace/line length cleanup on: lpfc_ct.c
+         lpfc_els.c lpfc_fcp.c lpfc_hbadisc.c lpfc_init.c lpfc_ipport.c
+         lpfc_mbox.c lpfc_nportdisc.c lpfc_sched.c lpfc_sched.h
+         lpfc_scsi.h lpfc_scsiport.c lpfc_sli.c and lpfc_sli.h
+       * Add log message 249 to log any unsupported device addressing
+         modes encountered.
+       * Add support for 256 targets and 256 LUNs
+       * Fixed panic in lpfc_linkdown.
+       * Removed (struct list_head*) casting in several calls to list_del
+       * Free irq reservation and kill running timers when insmod or
+         modprobe are killed via ctrl-c
+       * Remove drivers/scsi from include path
+       * Wrap use of log message 311 in macro
+       * Detect failure return from pci_map_sg call in lpfc_os_prep_io
+       * Fix use-after-free of IOCB in lpfc_sli_process_sol_iocb which
+         was causing an Oops on 2.6.5 kernel.
+       * Cleanup use of several gotos not used for error exit.
+       * Replace memcpy_toio() and memcpy_fromio() with endian-dependent
+         lpfc_memcpy_to_slim() and lpfc_memcpy_from_slim() so that for
+         big endian hosts like PPC64, the SLIM is accessed 4 bytes at a
+         time instead of as a byte-stream.
+
+Changes from 20040409 to 20040416
+
+       * The scsi_register and scsi_alloc_host OS calls can fail and
+         return a zero-valued host pointer.  A ctrl-C on 2.6 kernels
+         during driver load will cause this and the driver to panic.
+         Fixed this bug.  Also found a bug in the error_x handling with
+         lpfc_sli_hba_down - it was in the wrong place and the driver
+         lock was not held, but needed to be (in lpfc_linux_attach). Fixed
+         both.  Did some minor comment clean up.
+       * Removed unwanted (void *) castings.
+       * Replace define of INVALID_PHYS with kernel 2.6.5's
+         dma_mapping_error() and add an inline function for earlier
+         kernels.  Remove lpfc_bad_scatterlist().
+       * Clean up formatting in hbaapi.h, lpfc.h, lpfc_cfgparm.h,
+         lpfc_crtn.h, lpfc_ct.c, lpfc_diag.h, lpfc_disc.h, lpfc_els.c,
+         lpfc_fcp.c, lpfc_hbadisc.c, lpfc_hw.h, lpfc_init.c,
+         lpfc_ipport.c, lpfc_logmsg.c, lpfc_logmsg.h and lpfc_scsiport.c
+         - mostly replacing groups of 8 spaces with hard tabs and keeping
+         lines to 80 column max.
+       * Removed LPFC_DRVR_LOCK call from lpfc_unblock_requests for 2.4
+         kernels.  The lpfc_scsi_done routine already unlocks the driver
+         lock since it expects this lock to be held.
+       * Removed global lock capabilities from driver lock routines
+       * Remove SA_INTERRUPT flag from request_irq
+       * Move dma_addr_t cast inside of getPaddr macro as everywhere
+         getPaddr is used, the return is cast to dma_addr_t.
+       * Clean up formatting in lpfc_sli.c and lpfc_sysfs.c - mostly
+          replacing groups of 8 spaces with hard tabs and keeping lines
+          to 80 column max.
+       * Fix build for RHEL 2.1 BOOT kernels by always #including
+         interrupt.h in lpfc.h.
+       * Fix RHEL 3 build by #defining EXPORT_SYMTAB.
+       * Replace sprintf with snprintf in lpfc_proc_info.
+       * Fix build warnings on 2.6 kernels - remove no longer used calls
+         to  character device initialization.
+       * Initial support code for discovery in tasklet conversion.
+       * Removing char interface and ioctl code.
+       * Change all elx prefixes to lpfc
+       * Replace lpfc_write_slim() & lpfc_read_slim() with memcpy_toio(),
+         memcpy_fromio(), writel() & readl().
+
+Changes from 20040402 to 20040409
+
+       * Replaced lpfc_read_hbaregs_plus_offset and
+         lpfc_write_hbaregs_plus_offset functions with readl and writel.
+       * Get rid of long mdelay's in insmod path
+       * Changed the way our pci_device_id structures are initialized
+       * Replace lpfc_read/write_CA/HA/HC/HS with calls to readl() &
+         writel() directly.
+       * Increase SLI2_SLIM to 16K; increase cmd / rsp IOCBs accordingly
+       * Removed lpfc_els_chk_latt from the lpfc_config_post function.
+         lpfc_els_chk_latt will enable the link event interrupts when
+         flogi is pending, which causes two discovery state machines to
+         run in parallel.
+       * Add pci_disable_device to unload path.
+       * Move lpfc_sleep_event from lpfc_fcp.c to lpfc_util_ioctl.c
+       * Call dma_map_single() & pci_map_single() directly instead of via
+         macro lpfc_pci_map().  Allow address 0 for PPC64.
+       * Change sleep to uninterruptible in lpfc_sli_issue_iocb_wait
+         because this function doesn't handle signals.
+       * Move lpfc_wakeup_event from lpfc_fcp.c to lpfc_ioctl.c
+       * Remove unneeded #include <linux/netdevice.h>
+       * Remove unused clock variables lpfc_clkCnt and lpfc_sec_clk.
+       * Get rid of capitalization of function names.
+       * Removed lpfc_addr_sprintf.
+       * Implemented gotos in lpfc_linux_attach for error cases.
+       * Replace mlist->dma.list = dmp->dma.list; with mlist = dmp.
+       * Remove functions lpfc_get_OsNameVersion and elx_wakeup. Change
+         elx_wakeup to wake_up_interruptible
+       * Add function lpfc_get_os_nameversion and change
+         lpfc_get_OsNameVersion to lpfc_get_os_nameversion.
+       * Remove lpfc_get_OsNameVersion
+       * Change driver name to a consistent lpfc in every visible place.
+       * Fix build warning: removed unused variable ret in lpfc_fdmi_tmo.
+       * Remove lpfc_utsname_nodename_check function
+       * Remove functions lpfc_register_intr and lpfc_unregister_intr
+       * Fill in owner field in lpfc_ops file_operations struct and
+         remove now unnecessary open and close entry points.
+       * Change function name prefixes from elx_ to lpfc_
+       * Remove special case check for TUR in elx_os_prep_io()
+       * Renamed elx_scsi.h to lpfc_scsi.h
+       * Renamed elx_sched.h to lpfc_sched.h
+       * Renamed elx_mem.h to lpfc_mem.h
+       * Renamed elx_sli.h to lpfc_sli.h
+       * Renamed elx_logmsg.h to lpfc_logmsg.h
+       * Renamed elx.h to lpfc.h
+       * Renamed elx_sli.c to lpfc_sli.c
+       * Renamed elx_sched.c to lpfc_sched.c
+       * Renamed elx_mem.c to lpfc_mem.c
+       * Renamed elx_logmsg.c to lpfc_logmsg.c
+       * Renamed lpfcLINUXfcp.c lpfc_fcp.c
+       * Renamed elx_clock.c to lpfc_clock.c
+       * Reduce stack usage in lpfc_info().
+       * Move lpip_stats structure from lpfc_hba.h to lpfc_ip.h.
+       * Move lpfc_stats and HBAEVT_t structures from lpfc_hba.h to
+         lpfc.h
+       * Remove lpfc_hba.h
+       * Remove duplicate rc definitions from 
+       * Removed code which used next pointer to store mbox structure.
+       * Cleaned up list iterations.
+       * Removed non list manipulation of the next pointers.
+       * Change list_del()/INIT_LIST_HEAD sequences to list_del_init()
+       * In ELX_IOCBQ_t: Moved hipri_trigger field to iocb_flag. Combined
+         hipri_wait_queue and rsp_iocb in union
+       * Replaced casting from list_head with list_entry macro.
+       * Added ct_ndlp_context field to the ELX_IOCBQ_t.
+       * Do not use DMABUF_t list to store ndlp context
+       * Return 0 from lpfc_process_ioctl_util() when ELX_INITBRDS
+         succeeds.
+       * remove elx_os_scsiport.h
+       * Do not use DMABUF_t list to hold rpi context
+       * Replace elx_cfg_* names with lpfc_cfg-*
+       * Moved FCP activity to ring 0.  Moved ELS/CT activity to ring 2.
+       * Clean up formatting of elx_sli.h (tabs for indents, 80 column
+         lines).
+       * Remove unused elxclock declaration in elx_sli.h.
+       * Since everywhere IOCB_ENTRY is used, the return value is cast,
+         move the cast into the macro.
+       * Split ioctls out into separate files
+
+Changes from 20040326 to 20040402
+
+       * Updated ChangeLog for 20040402 SourceForge drop.
+       * Use safe list iterator for ndlp list
+       * Added code to return NLP_STE_FREED_NODE from the discovery
+         state machine functions if the node is freed from the
+         function.
+       * Fixes to DMABUF_t handling
+       * Fix for load error in discovery
+       * Remove loop_cnt variable from lpfc_rcv_plogi_unused_node.
+       * Remove nle. reference.
+        * Remove support for building 2.4 drivers
+       * Remove elx_util.h and replace elx_disc.h with lpfc_disc.h
+       * Implemented the Linux list macros in the discovery code.
+         Also moved elx_disc.h contents into lpfc_disc.h
+       * Unused variable cleanup
+       * Use Linux list macros for DMABUF_t
+       * Break up ioctls into 3 sections: dfc, util, hbaapi.
+         Rearranged code so this could be easily separated into a
+         different module later.  All 3 are currently turned on by
+         defines in lpfc_ioctl.c: LPFC_DFC_IOCTL, LPFC_UTIL_IOCTL,
+         LPFC_HBAAPI_IOCTL
+       * Misc cleanup: some goto's; add comments; clarify function
+         args
+       * Added code to use list macro for ELXSCSITARGET_t.
+       * New list implementation for ELX_MBOXQ_t
+       * Cleaned up some list_head casting.
+       * Put IPFC ifdef around two members of struct lpfc_nodelist.
+       * Cleaned up iocb list using list macros and list_head data
+         structure.
+       * lpfc_online() was missing some timer routines that were
+         started by lpfc_linux_attach().  These routines are now also
+         started by lpfc_online().  lpfc_offline() only stopped
+         els_timeout routine.  It now stops all timeout routines
+         associated with that hba.
+       * Replace separate next and prev pointers in struct
+         lpfc_bindlist with list_head type.  In elxHBA_t, replace
+         fc_nlpbind_start and _end with fc_nlpbind_list and use
+         list_head macros to access it.
+       * Fix ulpStatus for aborting I/Os, which overlaps with newer
+         firmware ulpStatus values
+       * Rework params_show/store to be consistent with the other
+         routines.  Remove generic'ness and rely on set attribute.
+       * Remove unused log message.
+       * Collapse elx_crtn.h and prod_crtn.h into lpfc_crtn.h
+       * Ifdef Scheduler specific routines
+       * Removed following unused ioctls: ELX_READ_IOCB
+         ELX_READ_MEMSEG ELX_READ_BINFO ELX_READ_EINVAL ELX_READ_LHBA
+         ELX_READ_LXHBA ELX_SET ELX_DBG LPFC_TRACE 
+       * Removed variable fc_dbg_flg
+       * Fixed a bug where HBA_Q_DEPTH was set incorrectly for
+         3-digit HBAs.  Also changed can_queue so midlayer will only
+         send (HBA_Q_DEPTH - 10) cmds.
+       * Clean up code in the error path, check condition.  Remove
+         unused sense-related fields in lun structure.
+       * Added code for safety pools for following objects: mbuf/bpl,
+         mbox, iocb, ndlp, bind
+       * Wrapped '#include <elx_sched.h>' in '#ifdef USE_SCHEDULER'.
+       * Fixed 'make clean' target.
+        * Build now ignores elx_sched.o, and includes lpfc_sysfs.o.
+       * Wrapped lpfndd.o target in BUILD_IPFC ifdef.
+       * Removed elx_os.h inclusion in implementation files.
+       * Removed ELX_OS_IO_t data structure and put data direction
+         and non scatter/gather physical address into the scsi buffer
+         structure directly.  Moved DRVR_LOCK, putPaddr, getPaddr
+         macros and some defines into elx.h since they are required
+         by the whole driver.
+       * Migrated following ioctls (debug) ELX_DISPLAY_PCI_ALL
+         ELX_DEVP ELX_READ_BPLIST ELX_RESET_QDEPTH ELX_STAT.
+       * Step 1 of attempt to move all Debug ioctls to sysfs.
+         Implemented the following IOCTLs in sysfs: ELX_WRITE_HC
+         ELX_WRITE_HS ELX_WRITE_HA ELX_WRITE_CA ELX_READ_HC
+         ELX_READ_HS ELX_READ_HA ELX_READ_CA ELX_READ_MB ELX_RESET
+         ELX_READ_HBA ELX_INSTANCE ELX_LIP.  Also introduced
+         attribute "set" to be used in conjunction with the above
+         attributes.
+       * Removed DLINK, enque and deque declarations now that clock
+         doesn't use them anymore
+       * Separated install rule so that BUILD_IPFC has to be set when
+         make is called in order for the install rule to attempt to
+         copy the lpfndd.o driver.  This change fixes a bug that
+         occurs because the install rule by default attempted to
+         install lpfndd.o, whereas the default make rule did not by
+         default build lpfndd.o.
+       * Keep track if hbaapi index numbers need to be refreshed.
+       * Removed prod_os.h from include list.
+       * Removed LPFC_LOCK and LPFC_UNLOCK macros.  Added OS calls
+         into elx_os_scsiport.c.  This file is now empty.
+       * Added spin_lock_irqsave and spin_unlock_irqrestore calls
+         into code directly and removed LPFC_LOCK_ and _UNLOCK_
+         macros
+       * Remove references to "elx_clock.h"
+       * Added utsname.h to include list.  The previous checkin to
+         elx_os.h removed its inclusion of utsname.h since there is
+         precious little in the file.  However, lpfcLINUXfcp.c needs
+         it and now has it.
+       * Removed some commented-out code
+       * Removed elx_lck_t data structure, stray elxDRVR_t type, and
+         include from file.  No longer used.
+       * Removed two PCI Sync defines.  Removed includes - not
+         needed.  Cleaned up macro lines.
+       * Added two functions from elxLINUXfcp.c.  These functions
+         were IPFC specific.
+       * Removed hipri lock abstractions and added OS call into code.
+         Removed elx_lck_t and added spinlock_t directly. Moved two
+         IPFC functions into lpfc_ipport.c
+       * Moved IP specific structures to lpfc_ip.h file.
+       * lpfc_ipfarp_timeout() uses system timer.  Remove all usages
+         of old internal clock support.
+       * Made changes to compile without IPFC support for the default
+         build.  Added ifdef IPFC for all lpfc_ip.h includes.
+       * Patched elx_free_scsi_buf
+       * Removed elx_sched.o from 2.6 dependencies
+       * Reworked lpfc_pcimap.
+       * Use Linux swap macros to replace ELX swapping macros
+         (SWAP_SHORT, SWAP_LONG, SWAP_DATA, SWAP_DATA16,
+         PCIMEM_SHORT, PCIMEM_LONG, PCIMEM_DATA).
+       * move in_interrupt() check inside of elx_sleep_ms()
+       * Moved location of pci.h include.
+       * Restored elx_lck_t types in elxHBA_t.
+       * Removed elx_pci_dma_sync call.  Also removed some PCI
+         defines from elx_hw.h and removed the spinlock_t locks that
+         are no longer used in elx.h
+       * elx_iodone() now uses system timer.
+       * elx_qfull_retry() now uses system timer.
+       * lpfc_put_buf(), lpfc_ip_xri_timeout() and
+         lpfc_ip_timeout_handler() now use system timer.
+       * lpfc_fdmi_tmo() and lpfc_qthrottle_up() now use system
+          timer.
+       * Removed num_bufs and num_iocbs configuration parameters.
+       * Fixed a memory corruption bug. This was caused by a memory
+         write to ndlp structure from lpfc_cmpl_els_acc function.
+         This ndlp structure was freed from lpfc_els_unsol_event.
+       * lpfc_disc_timeout() and lpfc_establish_link_tmo() now use
+         system timer.  Also update lpfc_els_retry_delay() to do a
+         single lock release at the end.
+       * Remove use of PAN (pseudo adapter number).
+       * Reintroduced usage of the cross compiler for building on
+         ppc64 to remove build errors that were cropping up when
+         using the standard gcc compiler.
+       * Fix no-unlock-before-return in lpfc_els_retry_delay which was
+         causing a deadlock on insmod in some environments.
+       * Minor format changes; fix up comments
+       * Create utility clock functions elx_start_timer() and
+         elx_stop_timer().  All timeout routines now use these common
+         routines.
+       * Minor formatting changes; fix up comments
+       * Minor formatting changes; get rid of failover defines for
+         syntax checking
+       * Minor formatting changes; remove ISCSI defines.
+       * Fix typo in install target for 2.4 kernels.
+       * Removed unused elx_scsi_add_timer extern function
+         declaration.
+       * Cleanup casting around DMA masks.
+       * Comment out lpfndd.o modules_install section as lpfndd.o is
+         not generated if CONFIG_NET_LPFC is not set. Also refer to
+         BASEINCLUDE only in out of kernel source module builds as it
+         will not exist otherwise.
+       * Removed unused malloc counters from lpfcLINUXfcp.c.
+       * Remove some unnecessary #includes in lpfcLINUXfcp.c
+       * Remove unncessary #includes in elxLINUXfcp.c
+       * Minor formatting cleanups in Makefile to avoid some
+          linewrapping.
+       * Removed unused elx_mem_pool data structure.
+       * Remove several unnecessary #includes.
+       * Moving fix for memory leak in ioctl lip area to sysfs's lip.
+       * Removed unused elx_dma_handle_t elx_acc_handle_t
+         FC_MAX_SEGSZ and FC_MAX_POOL.
+       * Rewrite of Makefile. Fixes breakages with make -j4 during
+         kernel compile. Does not recompile all files on every
+         build. Uses the kernel build's definitions of CFLAGS,
+         MODFLAGS etc. Removed "make rpm" option.
+       * Removed unused #defines CLOSED, DEAD, OPENED, NORMAL_OPEN
+         and unneeded #include of elx_sched.h in elx.h.
+       * Several log message updates
+       * Add PCI_DEVICE_ID_FIREFLY for LP6000
+       * Fixed known issues in 20040326: driver crashes on rmmod in
+         both 2.4 and 2.6 kernels
+       
+
+Changes from 20040319 to 20040326
+
+       * Updated ChangeLog for 20040326 SourceForge drop.
+       * remove lpfc_isr / lpfc_tmr logic; fixed up 8 spaces from
+         previous checkins with tabs
+       * replace elx_in_intr() with in_interrupt()
+       * Remove unused messages 1602 and 1603.
+       * Fix the following issues with log messages: Remove unused
+         messages 406, 407, 409, 927, 928, 1201, 1202, 1204, 1205, 1206
+         and 1207.  Create a new message 738 to fix duplicate instances
+         of 736.
+       * Removed remaining pci interface abstractions from elxLINUXfcp.c.
+         Implemented OS calls directly in all remaining files and cleaned
+         up modules.  Removed prototypes as well.
+       * Removed following functions/structures: elx_mem_dmapool
+         elx_idx_dmapool elx_size_dmapool elx_kmem_lock dfc_data_alloc
+         dfc_data_free dfc_mem struct mbuf_info elx_acc_handle_t
+         data_handle elx_dma_handle_t dma_handle struct elx_memseg
+         MEMSEG_t
+       * lpfc_els_timeout_handler() now uses system timer.
+       * Further cleanup of #ifdef powerpc
+       * lpfc_scsi_timeout_handler() now uses system timer.
+       * Replace common driver's own defines for endianness with Linux's
+         __BIG_ENDIAN etc.
+       * Added #ifdef IPFC for all IPFC specific code.
+       * lpfc_disc_retry_rptlun() now uses system timer.
+       * lpfc_npr_timeout() now uses system timer.
+       * Modified detect code, on insmod, to only wait a max of 2 secs if
+         link comes up and there are no devices.
+       * Move remaining message logging functions into
+         elx_logmsg.c/elx_logmsg.h.
+       * Added code to clear link attention bit when there is a pending
+         link event and the memory allocation for read_la mail box
+         command fails.
+       * Removed function calls for mapping bar registers and allocating
+         kernel virtual memory mappings to the mapped bars.  Removed
+         prototypes and lpfc_driver_cache_line, and renamed pci_bar1_map
+         to pci_bar2_map.
+       * Allocate mbox only if the hba_state is in ready state.
+       * Complete lip support via sysfs. To lip, echo brdnum >
+         /sys/bus/pci/drivers/lpfc/lip.
+       * moving sysfs show/store implementations to lpfc_sysfs.c. Also add
+         support for lip.
+       * Add files: lpfc_sysfs.c, lpfc_sysfs.h
+       * move LPFC_DRIVER_NAME and LPFC_MODULE_DESC out of lpfcLINUXfcp.c
+         to lpfc_version.h, since it is now needed in lpfc_sysfs.c
+       * elx_mbox_timeout now uses system timer
+       * Changed lpfc_nodev_timeout, lpfc_els_retry_delay and
+         lpfc_linkdown_timeout to use the system timer instead of
+         internal clock support.
+       * Move remaining message logging functions in elx_util.c to
+         elx_logmsg.c.
+       * Remove some unnecessary typecasting.
+       * Remove log message that is no longer used (was used by
+         elx_str_atox).
+       * Replaced DLINK_t and SLINK_t by standard Linux list_head
+       * Removed deque macro
+       * Replaced ELX_DLINK_t and ELX_SLINK_t by Linux struct list_head
+         (except for clock)
+       * Removed following functions from code: linux_kmalloc linux_kfree
+         elx_alloc_bigbuf elx_free_bigbuf
+       * Removed following abstract functions from the code.  elx_malloc
+         elx_free elx_ip_get_rcv_buf elx_ip_free_rcv_buf
+         elx_mem_alloc_dmabuf elx_mem_alloc_dmabufext elx_mem_alloc_dma
+         elx_mem_alloc_buf lpfc_bufmap
+       * Removed custom PCI configuration #defines and replaced with
+         OS-provided #defines. Also added linux/pci.h to *.c files.
+       * Remove elx_str_ctox.  Replace elx_str_atox with sscanf.
+       * Many indentation/whitespace fixes.
+       * Replace elx_str_ctox with isxdigit where it was only used to
+         check the value of a character.
+       * Removed following functions from the code.  elx_kmem_free
+         elx_kmem_alloc elx_kmem_zalloc
+       * Change use of 2.4 SCSI typedef Scsi_Host_Template to  struct
+         scsi_host_template for 2.6 kernels.
+       * Change use of 2.4 SCSI typedefs (Scsi_Device, Scsi_Cmnd,
+         Scsi_Request) to their real struct names.
+       * Move 2.6 compatibility irqreturn definitions to lpfc_compat.h.
+         Protect these definitions from conflicting with similar ones in
+         later 2.4 kernels.
+       * Remove unused definitions: LINUX_TGT_t, LINUX_LUN_t,
+         LINUX_BUF_t, elx_lun_t, SET_ADAPTER_STATUS.
+       * Convert pci_ calls to linux 2.6 dma_ equivalents.
+       * Removed unused types: struct buf, struct sc_buf, T_SCSIBUF
+         typedef.
+       * Fix Makefile so that 2.4 drivers don't always rebuild all files.
+       * Remove unused _static_ and fc_lun_t definitions.
+       * Cleaned up some memory pool implementation code.
+       * Fix panic with char dev changes. Turns out that 2.6.4 code does
+         the same in kernel space with the 2.4 interface style
+         definitions. So remove the new char dev code altogether.
+       * Remove typecasting from fc_get_cfg_param and consolidate
+         multiple instances of the parameter switch into a single
+         instance.
+       * Use lpfc_is_LC_HBA() macro that tests pcidev->device directly
+         instead of saving a private copy that undergoes varied shifting
+         & casting.
+       * Removed usage of all memory pools.
+
+Changes from 20040312 to 20040319
+       
+       * Use dev_warn instead of printk for 2.6 kernels
+       * Correct Iocbq completion routine for 2.6 kernel case
+       * Change void *pOSCmd to Scsi_Cmnd *pCmd
+       * Change void *pOScmd to struct sk_buff *pCmd
+       * Remove data direction code.
+       * Removed memory pool for buf/bpl buffers and use kmalloc/kfree and
+         pci_pool_alloc/free directly.
+       * Move PPC check for DMA address 0 in scatter-gather list, into
+         lpfc_compat.h
+       * Always use pci_unmap_single() instead of pci_unmap_page()
+       * Clean up the 2.6 vs 2.4 #if blocks.
+       * Conditionalize Scheduler
+       * Add a comment to explain a little what the first Makefile
+         section does.
+       * Removed lpfc_intr_post
+       * Sysfs new display format. Also added write functionality. You
+         can [ echo "0 log_verbose 3" >
+         /sys/bus/pci/drivers/lpfc/params]. Hex support yet to be added.
+       * Removed several #ifdef powerpc, including for a discovery issue
+         in lpfc_ValidLun()
+       * Change elx_printf_log to use vsprintf.
+       * Added lpfc_compat.h provides macros to aid compilation in the
+         Linux 2.4 kernel over various platform architectures.  Initially
+         support mapping to a DMA address.
+       * Removed memory pool for nlp/bind buffers and use kmalloc/kfree
+         directly.
+       * Removed memory pool for iocb buffers and use kmalloc/kfree
+         directly.
+       * Removed memory pool for mailbox buffers and use kmalloc/kfree
+         directly.
+       * Cleaned up back and forth casts
+       * Initial support for sysfs for 2.6 kernel.
+       * Changed elx_dma_addr_t to dma_addr_t
+       * Fix a 2.6 kernel check to be >= 2.6.0 instead of > (was missing
+         2.6.0).
+       * Remove elx_printf and elx_str_sprintf. Replace elx_print with
+         printk.
+       * Replace elx_printf with printk.
+       * Replace elx_str_sprintf with sprintf.
+       * Removed the mem_lock, its prototype, function, macro, and
+         iflags.
+       * Use kmalloc/kfree for ELX_SCSI_BUF_t
+       * Use linux pci_pools for SCSI_DMA_EXT
+       * Use linux pci_pools for BPLs.
+       * Minor cleanup of DFC args for PPC64.
+       * Several small indentation cleanups.
+       * New Linux 2.6 style of char device registration.
+       * Migrated members of LPFCHBA_t and LINUX_HBA_t into elxHBA_t
+       * Use strcpy, strncmp, isdigit, strlen instead of abstractions
+       * Cleanup of driver_template.
+       * Facilitate compile time turn on/off of lpfc_network_on.
+       * Split large source files into smaller, better named ones.
+
+Changes from 2.10a to 20040312
+
+       * Fix build for 2.4 kernels
+       * Move driver version macros into lpfc_version.h file.
+       * Fixed data miscompare with LIP.
+       * Removed elx_sli, elx_ioc, elx_disc, elx_sch routines,
+         prototypes, and reference points.
+       * Correct the space insertions with hardtabs
+       * Remove routine call pointers in ELX_SLI_INIT_t struct.
+       * Removed module locks except for drvr, mem, and clock.
+       * Removed unused module locks from sourcebase. Kept drvr_lock,
+         mem_lock, and clock_lock.
+       * Change NULL to 0
diff --git a/Documentation/scsi/lpfc.txt b/Documentation/scsi/lpfc.txt
new file mode 100644 (file)
index 0000000..4dbe413
--- /dev/null
@@ -0,0 +1,83 @@
+
+LPFC Driver Release Notes:
+
+=============================================================================
+
+
+                               IMPORTANT:
+
+  Starting in the 8.0.17 release, the driver began to be targeted strictly
+  toward the upstream kernel. As such, we removed #ifdefs for older kernels
+  (pre 2.6.10). The 8.0.16 release should be used if the driver is to be
+  run on one of the older kernels.
+
+  The proposed modifications to the transport layer for FC remote ports
+  and extended attribute support are now part of the upstream kernel
+  as of 2.6.12. We no longer need to provide patches for this support,
+  nor a *full* version which has old and new kernel support.
+  
+  The driver now requires a 2.6.12 (if pre-release, 2.6.12-rc1) or later
+  kernel.
+  
+  Please heed these dependencies....
+
+
+   ********************************************************************
+
+
+The following information is provided for additional background on the
+history of the driver as we push for upstream acceptance.
+
+Cable pull and temporary device Loss:
+
+  In older revisions of the lpfc driver, the driver internally queued i/o
+  received from the midlayer. In cases such as a pulled cable, link
+  jitter, or a device temporarily losing connectivity (due to its cable
+  being removed, a switch rebooting, or a device reboot), the driver could
+  hide the disappearance of the device from the midlayer. I/O's issued to
+  the LLDD would simply be queued for a short duration, allowing the device
+  to reappear or the link to come back alive, with no inadvertent side
+  effects to the system. If the driver did not hide these conditions, i/o
+  would be errored by the driver, the mid-layer would exhaust its retries,
+  and the device would be taken offline. Manual intervention would be
+  required to re-enable the device.
+
+  The community supporting kernel.org has driven an effort to remove
+  internal queuing from all LLDDs. The philosophy is that internal
+  queuing is unnecessary as the block layer already performs the 
+  queuing. Removing the queues from the LLDD makes a more predictable
+  and simpler LLDD.
+
+  As a potential new addition to kernel.org, the 8.x driver was asked to
+  have all internal queuing removed. Emulex complied with this request.
+  In explaining the impacts of this change, Emulex has worked with the
+  community in modifying the behavior of the SCSI midlayer so that SCSI
+  devices can be temporarily suspended while transport events (such as
+  those described) can occur.  
+
+  The proposed patch was posted to the linux-scsi mailing list. The patch
+  is contained in the 2.6.10-rc2 (and later) patch kits. As such, this
+  patch is part of the standard 2.6.10 kernel.
+
+  By default, the driver expects the patches for block/unblock interfaces
+  to be present in the kernel. No #define needs to be set to enable support.
+
+
+Kernel Support
+
+  This source package is targeted for the upstream kernel only. (See notes
+  at the top of this file). It relies on interfaces that are slowly
+  migrating into the kernel.org kernel.
+
+  At this time, the driver requires the 2.6.12 (if pre-release, 2.6.12-rc1)
+  kernel.
+
+  If a driver is needed for older kernels, please utilize the 8.0.16
+  driver sources.
+
+
+Patches
+
+  Thankfully, at this time, patches are not needed.
+
+
diff --git a/Documentation/scsi/qla2xxx.revision.notes b/Documentation/scsi/qla2xxx.revision.notes
deleted file mode 100644 (file)
index 290cdaf..0000000
+++ /dev/null
@@ -1,457 +0,0 @@
-/*
- * QLogic ISP2200 and ISP2300 Linux Driver Revision List File.
- *
- ********************************************************************
- *
- * Revision History
- *
- *  Rev  8.00.00b8     December 5, 2003        AV
- *     - Instruct mid-layer to perform initial scan.
- *
- *  Rev  8.00.00b7     December 5, 2003        AV
- *     - Resync with Linux Kernel 2.6.0-test11.
- *     - Add basic NVRAM parser (extras/qla_nvr).
- *
- *  Rev  8.00.00b7-pre11 December 3, 2003      AV
- *     - Sanitize the scsi_qla_host structure:
- *       - Purge unused elements.
- *       - Reorganize high-priority members (cache coherency).
- *     - Add support for NVRAM access via a sysfs binary attribute:
- *       - Consolidate semaphore locking access.
- *     - Fix more PCI posting issues.
- *     - Add extras directory for dump/NVRAM tools.
- *     - Remove unused qla_vendor.c file.
- *
- *  Rev  8.00.00b7-pre11 November 26, 2003     DG/AV
- *     - Merge several patches from Christoph Hellwig [hch@lst.de]:
- *       - in Linux 2.6 both pci and the scsi layer use the generic
- *         dma direction bits, use them directly instead of the scsi
- *         and pci variants and the (noop) conversion routines.
- *     - Fix _IOXX_BAD() usage for external IOCTL interface.
- *     - Use atomic construct for HA loop_state member.
- *     - Add generic model description text for HBA types.
- *
- *  Rev  8.00.00b7-pre5        November 17, 2003       AV
- *     - Merge several patches from Christoph Hellwig [hch@lst.de]:
- *       - patch to split the driver into a common qla2xxx.ko and a
- *         qla2?00.ko for each HBA type - the latter modules are
- *         only very small wrappers, mostly for the firmware
- *         images, all the meat is in the common qla2xxx.ko.
- *       - make the failover code optional.
- *       - kill useless lock_kernel in dpc thread startup.
- *       - no need for modversions hacks in 2.6 (or 2.4).
- *       - kill qla2x00_register_with_Linux.
- *       - simplify EH code, cmd or it's hostdata can't be NULL, no
- *         need to search whether the host it's ours, the midlayer
- *         makes sure it won't call into a driver for some else
- *         host.
- *     - Merge several patches from Jes Sorensen
- *       [jes@wildopensource.com]:
- *       - Call qla2x00_config_dma_addressing() before performing
- *         any consistent allocations. This is required since the
- *         dma mask settings will affect the memory
- *         pci_alloc_consistent() will return.
- *       - Call pci_set_consistent_dma_mask() to allow for 64 bit
- *         consistent allocations, required on some platforms such
- *         as the SN2.
- *       - Wait 20 usecs (not sure how long is really necessary,
- *         but this seems safe) after setting CSR_ISP_SOFT_RESET in
- *         the ctrl_status register as the card doesn't respond to
- *         PCI reads while in reset state. This causes a machine
- *         check on some architectures.
- *       - Flush PCI writes before calling udelay() to ensure the
- *         write is not sitting idle in-flight for a while before
- *         hitting the hardware.
- *       - Include linux/vmalloc.h in qla_os.c since it uses
- *         vmalloc().
- *       - Use auto-negotiate link speed when using default
- *         parameters rather than NVRAM settings. Disable NVRAM
- *         reading on SN2 since it's not possible to execute the
- *         HBA's BIOS on an SN2. I suggest doing something similar
- *         for all architectures that do not provide x86 BIOS
- *         emulation.
- *     - Clean-up slab-cache allocations:
- *       - locking.
- *       - mempool allocations in case of low-memory situations.
- *     - Fallback to GA_NXT scan if GID_PT call returns more than
- *       MAX_FIBRE_DEVICES.
- *     - Preserve iterating port ID across GA_NXT calls in
- *       qla2x00_find_all_fabric_devs().
- *     - Pre-calculate ASCII firmware dump length as to not incur the
- *       cost-to-calculate at each invocation of a read().
- *
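
The consistent-allocation ordering described in the entry above generalises beyond qla2xxx: both DMA masks should be settled before any coherent allocation, because the mask in effect determines what pci_alloc_consistent() may return. A minimal sketch under that assumption; my_setup_dma() is a hypothetical helper, not part of the driver in this diff.

#include <linux/errno.h>
#include <linux/pci.h>

static int my_setup_dma(struct pci_dev *pdev)
{
	/* try full 64-bit streaming and consistent addressing first */
	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL) &&
	    !pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL))
		return 0;

	/* fall back to 32-bit; pci_alloc_consistent() then stays below 4GB */
	if (pci_set_dma_mask(pdev, 0xffffffffULL))
		return -EIO;
	return pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
}
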
- *  Rev  8.00.00b6     November 4, 2003        AV
- *     - Add new 2300 TPX firmware (3.02.18).
- *
- *  Rev  8.00.00b6-pre25 October 20, 2003      RA/AV
- *     - Resync with Linux Kernel 2.6.0-test9.
- *     - Rework firmware dump process:
- *       - Use binary attribute within sysfs tree.
- *       - Add user-space tool (gdump.sh) to retrieve formatted
- *         buffer.
- *       - Add ISP2100 support.
- *     - Use a slab cache for SRB allocations to reduce memory
- *       pressure.
- *     - Initial conversion of driver logging methods to a new
- *       qla_printk() function which uses dev_printk (Daniel
- *       Stekloff, IBM).
- *     - Further reduce stack usage in qla2x00_configure_local_loop()
- *       and qla2x00_find_all_fabric_devs().
- *     - Separate port state used for routing of I/O's from port
- *       mgmt-login retry etc.
- *
- *  Rev  8.00.00b6-pre19 October 13, 2003      AV
- *     - Resync with Linux Kernel 2.6.0-test7-bk5.
- *     - Add intelligent RSCN event handling:
- *       - reduce scan time during 'port' RSCN events by only
- *         querying specified port ids.
- *       - Available on ISP23xx cards only.
- *     - Increase maximum number of recognizable targets from 256
- *       to 512.
- *       - Backend changes were previously added to support TPX
- *         (2K logins) firmware.  Mid-layer can now scan for targets
- *         (H, B, T, L) where 0 <= T < 512.
- *     - Remove IP support from driver.
- *       - Switch firmware types from IP->TP for ISP22xx and
- *         IPX->TPX for ISP23xx cards.
- *       - Remove files qla_ip.[ch].
- *     - Remove type designations from firmware filenames.
- *
- *  Rev  8.00.00b6-pre11 September 15, 2003    DG/AV
- *     - Resync with 6.06.00.
- *     - Resync with Linux Kernel 2.6.0-test5-bk3.
- *     - Add new 2300 IPX firmware (3.02.15).
- *
- *  Rev  8.00.00b5     July 31, 2003           AV
- *     - Always create an fc_lun_t entry for lun 0 - as the mid-
- *       layer requires access to this lun for discovery to occur.
- *     - General sanitizing:
- *       - Add generic firmware option definitions.
- *       - Generalize retrieval/update of firmware options.
- *       - Fix compile errors which occur with extended debug.
- *       - Handle failure cases for scsi_add_host() and
- *         down_interruptible().
- *     - Host template updates:
- *       - Use standard bios_param callback function.
- *       - Disable clustering.
- *       - Remove unchecked_is_dma entry.
- *
- *  Rev  8.00.00b5-pre5        July 29, 2003           DG/AV
- *     - Resync with 6.06.00b13.
- *     - Resync with Linux Kernel 2.6.0-test2.
- *     - Pass the complete loop_id, not the masked (0xff) value
- *       while issuing mailbox commands (qla_mbx.c/qla_fo.c/
- *       qla_iocb.c/qla_init.c).
- *     - Properly handle zero-length return status for an RLC CDB.
- *     - Create an fclun_t structure for 'disconnected' luns,
- *       peripheral-qualifier of 001b.
- *     - Remove unused LIP-sequence register access during AE 8010.
- *     - Generalize qla2x00_mark_device_lost() to handle forced 
- *       login request -- modify all direct/indirect invocations 
- *       with proper flag.
- *     - Save RSCN notification (AE 8015h) data in a proper and 
- *       consistent format (domain, area, al_pa).
- *     - General sanitizing:
- *       - scsi_qla_host structure member reordering for cache-line
- *         coherency.
- *       - Remove unused SCSI opcodes, endian-swap definitions.
- *       - Remove CMD_* pre-processor defines.
- *       - Remove unused SCSIFCHOTSWAP/GAMAP/MULTIHOST codes.
- *     - Backout patch which added a per-scsi_qla_host scsi host
- *       spinlock, since mid-layer already defines one.
- *     - Add new 2300 IPX firmware (3.02.15).
- *
- *  Rev  8.00.00b4     July 14, 2003           RA/DG/AV
- *     - Resync with 6.06.00b12.
- *     - Resync with Linux Kernel 2.6.0-test1.
- *     - Remove IOCB throttling code -- originally #if'd.
- *     - Remove apidev_*() routines since proc_mknod() has been
- *       removed -- need alternate IOCTL interface.
- *     - Merge several performance/fix patches from Arjan van de
- *       Ven:
- *       - Undefined operation >> 32.
- *       - No need to acquire mid-layer lock during command
- *         callback. 
- *       - Use a per-HBA mid-layer lock.
- *       - Use a non-locked cycle for setting the count of the
- *         newly allocated sp (qla2x00_get_new_sp()).
- *       - Modify semantic behavior of qla2x00_queuecommand():
- *         - Reduce cacheline bouncing by having I/Os submitted
- *           by the IRQ handler.
- *         - Remove extraneous calls to qla2x00_next() during I/O
- *           queuing.
- *       - Use list_splice_init() during qla2x00_done() handling
- *         of commands to reduce list_lock contention.
- *       - RIO mode support for ISP2200:
- *         - Implementation differs slightly from original patch.
- *       - Do not use bottom-half handler (tasklet/work queue)
- *         for qla2x00_done() processing.
- *
- *  Rev  8.00.00b4-pre22 July 12, 2003         AV
- *     - Check for 'Process Response Queue' requests early during
- *       the Host Status check.
- *     - General sanitizing:
- *       - srb_t structure rewrite, removal of unused members.
- *       - Remove unused fcdev array, fabricid, and PORT_*
- *         definitions.
- *       - Remove unused config_reg_t PCI definitions.
- *     - Add new 2200 IP firmware (2.02.06).
- *     - Add new 2300 IPX firmware (3.02.14).
- *
- *  Rev  8.00.00b4-pre19 June 30, 2003         AV
- *     - Resync with Linux Kernel 2.5.73-bk8.
- *     - Rework IOCB command queuing methods:
- *       - Upper-layer driver *MUST* properly set the direction
- *         bit of SCSI commands.
- *       - Generalize 32bit/64bit queuing path functions.
- *       - Remove costly page-boundary cross check when using
- *         64bit address capable IOCBs.
- *
- *  Rev  8.00.00b4-pre15 June 19, 2003         AV
- *     - Resync with 6.06.00b11.
- *     - Continue fcport list consolidation work:
- *       - Updated IOCTL implementations to use new fcports 
- *         list.
- *     - Modified product ID check to not verify ISP chip
- *       revision -- ISP2312 v3 (qla2x00_chip_diag()).
- *     - Add new 2300 IPX firmware (3.02.13):
- *
- *  Rev  8.00.00b4-pre13 June 19, 2003         AV
- *     - Fix build process for qla2100 driver -- no support
- *       for IP.
- *     - SCSI host template modifications:
- *       - Set sg_tablesize based on the derived DMA mask.
- *       - Increase max_sectors since only limit within RISC
- *         is transfer of (((2^32) - 1) >> 9) sectors.
- *
- *  Rev  8.00.00b4-pre12 June 18, 2003         RA, DG, RL, AV
- *     - Resync with 6.06.00b10.
- *     - Resync with Linux Kernel 2.5.72.
- *     - Initial fcport list consolidation work:
- *       - fcports/fcinitiators/fcdev/fc_ip --> ha->fcports
- *         list.
- *
- *  Rev  8.00.00b4-pre7         June 05, 2003          AV
- *     - Properly release PCI resources in the init-failure case.
- *     - Reconcile disparate function return code definitions.
- *
- *  Rev  8.00.00b4-pre4         June 03, 2003          AV
- *     - Resync with Linux Kernel 2.5.70-bk8:
- *       - SHT proc_info() changes.
- *     - Restructure SNS Generic Services routines:
- *       - Add qla_gs.c file to driver distribution.
- *     - Configure PCI latency timer for ISP23xx.
- *
- *  Rev  8.00.00b4-pre3         June 02, 2003          RA, DG, RL, AV
- *     - Resync with 6.06.00b5.
- *     - Rework (again) PCI I/O space configuration
- *       (Anton Blanchard):
- *       - Use pci_set_mwi() routine;
- *         - Remove unneeded qla2x00_set_cache_line() function.
- *       - Remove extraneous modification of PCI_COMMAND word.
- *
- *  Rev  8.00.00b3      May 29, 2003           AV
- *     - Resync with Linux Kernel 2.5.70.
- *     - Move RISC paused check from ISR fast-path.
- *
- *  Rev  8.00.00b3-pre8 May 26, 2003           AV
- *     - Add new 2300 IPX firmware (3.02.12):
- *     - Rework PCI I/O space configuration.
- *
- *  Rev  8.00.00b3-pre6        May 22, 2003            RA, DG, RL, AV
- *     - Resync with 6.06.00b3.
- *
- *  Rev  8.00.00b3-pre4        May 21 2003             AV
- *     - Add new 2300 IPX firmware (3.02.11):
- *       - Remove 2300 TPX firmware from distribution.
- *
- *  Rev  8.00.00b3-pre3        May 21 2003             AV
- *     - Properly set up PCI configuration space during
- *       initialization:
- *       - Properly configure Memory-Mapped I/O during early
- *         configuration stage.
- *     - Rework IP functionality to support 2k logins.
- *     - Add new 2300 IPX firmware (3.02.11):
- *       - Remove 2300 TPX firmware from distribution.
- *
- *  Rev  8.00.00b3-pre2        May ??, 2003            RA, DG, RL, AV
- *     - Resync with 6.06.00b1.
- *
- *  Rev  8.00.00b3-pre1        May ??, 2003            RA, DG, RL, AV
- *     - Resync with 6.05.00.
- *
- *  Rev  8.00.00b2     May 19, 2003            AV
- *     - Simplify dma_addr_t handling during command queuing given
- *       new block-layer defined restrictions:
- *       - Physical addresses not spanning 4GB boundaries.
- *     - Firmware versions: 2100 TP (1.19.24), 2200 IP (2.02.05),
- *       2300 TPX (3.02.10).
- *
- *  Rev  8.00.00b2-pre1        May 13, 2003            AV
- *     - Add support for new 'Hotplug initialization' model. 
- *     - Simplify host template by removing unused callbacks.
- *     - Use scsicam facilities to determine geometry.
- *     - Fix compilation issues for non-ISP23xx builds:
- *       - Correct register references in qla_dbg.c.
- *       - Correct Makefile build process.
- *
- *  Rev  8.00.00b1     May 05, 2003            AV
- *     - Resync with Linux Kernel 2.5.69.
- *     - Firmware versions: 2100 TP (1.19.24), 2200 TP (2.02.05),
- *       2300 TPX (3.02.10).
- *
- *  Rev  8.00.00b1-pre45 April ??, 2003                AV
- *     - Resync with Linux Kernel 2.5.68-bk11:
- *     - Fix improper return-code assignment during fabric
- *       discovery.
- *     - Remove additional extraneous #defines from
- *       qla_settings.h.
- *       - USE_PORTNAME -- FO will always use portname.
- *     - Default queue depth size set to 64.
- *
- *  Rev  8.00.00b1-pre42 April ??, 2003                AV
- *     - Convert bottom-half tasklet to a work_queue.
- *     - Initial basic coding of dynamic queue depth handling
- *       during QUEUE FULL statuses.
- *     - Fix mailbox interface problem with
- *       qla2x00_get_retry_cnt().
- *
- *  Rev  8.00.00b1-pre41 April ??, 2003                AV
- *     - Convert build defines qla2[1|2|3]00 macros to
- *       qla2[1|2|3]xx due to module name stringification clashes.
- *     - Add additional ISP2322 checks during board configuration.
- *
- *  Rev  8.00.00b1-pre40 April ??, 2003                AV
- *     - Resync with Linux Kernel 2.5.68-bk8:
- *       - Updated IRQ handler interface.
- *     - Add ISP dump code (stub) in case of SYSTEM_ERROR on
- *       ISP2100.
- *     - Add new 2200 IP firmware (2.02.05).
- *
- *  Rev  8.00.00b1-pre39 April ??, 2003                AV
- *     - Resync with Linux Kernel 2.5.68.
- *     - Add simple build.sh script to aid in external compilation.
- *     - Clean-break with Kernel 2.4 compatibility.
- *       - Rework DPC routine -- completion routines for signaling.
- *     - Re-add HBAAPI character device node for IOCTL support.
- *     - Remove residual QLA2X_PERFORMANCE defines.
- *     - Allocate SP pool via __get_free_pages() rather than
- *       individual kmalloc()'s.
- *     - Inform SCSI mid-layer of 16-byte CDB support
- *       (host->max_cmd_len):
- *       - Remove unnecessary 'more_cdb' handling code from
- *         qla_iocb.c and qla_xioct.c.
- *     - Reduce duplicate code in fabric scanning logic (MS IOCB
- *       preparation).
- *     - Add ISP dump code in case of SYSTEM_ERROR.
- *     - Remove 2300 VIX firmware from distribution:
- *       - Add initial code for IPX support.
- *     - Add new 2300 TPX firmware (3.02.10).
- *
- *  Rev  8.00.00b1-pre34 April ??, 2003                AV
- *     - Resync with Linux Kernel 2.5.67.
- *     - Use domain/area/al_pa fields when displaying PortID 
- *       values -- addresses endianness issues.
- *     - Rework large case statement to check 'common' CDB commands
- *       early in qla2x00_get_cmd_direction().
- *
- *  Rev  8.00.00b1-pre31 April ??, 2003                AV
- *     - Update makefile to support PPC64 build.
- *     - Retool NVRAM configuration routine and structures:
- *       - Consolidate ISP21xx/ISP22xx/ISP23xx configuration
- *         (struct nvram_t).
- *       - Remove big/little endian support structures in favor of
- *         simplified bit-operations within byte fields.
- *     - Fix long-standing 'static' buffer sharing problem in 
- *       qla2x00_configure_fabric().
- *
- *  Rev  8.00.00b1-pre30 April ??, 2003                AV
- *     - Complete implementation of GID_PT scan.
- *     - Use consistent MS IOCB invocation method to query SNS:
- *       - Add RNN_ID and RSNN_NN registrations in a fabric.
- *     - Remove unused Mailbox Command 6Eh (Send SNS) support
- *       structures.
- *     - Use 64bit safe IOCBs while issuing INQUIRY and RLC during
- *       topology scan.
- *     - Until reimplementation of fcdev_t/fcport list
- *       consolidation, valid loop_id ranges are still limited from
- *       0x00 through 0xFF -- enforce this within the code.
- *
- *  Rev  8.00.00b1-pre27 March ??, 2003                AV
- *     - Resync with 6.05.00b9.
- *     - Retool HBA PCI configuration -- qla2x00_pci_config().
- *     - Remove inconsistent use of delay routines (UDELAY/SYS*).
- *     - Continue to teardown/clean/add comments and debug
- *       routines.
- *     - Properly swap bytes of the device's nodename in
- *       qla2x00_configure_local_loop().
- *
- *  Rev  8.00.00b1-pre25 March ??, 2003                AV
- *     - Resync with 6.05.00b8.
- *
- *  Rev  8.00.00b1-pre23 March ??, 2003                AV
- *     - Remove (#define) IOCB usage throttling.
- *     - Abstract interrupt polling with qla2x00_poll().
- *     - Modify lun scanning logic:
- *       - If the device does not support the SCSI Report Luns
- *         command, the driver will now only scan from 0 to the
- *         max#-luns as defined in the NVRAM (BIOS), rather than
- *         blindly scanning from 0 to 255 -- which could result in
- *         an increase in startup time when running against slow
- *         (JBOD) devices.
- *     - Rework reset logic in qla2x00_reset_chip() (spec).
- *
- *  Rev  8.00.00b1-pre22 March ??, 2003                AV
- *     - Resync with 6.05.00b7.
- *     - Cleanup (rewrite) ISR handler.
- *     - Rename kmem_zalloc --> qla2x00_kmem_zalloc():
- *       - This function will eventually be removed.
- *     - Add new 2300 VIX firmware (3.02.09):
- *       - Support for Tape, Fabric, 2K logins, IP, and VI.
- *
- *  Rev  8.00.00b1-pre18 March ??, 2003                AV
- *     - Support 232x type ISPs.
- *     - Support single firmware for each ISP type:
- *       - Restructure brd_info/fw_info methods.
- *       - Streamline firmware load process.
- *       - Properly query firmware for version information.
- *     - Remove extraneous scsi_qla_host members:
- *       - device_id ==> pdev->device
- *     - Fix fc4 features (RFF_ID) registration.
- *     - Convert kmem_zalloc --> qla2x00_kmem_zalloc().
- *     - Remove unused/extraneous #defines (USE_PORTNAME).
- *
- *  Rev  8.00.00b1-pre14 March ??, 2003                AV
- *     - Resync with 6.05.00b6.
- *     - Initial source-code restructuring effort.
- *       - Build procedure.
- *       - Source file layout -- intuitive component layout.
- *       - Remove unused #defines (*PERFORMANCE, WORD_FW_LOAD, etc).
- *     - Add support for 2K logins (TPX -- firmware).
- *     - Add module parameter ql2xsuspendcount.
- *     - Add new 2200 IP/TP firmware (2.02.04).
- *
- *  Rev  8.00.00b1-pre9        March ??, 2003  RL/DG/RA/AV
- *     - Use kernel struct list_head for fcport and fclun lists.
- *     - Remove extraneous (L|M)S_64BITS() and QL21_64*() defines.
- *
- *  Rev  8.00.00b1-pre8        February 28, 2003       RL/DG/RA/AV
- *     - Resync with 6.05.00b3.
- *
- *  Rev  8.00.00b1-pre7        February 23, 2003       RL/DG/RA/AV
- *     - Add alternate fabric scanning logic (GID_PT/GNN_ID/GPN_ID).
- *     - Remove use of deprecated function check_region().
- *     - Add new 2300 IP/TP firmware (3.02.08).
- *
- *  Rev  8.00.00b1-pre5        January 28, 2003        RL/DG/RA/AV
- *     - Resync with 6.05.00b3.
- *     - Consolidate device_reg structure definitions for ISP types.
- *     - Add support for new queue-depth selection.
- *     - Add new 2300 IP/TP firmware (3.02.07).
- *
- *  Rev  8.00.00b1-pre1        January 17, 2003        AV
- *     - Initial branch from 6.04.00b8 driver.
- *     - Remove VMWARE specific code.
- *     - Add support for pci_driver interface.
- *
- ********************************************************************/
index 1f24129a3099071dd253d5a341b07ab4c944cc96..e41703d7d24d3f0579acccdc0b28a56da2621ff1 100644 (file)
@@ -389,8 +389,6 @@ Summary:
    scsi_remove_host - detach and remove all SCSI devices owned by host
    scsi_report_bus_reset - report scsi _bus_ reset observed
    scsi_set_device - place device reference in host structure
-   scsi_to_pci_dma_dir - convert SCSI subsystem direction flag to PCI
-   scsi_to_sbus_dma_dir - convert SCSI subsystem direction flag to SBUS
    scsi_track_queue_full - track successive QUEUE_FULL events 
    scsi_unblock_requests - allow further commands to be queued to given host
    scsi_unregister - [calls scsi_host_put()]
@@ -756,48 +754,6 @@ void scsi_report_bus_reset(struct Scsi_Host * shost, int channel)
 void scsi_set_device(struct Scsi_Host * shost, struct device * dev)
 
 
-/**
- * scsi_to_pci_dma_dir - convert SCSI subsystem direction flag to PCI
- * @scsi_data_direction: SCSI subsystem direction flag
- *
- *      Returns DMA_TO_DEVICE given SCSI_DATA_WRITE,
- *              DMA_FROM_DEVICE given SCSI_DATA_READ
- *              DMA_BIDIRECTIONAL given SCSI_DATA_UNKNOWN
- *              else returns DMA_NONE
- *
- *      Might block: no
- *
- *      Notes: The SCSI subsystem now uses the same values for these
- *      constants as the PCI subsystem so this function is a nop.
- *      The recommendation is not to use this conversion function anymore
- *      (in the 2.6 kernel series) as it is not needed.
- *
- *      Defined in: drivers/scsi/scsi.h .
- **/
-int scsi_to_pci_dma_dir(unsigned char scsi_data_direction)
-
-
-/**
- * scsi_to_sbus_dma_dir - convert SCSI subsystem direction flag to SBUS
- * @scsi_data_direction: SCSI subsystem direction flag
- *
- *      Returns DMA_TO_DEVICE given SCSI_DATA_WRITE,
- *              FROM_DEVICE given SCSI_DATA_READ
- *              DMA_BIDIRECTIONAL given SCSI_DATA_UNKNOWN
- *              else returns DMA_NONE
- *
- *      Notes: The SCSI subsystem now uses the same values for these
- *      constants as the SBUS subsystem so this function is a nop.
- *      The recommendation is not to use this conversion function anymore
- *      (in the 2.6 kernel series) as it is not needed.
- *
- *      Might block: no
- *
- *      Defined in: drivers/scsi/scsi.h .
- **/
-int scsi_to_sbus_dma_dir(unsigned char scsi_data_direction)
-
-
 /**
  * scsi_track_queue_full - track successive QUEUE_FULL events on given
  *                      device to determine if and when there is a need
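
With the conversion helpers above gone from the mid-level API, 2.6 LLDDs pass cmd->sc_data_direction (already a generic DMA direction value) straight to the DMA mapping calls. A minimal sketch of that pattern; my_map_command() is a hypothetical driver helper, not a mid-layer function.

#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical helper: map a command's data using the generic direction. */
static int my_map_command(struct pci_dev *pdev, struct scsi_cmnd *cmd,
			  dma_addr_t *addr)
{
	int dir = cmd->sc_data_direction;	/* already a DMA_* value */

	if (cmd->use_sg)
		return pci_map_sg(pdev,
				  (struct scatterlist *)cmd->request_buffer,
				  cmd->use_sg, dir);

	*addr = pci_map_single(pdev, cmd->request_buffer,
			       cmd->request_bufflen, dir);
	return 1;
}
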
index 46e54b441663d501c33944e46411117af9c4da40..11ef9d9ea139316e454cfae3d3b10f7aaab71959 100644 (file)
@@ -1715,6 +1715,15 @@ request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
        if (blk_init_free_list(q))
                goto out_init;
 
+       /*
+        * if caller didn't supply a lock, they get per-queue locking with
+        * our embedded lock
+        */
+       if (!lock) {
+               spin_lock_init(&q->__queue_lock);
+               lock = &q->__queue_lock;
+       }
+
        q->request_fn           = rfn;
        q->back_merge_fn        = ll_back_merge_fn;
        q->front_merge_fn       = ll_front_merge_fn;
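
The hunk above lets blk_init_queue() callers pass a NULL lock and fall back to the queue's embedded spinlock. A minimal sketch of a driver relying on that, assuming it has no state shared across queues; my_request_fn() and my_queue_init() are hypothetical names.

#include <linux/blkdev.h>

/* Hypothetical request function: dispatch requests queued on q. */
static void my_request_fn(request_queue_t *q)
{
	/* pull requests off q with elv_next_request() and service them */
}

static request_queue_t *my_queue_init(void)
{
	/* NULL lock: the queue uses its own embedded __queue_lock */
	return blk_init_queue(my_request_fn, NULL);
}
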
index a393cf4d0313cb22c7c8449168749b3b88a7744d..1f9aeb4accc60cc80f810125083e860dbf74dc3f 100644 (file)
@@ -52,19 +52,18 @@ static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
 static inline int zfcp_sg_list_copy_to_user(void __user *,
                                            struct zfcp_sg_list *, size_t);
 
-static int zfcp_cfdc_dev_ioctl(struct inode *, struct file *,
-       unsigned int, unsigned long);
+static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
 
 #define ZFCP_CFDC_IOC_MAGIC                     0xDD
 #define ZFCP_CFDC_IOC \
        _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data)
 
-#ifdef CONFIG_COMPAT
-static struct ioctl_trans zfcp_ioctl_trans = {ZFCP_CFDC_IOC, (void*) sys_ioctl};
-#endif
 
 static struct file_operations zfcp_cfdc_fops = {
-       .ioctl = zfcp_cfdc_dev_ioctl
+       .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = zfcp_cfdc_dev_ioctl
+#endif
 };
 
 static struct miscdevice zfcp_cfdc_misc = {
@@ -308,23 +307,16 @@ zfcp_module_init(void)
        if (!zfcp_transport_template)
                return -ENODEV;
 
-       retval = register_ioctl32_conversion(zfcp_ioctl_trans.cmd,
-                                            zfcp_ioctl_trans.handler);
-       if (retval != 0) {
-               ZFCP_LOG_INFO("registration of ioctl32 conversion failed\n");
-               goto out;
-       }
-
        retval = misc_register(&zfcp_cfdc_misc);
        if (retval != 0) {
                ZFCP_LOG_INFO("registration of misc device "
                              "zfcp_cfdc failed\n");
-               goto out_misc_register;
-       } else {
-               ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
-                              ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
+               goto out;
        }
 
+       ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
+                      ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
+
        /* Initialise proc semaphores */
        sema_init(&zfcp_data.config_sema, 1);
 
@@ -348,8 +340,6 @@ zfcp_module_init(void)
 
  out_ccw_register:
        misc_deregister(&zfcp_cfdc_misc);
- out_misc_register:
-       unregister_ioctl32_conversion(zfcp_ioctl_trans.cmd);
  out:
        return retval;
 }
@@ -370,9 +360,9 @@ zfcp_module_init(void)
  *              -EPERM      - Cannot create or queue FSF request or create SBALs
  *              -ERESTARTSYS- Received signal (is mapped to EAGAIN by VFS)
  */
-static int
-zfcp_cfdc_dev_ioctl(struct inode *inode, struct file *file,
-                    unsigned int command, unsigned long buffer)
+static long
+zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
+                   unsigned long buffer)
 {
        struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user;
        struct zfcp_adapter *adapter = NULL;
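
The zfcp change above replaces the BKL-protected ->ioctl entry plus register_ioctl32_conversion() with ->unlocked_ioctl and ->compat_ioctl. A minimal sketch of that pattern for a misc device, assuming the handler is 32/64-bit clean so one function can serve both slots; my_ioctl, my_fops and my_misc are hypothetical names.

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* runs without the BKL, unlike the old ->ioctl entry point */
	return 0;
}

static struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= my_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= my_ioctl,	/* handler assumed 32/64-bit clean */
#endif
};

static struct miscdevice my_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "my_dev",
	.fops	= &my_fops,
};

misc_register(&my_misc) then exposes the node; no register_ioctl32_conversion() call is needed for the compat path.
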
index a591fcb8aab13819f38eac7970d3072cb72ea6d4..4b1bb529f676dcbb158bbc05d43b90378ce9c313 100644 (file)
@@ -389,8 +389,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
        host->max_lun = NCR_700_MAX_LUNS;
        BUG_ON(NCR_700_transport_template == NULL);
        host->transportt = NCR_700_transport_template;
-       host->unique_id = hostdata->base;
-       host->base = hostdata->base;
+       host->unique_id = (unsigned long)hostdata->base;
        hostdata->eh_complete = NULL;
        host->hostdata[0] = (unsigned long)hostdata;
        /* kick the chip */
index df4aa30ae0aa139e5f64d119a9f9df21c6315254..e86012cf6ab763505a56a86fcae2a3db46539c8f 100644 (file)
 #include <scsi/scsi_device.h>
 
 
-#if defined(CONFIG_53C700_MEM_MAPPED) && defined(CONFIG_53C700_IO_MAPPED)
-#define CONFIG_53C700_BOTH_MAPPED
-#endif
-
 /* Turn on for general debugging---too verbose for normal use */
 #undef NCR_700_DEBUG
 /* Debug the tag queues, checking hash queue allocation and deallocation
 /* magic byte identifying an internally generated REQUEST_SENSE command */
 #define NCR_700_INTERNAL_SENSE_MAGIC   0x42
 
-/* WARNING: Leave this in for now: the dependency preprocessor doesn't
- * pick up file specific flags, so must define here if they are not
- * set */
-#if !defined(CONFIG_53C700_IO_MAPPED) && !defined(CONFIG_53C700_MEM_MAPPED)
-#error "Config.in must define either CONFIG_53C700_IO_MAPPED or CONFIG_53C700_MEM_MAPPED to use this scsi core."
-#endif
-
 struct NCR_700_Host_Parameters;
 
 /* These are the externally used routines */
@@ -184,7 +173,7 @@ struct NCR_700_command_slot {
 struct NCR_700_Host_Parameters {
        /* These must be filled in by the calling driver */
        int     clock;                  /* board clock speed in MHz */
-       unsigned long   base;           /* the base for the port (copied to host) */
+       void __iomem    *base;          /* the base for the port (copied to host) */
        struct device   *dev;
        __u32   dmode_extra;    /* adjustable bus settings */
        __u32   differential:1; /* if we are differential */
@@ -199,9 +188,6 @@ struct NCR_700_Host_Parameters {
        /* NOTHING BELOW HERE NEEDS ALTERING */
        __u32   fast:1;         /* if we can alter the SCSI bus clock
                                    speed (so can negiotiate sync) */
-#ifdef CONFIG_53C700_BOTH_MAPPED
-       __u32   mem_mapped;     /* set if memory mapped */
-#endif
        int     sync_clock;     /* The speed of the SYNC core */
 
        __u32   *script;                /* pointer to script location */
@@ -246,12 +232,18 @@ struct NCR_700_Host_Parameters {
 #ifdef CONFIG_53C700_LE_ON_BE
 #define bE     (hostdata->force_le_on_be ? 0 : 3)
 #define        bSWAP   (hostdata->force_le_on_be)
+/* This is terrible, but there's no raw version of ioread32.  That means
+ * that on a be board we swap twice (once in ioread32 and once again to 
+ * get the value correct) */
+#define bS_to_io(x)    ((hostdata->force_le_on_be) ? (x) : cpu_to_le32(x))
 #elif defined(__BIG_ENDIAN)
 #define bE     3
 #define bSWAP  0
+#define bS_to_io(x)    (x)
 #elif defined(__LITTLE_ENDIAN)
 #define bE     0
 #define bSWAP  0
+#define bS_to_io(x)    (x)
 #else
 #error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined, did you include byteorder.h?"
 #endif
@@ -455,91 +447,42 @@ struct NCR_700_Host_Parameters {
 
 
 static inline __u8
-NCR_700_mem_readb(struct Scsi_Host *host, __u32 reg)
-{
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-       return readb(host->base + (reg^bE));
-}
-
-static inline __u32
-NCR_700_mem_readl(struct Scsi_Host *host, __u32 reg)
-{
-       __u32 value = __raw_readl(host->base + reg);
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-#if 1
-       /* sanity check the register */
-       if((reg & 0x3) != 0)
-               BUG();
-#endif
-
-       return bS_to_cpu(value);
-}
-
-static inline void
-NCR_700_mem_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
-{
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-       writeb(value, host->base + (reg^bE));
-}
-
-static inline void
-NCR_700_mem_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
-{
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-#if 1
-       /* sanity check the register */
-       if((reg & 0x3) != 0)
-               BUG();
-#endif
-
-       __raw_writel(bS_to_host(value), host->base + reg);
-}
-
-static inline __u8
-NCR_700_io_readb(struct Scsi_Host *host, __u32 reg)
+NCR_700_readb(struct Scsi_Host *host, __u32 reg)
 {
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+       const struct NCR_700_Host_Parameters *hostdata
                = (struct NCR_700_Host_Parameters *)host->hostdata[0];
 
-       return inb(host->base + (reg^bE));
+       return ioread8(hostdata->base + (reg^bE));
 }
 
 static inline __u32
-NCR_700_io_readl(struct Scsi_Host *host, __u32 reg)
+NCR_700_readl(struct Scsi_Host *host, __u32 reg)
 {
-       __u32 value = inl(host->base + reg);
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+       const struct NCR_700_Host_Parameters *hostdata
                = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
+       __u32 value = ioread32(hostdata->base + reg);
 #if 1
        /* sanity check the register */
        if((reg & 0x3) != 0)
                BUG();
 #endif
 
-       return bS_to_cpu(value);
+       return bS_to_io(value);
 }
 
 static inline void
-NCR_700_io_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
+NCR_700_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
 {
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+       const struct NCR_700_Host_Parameters *hostdata
                = (struct NCR_700_Host_Parameters *)host->hostdata[0];
 
-       outb(value, host->base + (reg^bE));
+       iowrite8(value, hostdata->base + (reg^bE));
 }
 
 static inline void
-NCR_700_io_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
+NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
 {
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+       const struct NCR_700_Host_Parameters *hostdata
                = (struct NCR_700_Host_Parameters *)host->hostdata[0];
 
 #if 1
@@ -548,102 +491,7 @@ NCR_700_io_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
                BUG();
 #endif
 
-       outl(bS_to_host(value), host->base + reg);
-}
-
-#ifdef CONFIG_53C700_BOTH_MAPPED
-
-static inline __u8
-NCR_700_readb(struct Scsi_Host *host, __u32 reg)
-{
-       __u8 val;
-
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-       if(hostdata->mem_mapped)
-               val = NCR_700_mem_readb(host, reg);
-       else
-               val = NCR_700_io_readb(host, reg);
-
-       return val;
-}
-
-static inline __u32
-NCR_700_readl(struct Scsi_Host *host, __u32 reg)
-{
-       __u32 val;
-
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-       if(hostdata->mem_mapped)
-               val = NCR_700_mem_readl(host, reg);
-       else
-               val = NCR_700_io_readl(host, reg);
-
-       return val;
-}
-
-static inline void
-NCR_700_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
-{
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-       if(hostdata->mem_mapped)
-               NCR_700_mem_writeb(value, host, reg);
-       else
-               NCR_700_io_writeb(value, host, reg);
-}
-
-static inline void
-NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
-{
-       const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-               = (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-       if(hostdata->mem_mapped)
-               NCR_700_mem_writel(value, host, reg);
-       else
-               NCR_700_io_writel(value, host, reg);
-}
-
-static inline void
-NCR_700_set_mem_mapped(struct NCR_700_Host_Parameters *hostdata)
-{
-       hostdata->mem_mapped = 1;
-}
-
-static inline void
-NCR_700_set_io_mapped(struct NCR_700_Host_Parameters *hostdata)
-{
-       hostdata->mem_mapped = 0;
+       iowrite32(bS_to_io(value), hostdata->base + reg);
 }
 
-
-#elif defined(CONFIG_53C700_IO_MAPPED)
-
-#define NCR_700_readb NCR_700_io_readb
-#define NCR_700_readl NCR_700_io_readl
-#define NCR_700_writeb NCR_700_io_writeb
-#define NCR_700_writel NCR_700_io_writel
-
-#define NCR_700_set_io_mapped(x)
-#define NCR_700_set_mem_mapped(x)      error I/O mapped only
-
-#elif defined(CONFIG_53C700_MEM_MAPPED)
-
-#define NCR_700_readb NCR_700_mem_readb
-#define NCR_700_readl NCR_700_mem_readl
-#define NCR_700_writeb NCR_700_mem_writeb
-#define NCR_700_writel NCR_700_mem_writel
-
-#define NCR_700_set_io_mapped(x)       error MEM mapped only
-#define NCR_700_set_mem_mapped(x)
-
-#else
-#error neither CONFIG_53C700_MEM_MAPPED nor CONFIG_53C700_IO_MAPPED is set
-#endif
-
 #endif
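
With the accessors above collapsed onto ioread8()/ioread32()/iowrite8()/iowrite32(), the I/O-port versus memory-mapped distinction moves into how the bus driver produces the void __iomem cookie stored in hostdata->base. A minimal sketch of the two mapping paths; my_map_regs(), my_read_status() and the register offset are hypothetical.

#include <asm/io.h>

/* Hypothetical mapping helper: either path yields a void __iomem cookie. */
static void __iomem *my_map_regs(unsigned long ioport, unsigned long mmio_phys,
				 int use_mmio)
{
	if (use_mmio)
		return ioremap(mmio_phys, 0x80);	/* memory-mapped board */
	return ioport_map(ioport, 64);			/* I/O-port board */
}

/* The unified accessors then work regardless of how the cookie was made. */
static unsigned char my_read_status(void __iomem *base)
{
	return ioread8(base + 0x0c);	/* hypothetical register offset */
}
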
index d22b32f4662d24502185bb830d686b54938a4b56..750b11cefd934349480d6237f6df564edf6d297b 100644 (file)
@@ -942,11 +942,6 @@ config SCSI_NCR_D700
          Unless you have an NCR manufactured machine, the chances are that
          you do not have this SCSI card, so say N.
 
-config 53C700_IO_MAPPED
-       bool
-       depends on SCSI_NCR_D700
-       default y
-
 config SCSI_LASI700
        tristate "HP Lasi SCSI support for 53c700/710"
        depends on GSC && SCSI
@@ -956,11 +951,6 @@ config SCSI_LASI700
          many PA-RISC workstations & servers.  If you do not know whether you
          have a Lasi chip, it is safe to say "Y" here.
 
-config 53C700_MEM_MAPPED
-       bool
-       depends on SCSI_LASI700
-       default y
-
 config 53C700_LE_ON_BE
        bool
        depends on SCSI_LASI700
@@ -1324,6 +1314,14 @@ config SCSI_QLOGICPTI
 
 source "drivers/scsi/qla2xxx/Kconfig"
 
+config SCSI_LPFC
+       tristate "Emulex LightPulse Fibre Channel Support"
+       depends on PCI && SCSI
+       select SCSI_FC_ATTRS
+       help
+          This lpfc driver supports the Emulex LightPulse
+          Family of Fibre Channel PCI host adapters.
+
 config SCSI_SEAGATE
        tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support"
        depends on X86 && ISA && SCSI && BROKEN
index 29fcee35ec01f8206fa238f67ae9996313977545..9cb9fe7d623aeda983c2e6b2f4cb57bdca796673 100644 (file)
@@ -80,6 +80,7 @@ obj-$(CONFIG_SCSI_QLOGIC_ISP) += qlogicisp.o
 obj-$(CONFIG_SCSI_QLOGIC_FC)   += qlogicfc.o 
 obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o 
 obj-$(CONFIG_SCSI_QLA2XXX)     += qla2xxx/
+obj-$(CONFIG_SCSI_LPFC)                += lpfc/
 obj-$(CONFIG_SCSI_PAS16)       += pas16.o
 obj-$(CONFIG_SCSI_SEAGATE)     += seagate.o
 obj-$(CONFIG_SCSI_FD_8xx)      += seagate.o
index 507751941f1e65c65c6adfe0cb03f65d27b8be7d..e993a7ba276f79992985b29770e2706d16d15c10 100644 (file)
@@ -197,12 +197,10 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
        }
                
        /* Fill in the three required pieces of hostdata */
-       hostdata->base = region;
+       hostdata->base = ioport_map(region, 64);
        hostdata->differential = (((1<<siop) & differential) != 0);
        hostdata->clock = NCR_D700_CLOCK_MHZ;
 
-       NCR_700_set_io_mapped(hostdata);
-
        /* and register the siop */
        host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
        if (!host) {
@@ -214,6 +212,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
        /* FIXME: read this from SUS */
        host->this_id = id_array[slot * 2 + siop];
        host->irq = irq;
+       host->base = region;
        scsi_scan_host(host);
 
        return 0;
index 8398e0dd48100357fd80fedc371e88d1dcdcb1b7..ac8de03c9fa2b6f4b0e99f6708c0772743145410 100644 (file)
@@ -5,6 +5,7 @@
 config SCSI_AIC7XXX
        tristate "Adaptec AIC7xxx Fast -> U160 support (New Driver)"
        depends on (PCI || EISA) && SCSI
+       select SCSI_SPI_ATTRS
        ---help---
        This driver supports all of Adaptec's Fast through Ultra 160 PCI
        based SCSI controllers as well as the aic7770 based EISA and VLB
index fb2877c303f0f1b055ccbc35ad86813a69eecd59..550c9921691ad8df96602bf833d2c28ba7eba411 100644 (file)
@@ -687,7 +687,7 @@ ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
        int direction;
 
        cmd = scb->io_ctx;
-       direction = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+       direction = cmd->sc_data_direction;
        ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
        if (cmd->use_sg != 0) {
                struct scatterlist *sg;
@@ -3338,7 +3338,7 @@ ahd_linux_dv_inq(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
        }
 
        ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_READ;
+       cmd->sc_data_direction = DMA_FROM_DEVICE;
        cmd->cmd_len = 6;
        cmd->cmnd[0] = INQUIRY;
        cmd->cmnd[4] = request_length;
@@ -3363,7 +3363,7 @@ ahd_linux_dv_tur(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
 #endif
        /* Do a TUR to clear out any non-fatal transitional state */
        ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_NONE;
+       cmd->sc_data_direction = DMA_NONE;
        cmd->cmd_len = 6;
        cmd->cmnd[0] = TEST_UNIT_READY;
 }
@@ -3385,7 +3385,7 @@ ahd_linux_dv_rebd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
                free(targ->dv_buffer, M_DEVBUF);
        targ->dv_buffer = malloc(AHD_REBD_LEN, M_DEVBUF, M_WAITOK);
        ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_READ;
+       cmd->sc_data_direction = DMA_FROM_DEVICE;
        cmd->cmd_len = 10;
        cmd->cmnd[0] = READ_BUFFER;
        cmd->cmnd[1] = 0x0b;
@@ -3407,7 +3407,7 @@ ahd_linux_dv_web(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
        }
 #endif
        ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_WRITE;
+       cmd->sc_data_direction = DMA_TO_DEVICE;
        cmd->cmd_len = 10;
        cmd->cmnd[0] = WRITE_BUFFER;
        cmd->cmnd[1] = 0x0a;
@@ -3429,7 +3429,7 @@ ahd_linux_dv_reb(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
        }
 #endif
        ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_READ;
+       cmd->sc_data_direction = DMA_FROM_DEVICE;
        cmd->cmd_len = 10;
        cmd->cmnd[0] = READ_BUFFER;
        cmd->cmnd[1] = 0x0a;
@@ -3455,7 +3455,7 @@ ahd_linux_dv_su(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
        }
 #endif
        ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_NONE;
+       cmd->sc_data_direction = DMA_NONE;
        cmd->cmd_len = 6;
        cmd->cmnd[0] = START_STOP_UNIT;
        cmd->cmnd[4] = le | SSS_START;
@@ -4018,7 +4018,7 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
                        int      dir;
 
                        cur_seg = (struct scatterlist *)cmd->request_buffer;
-                       dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+                       dir = cmd->sc_data_direction;
                        nseg = pci_map_sg(ahd->dev_softc, cur_seg,
                                          cmd->use_sg, dir);
                        scb->platform_data->xfer_len = 0;
@@ -4038,7 +4038,7 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
                        int dir;
 
                        sg = scb->sg_list;
-                       dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+                       dir = cmd->sc_data_direction;
                        addr = pci_map_single(ahd->dev_softc,
                                              cmd->request_buffer,
                                              cmd->request_bufflen, dir);
index 031c6aaa5ca571456d1d4f8f3e0eabad56ef725e..d74b99dab7ec8d11003a176fa4685f2b9f85c4ae 100644 (file)
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+static struct scsi_transport_template *ahc_linux_transport_template = NULL;
 
 /*
  * Include aiclib.c as part of our
@@ -270,39 +274,6 @@ static adapter_tag_info_t aic7xxx_tag_info[] =
        {AIC7XXX_CONFIGED_TAG_COMMANDS}
 };
 
-/*
- * DV option:
- *
- * positive value = DV Enabled
- * zero                  = DV Disabled
- * negative value = DV Default for adapter type/seeprom
- */
-#ifdef CONFIG_AIC7XXX_DV_SETTING
-#define AIC7XXX_CONFIGED_DV CONFIG_AIC7XXX_DV_SETTING
-#else
-#define AIC7XXX_CONFIGED_DV -1
-#endif
-
-static int8_t aic7xxx_dv_settings[] =
-{
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV,
-       AIC7XXX_CONFIGED_DV
-};
-
 /*
  * There should be a specific return value for this in scsi.h, but
  * it seems that most drivers ignore it.
@@ -450,7 +421,6 @@ MODULE_PARM_DESC(aic7xxx,
 "      tag_info:<tag_str>      Set per-target tag depth\n"
 "      global_tag_depth:<int>  Global tag depth for every target\n"
 "                              on every bus\n"
-"      dv:<dv_settings>        Set per-controller Domain Validation Setting.\n"
 "      seltime:<int>           Selection Timeout\n"
 "                              (0/256ms,1/128ms,2/64ms,3/32ms)\n"
 "\n"
@@ -467,7 +437,6 @@ static void ahc_linux_handle_scsi_status(struct ahc_softc *,
                                         struct scb *);
 static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
                                         Scsi_Cmnd *cmd);
-static void ahc_linux_filter_inquiry(struct ahc_softc*, struct ahc_devinfo*);
 static void ahc_linux_sem_timeout(u_long arg);
 static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
 static void ahc_linux_release_simq(u_long arg);
@@ -476,49 +445,8 @@ static int  ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
 static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
 static void ahc_linux_size_nseg(void);
 static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);
-static void ahc_linux_start_dv(struct ahc_softc *ahc);
-static void ahc_linux_dv_timeout(struct scsi_cmnd *cmd);
-static int  ahc_linux_dv_thread(void *data);
-static void ahc_linux_kill_dv_thread(struct ahc_softc *ahc);
-static void ahc_linux_dv_target(struct ahc_softc *ahc, u_int target);
-static void ahc_linux_dv_transition(struct ahc_softc *ahc,
-                                   struct scsi_cmnd *cmd,
-                                   struct ahc_devinfo *devinfo,
-                                   struct ahc_linux_target *targ);
-static void ahc_linux_dv_fill_cmd(struct ahc_softc *ahc,
-                                 struct scsi_cmnd *cmd,
-                                 struct ahc_devinfo *devinfo);
-static void ahc_linux_dv_inq(struct ahc_softc *ahc,
-                            struct scsi_cmnd *cmd,
-                            struct ahc_devinfo *devinfo,
-                            struct ahc_linux_target *targ,
-                            u_int request_length);
-static void ahc_linux_dv_tur(struct ahc_softc *ahc,
-                            struct scsi_cmnd *cmd,
-                            struct ahc_devinfo *devinfo);
-static void ahc_linux_dv_rebd(struct ahc_softc *ahc,
-                             struct scsi_cmnd *cmd,
-                             struct ahc_devinfo *devinfo,
-                             struct ahc_linux_target *targ);
-static void ahc_linux_dv_web(struct ahc_softc *ahc,
-                            struct scsi_cmnd *cmd,
-                            struct ahc_devinfo *devinfo,
-                            struct ahc_linux_target *targ);
-static void ahc_linux_dv_reb(struct ahc_softc *ahc,
-                            struct scsi_cmnd *cmd,
-                            struct ahc_devinfo *devinfo,
-                            struct ahc_linux_target *targ);
-static void ahc_linux_dv_su(struct ahc_softc *ahc,
-                           struct scsi_cmnd *cmd,
-                           struct ahc_devinfo *devinfo,
-                           struct ahc_linux_target *targ);
-static int ahc_linux_fallback(struct ahc_softc *ahc,
-                             struct ahc_devinfo *devinfo);
-static void ahc_linux_dv_complete(Scsi_Cmnd *cmd);
-static void ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ);
 static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
                                     struct ahc_devinfo *devinfo);
-static u_int ahc_linux_user_dv_setting(struct ahc_softc *ahc);
 static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
                                         struct ahc_linux_device *dev);
 static struct ahc_linux_target*        ahc_linux_alloc_target(struct ahc_softc*,
@@ -534,7 +462,6 @@ static void ahc_linux_run_device_queue(struct ahc_softc*,
                                       struct ahc_linux_device*);
 static void ahc_linux_setup_tag_info_global(char *p);
 static aic_option_callback_t ahc_linux_setup_tag_info;
-static aic_option_callback_t ahc_linux_setup_dv;
 static int  aic7xxx_setup(char *s);
 static int  ahc_linux_next_unit(void);
 static void ahc_runq_tasklet(unsigned long data);
@@ -663,8 +590,7 @@ ahc_linux_next_device_to_run(struct ahc_softc *ahc)
 {
        
        if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
-        || (ahc->platform_data->qfrozen != 0
-         && AHC_DV_SIMQ_FROZEN(ahc) == 0))
+           || (ahc->platform_data->qfrozen != 0))
                return (NULL);
        return (TAILQ_FIRST(&ahc->platform_data->device_runq));
 }
@@ -693,12 +619,12 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
 
                sg = (struct scatterlist *)cmd->request_buffer;
                pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
-                            scsi_to_pci_dma_dir(cmd->sc_data_direction));
+                            cmd->sc_data_direction);
        } else if (cmd->request_bufflen != 0) {
                pci_unmap_single(ahc->dev_softc,
                                 scb->platform_data->buf_busaddr,
                                 cmd->request_bufflen,
-                                scsi_to_pci_dma_dir(cmd->sc_data_direction));
+                                cmd->sc_data_direction);
        }
 }
 
@@ -962,8 +888,7 @@ ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
         * DV commands through so long as we are only frozen to
         * perform DV.
         */
-       if (ahc->platform_data->qfrozen != 0
-        && AHC_DV_CMD(cmd) == 0) {
+       if (ahc->platform_data->qfrozen != 0) {
 
                ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
                ahc_linux_queue_cmd_complete(ahc, cmd);
@@ -1030,6 +955,11 @@ ahc_linux_slave_configure(Scsi_Device *device)
                ahc_linux_device_queue_depth(ahc, dev);
        }
        ahc_midlayer_entrypoint_unlock(ahc, &flags);
+
+       /* Initial Domain Validation */
+       if (!spi_initial_dv(device->sdev_target))
+               spi_dv_device(device);
+
        return (0);
 }
 
@@ -1545,18 +1475,6 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
        }
 }
 
-static void
-ahc_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
-{
-
-       if ((instance >= 0)
-        && (instance < NUM_ELEMENTS(aic7xxx_dv_settings))) {
-               aic7xxx_dv_settings[instance] = value;
-               if (bootverbose)
-                       printf("dv[%d] = %d\n", instance, value);
-       }
-}
-
 /*
  * Handle Linux boot parameters. This routine allows for assigning a value
  * to a parameter with a ':' between the parameter and the value.
@@ -1616,9 +1534,6 @@ aic7xxx_setup(char *s)
                } else if (strncmp(p, "tag_info", n) == 0) {
                        s = aic_parse_brace_option("tag_info", p + n, end,
                            2, ahc_linux_setup_tag_info, 0);
-               } else if (strncmp(p, "dv", n) == 0) {
-                       s = aic_parse_brace_option("dv", p + n, end, 1,
-                           ahc_linux_setup_dv, 0);
                } else if (p[n] == ':') {
                        *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
                } else if (strncmp(p, "verbose", n) == 0) {
@@ -1641,7 +1556,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
        struct   Scsi_Host *host;
        char    *new_name;
        u_long   s;
-       u_int    targ_offset;
 
        template->name = ahc->description;
        host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
@@ -1677,57 +1591,11 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
        scsi_set_pci_device(host, ahc->dev_softc);
 #endif
        ahc_linux_initialize_scsi_bus(ahc);
-       ahc_unlock(ahc, &s);
-       ahc->platform_data->dv_pid = kernel_thread(ahc_linux_dv_thread, ahc, 0);
-       ahc_lock(ahc, &s);
-       if (ahc->platform_data->dv_pid < 0) {
-               printf("%s: Failed to create DV thread, error= %d\n",
-                      ahc_name(ahc), ahc->platform_data->dv_pid);
-               return (-ahc->platform_data->dv_pid);
-       }
-       /*
-        * Initially allocate *all* of our linux target objects
-        * so that the DV thread will scan them all in parallel
-        * just after driver initialization.  Any device that
-        * does not exist will have its target object destroyed
-        * by the selection timeout handler.  In the case of a
-        * device that appears after the initial DV scan, async
-        * will commence should that first command be successful.
-        * will comence should that first command be successful.
-        */
-       for (targ_offset = 0;
-            targ_offset < host->max_id * (host->max_channel + 1);
-            targ_offset++) {
-               u_int channel;
-               u_int target;
-
-               channel = 0;
-               target = targ_offset;
-               if (target > 7
-                && (ahc->features & AHC_TWIN) != 0) {
-                       channel = 1;
-                       target &= 0x7;
-               }
-               /*
-                * Skip our own ID.  Some Compaq/HP storage devices
-                * have enclosure management devices that respond to
-                * single bit selection (i.e. selecting ourselves).
-                * It is expected that either an external application
-                * or a modified kernel will be used to probe this
-                * ID if it is appropriate.  To accommodate these
-                * installations, ahc_linux_alloc_target() will allocate
-                * for our ID if asked to do so.
-                */
-               if ((channel == 0 && target == ahc->our_id)
-                || (channel == 1 && target == ahc->our_id_b))
-                       continue;
-
-               ahc_linux_alloc_target(ahc, channel, target);
-       }
        ahc_intr_enable(ahc, TRUE);
-       ahc_linux_start_dv(ahc);
        ahc_unlock(ahc, &s);
 
+       host->transportt = ahc_linux_transport_template;
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
        scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
        scsi_scan_host(host);
@@ -1860,8 +1728,6 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
        ahc->platform_data->completeq_timer.function =
            (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
        init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
-       init_MUTEX_LOCKED(&ahc->platform_data->dv_sem);
-       init_MUTEX_LOCKED(&ahc->platform_data->dv_cmd_sem);
        tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
                     (unsigned long)ahc);
        ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
@@ -1881,7 +1747,6 @@ ahc_platform_free(struct ahc_softc *ahc)
 
        if (ahc->platform_data != NULL) {
                del_timer_sync(&ahc->platform_data->completeq_timer);
-               ahc_linux_kill_dv_thread(ahc);
                tasklet_kill(&ahc->platform_data->runq_tasklet);
                if (ahc->platform_data->host != NULL) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
@@ -2120,1571 +1985,200 @@ ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
        ahc_unlock(ahc, &flags);
 }
 
-static void
-ahc_linux_start_dv(struct ahc_softc *ahc)
+static u_int
+ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
 {
+       static int warned_user;
+       u_int tags;
 
-       /*
-        * Freeze the simq and signal ahc_linux_queue to not let any
-        * more commands through.
-        */
-       if ((ahc->platform_data->flags & AHC_DV_ACTIVE) == 0) {
-#ifdef AHC_DEBUG
-               if (ahc_debug & AHC_SHOW_DV)
-                       printf("%s: Waking DV thread\n", ahc_name(ahc));
-#endif
+       tags = 0;
+       if ((ahc->user_discenable & devinfo->target_mask) != 0) {
+               if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
+                       if (warned_user == 0) {
 
-               ahc->platform_data->flags |= AHC_DV_ACTIVE;
-               ahc_linux_freeze_simq(ahc);
+                               printf(KERN_WARNING
+"aic7xxx: WARNING: Insufficient tag_info instances\n"
+"aic7xxx: for installed controllers. Using defaults\n"
+"aic7xxx: Please update the aic7xxx_tag_info array in\n"
+"aic7xxx: the aic7xxx_osm..c source file.\n");
+                               warned_user++;
+                       }
+                       tags = AHC_MAX_QUEUE;
+               } else {
+                       adapter_tag_info_t *tag_info;
 
-               /* Wake up the DV kthread */
-               up(&ahc->platform_data->dv_sem);
+                       tag_info = &aic7xxx_tag_info[ahc->unit];
+                       tags = tag_info->tag_commands[devinfo->target_offset];
+                       if (tags > AHC_MAX_QUEUE)
+                               tags = AHC_MAX_QUEUE;
+               }
        }
+       return (tags);
 }
 
+/*
+ * Determines the queue depth for a given device.
+ */
 static void
-ahc_linux_kill_dv_thread(struct ahc_softc *ahc)
+ahc_linux_device_queue_depth(struct ahc_softc *ahc,
+                            struct ahc_linux_device *dev)
 {
-       u_long s;
-
-       ahc_lock(ahc, &s);
-       if (ahc->platform_data->dv_pid != 0) {
-               ahc->platform_data->flags |= AHC_DV_SHUTDOWN;
-               ahc_unlock(ahc, &s);
-               up(&ahc->platform_data->dv_sem);
+       struct  ahc_devinfo devinfo;
+       u_int   tags;
 
-               /*
-                * Use the eh_sem as an indicator that the
-                * dv thread is exiting.  Note that the dv
-                * thread must still return after performing
-                * the up on our semaphore before it has
-                * completely exited this module.  Unfortunately,
-                * there seems to be no easy way to wait for the
-                * exit of a thread for which you are not the
-                * parent (dv threads are parented by init).
-                * Cross your fingers...
-                */
-               down(&ahc->platform_data->eh_sem);
+       ahc_compile_devinfo(&devinfo,
+                           dev->target->channel == 0
+                         ? ahc->our_id : ahc->our_id_b,
+                           dev->target->target, dev->lun,
+                           dev->target->channel == 0 ? 'A' : 'B',
+                           ROLE_INITIATOR);
+       tags = ahc_linux_user_tagdepth(ahc, &devinfo);
+       if (tags != 0
+        && dev->scsi_device != NULL
+        && dev->scsi_device->tagged_supported != 0) {
 
-               /*
-                * Mark the dv thread as already dead.  This
-                * avoids attempting to kill it a second time.
-                * This is necessary because we must kill the
-                * DV thread before calling ahc_free() in the
-                * module shutdown case to avoid bogus locking
-                * in the SCSI mid-layer, but we ahc_free() is
-                * called without killing the DV thread in the
-                * instance detach case, so ahc_platform_free()
-                * calls us again to verify that the DV thread
-                * is dead.
-                */
-               ahc->platform_data->dv_pid = 0;
+               ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
+               ahc_print_devinfo(ahc, &devinfo);
+               printf("Tagged Queuing enabled.  Depth %d\n", tags);
        } else {
-               ahc_unlock(ahc, &s);
+               ahc_set_tags(ahc, &devinfo, AHC_QUEUE_NONE);
        }
 }
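
The rewritten ahc_linux_device_queue_depth() above only decides whether tagged queuing is switched on and which depth the user configured; this hunk does not show the value being handed to the SCSI midlayer. As a rough sketch only, a 2.6-era low-level driver would typically push such a depth from its slave_configure() hook via scsi_adjust_queue_depth(); the helper below is hypothetical and is not part of this patch.

	#include <scsi/scsi.h>            /* MSG_ORDERED_TAG */
	#include <scsi/scsi_device.h>

	/* Hypothetical helper, not from this patch: push an already computed
	 * depth to the midlayer for one scsi_device. */
	static void sketch_apply_queue_depth(struct scsi_device *sdev, unsigned int tags)
	{
		if (tags != 0 && sdev->tagged_supported)
			scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, tags);
		else
			scsi_adjust_queue_depth(sdev, 0, 1);  /* untagged, single command */
	}

The aic7xxx driver in this patch tracks its own per-device openings instead, so the call above is only a contrast with the generic midlayer path, not a description of what this diff does.
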
 
-static int
-ahc_linux_dv_thread(void *data)
+static void
+ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
 {
-       struct  ahc_softc *ahc;
-       int     target;
-       u_long  s;
-
-       ahc = (struct ahc_softc *)data;
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV)
-               printf("Launching DV Thread\n");
-#endif
-
-       /*
-        * Complete thread creation.
-        */
-       lock_kernel();
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-       /*
-        * Don't care about any signals.
-        */
-       siginitsetinv(&current->blocked, 0);
-
-       daemonize();
-       sprintf(current->comm, "ahc_dv_%d", ahc->unit);
-#else
-       daemonize("ahc_dv_%d", ahc->unit);
-       current->flags |= PF_FREEZE;
-#endif
-       unlock_kernel();
-
-       while (1) {
-               /*
-                * Use down_interruptible() rather than down() to
-                * avoid inclusion in the load average.
-                */
-               down_interruptible(&ahc->platform_data->dv_sem);
+       struct   ahc_cmd *acmd;
+       struct   scsi_cmnd *cmd;
+       struct   scb *scb;
+       struct   hardware_scb *hscb;
+       struct   ahc_initiator_tinfo *tinfo;
+       struct   ahc_tmode_tstate *tstate;
+       uint16_t mask;
 
-               /* Check to see if we've been signaled to exit */
-               ahc_lock(ahc, &s);
-               if ((ahc->platform_data->flags & AHC_DV_SHUTDOWN) != 0) {
-                       ahc_unlock(ahc, &s);
-                       break;
-               }
-               ahc_unlock(ahc, &s);
+       if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
+               panic("running device on run list");
 
-#ifdef AHC_DEBUG
-               if (ahc_debug & AHC_SHOW_DV)
-                       printf("%s: Beginning Domain Validation\n",
-                              ahc_name(ahc));
-#endif
+       while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
+           && dev->openings > 0 && dev->qfrozen == 0) {
 
                /*
-                * Wait for any pending commands to drain before proceeding.
+                * Schedule us to run later.  The only reason we are not
+                * running is because the whole controller Q is frozen.
                 */
-               ahc_lock(ahc, &s);
-               while (LIST_FIRST(&ahc->pending_scbs) != NULL) {
-                       ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_EMPTY;
-                       ahc_unlock(ahc, &s);
-                       down_interruptible(&ahc->platform_data->dv_sem);
-                       ahc_lock(ahc, &s);
+               if (ahc->platform_data->qfrozen != 0) {
+                       TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
+                                         dev, links);
+                       dev->flags |= AHC_DEV_ON_RUN_LIST;
+                       return;
                }
-
                /*
-                * Wait for the SIMQ to be released so that DV is the
-                * only reason the queue is frozen.
+                * Get an scb to use.
                 */
-               while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
-                       ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
-                       ahc_unlock(ahc, &s);
-                       down_interruptible(&ahc->platform_data->dv_sem);
-                       ahc_lock(ahc, &s);
+               if ((scb = ahc_get_scb(ahc)) == NULL) {
+                       TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
+                                        dev, links);
+                       dev->flags |= AHC_DEV_ON_RUN_LIST;
+                       ahc->flags |= AHC_RESOURCE_SHORTAGE;
+                       return;
                }
-               ahc_unlock(ahc, &s);
-
-               for (target = 0; target < AHC_NUM_TARGETS; target++)
-                       ahc_linux_dv_target(ahc, target);
-
-               ahc_lock(ahc, &s);
-               ahc->platform_data->flags &= ~AHC_DV_ACTIVE;
-               ahc_unlock(ahc, &s);
+               TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
+               cmd = &acmd_scsi_cmd(acmd);
+               scb->io_ctx = cmd;
+               scb->platform_data->dev = dev;
+               hscb = scb->hscb;
+               cmd->host_scribble = (char *)scb;
 
                /*
-                * Release the SIMQ so that normal commands are
-                * allowed to continue on the bus.
+                * Fill out basics of the HSCB.
                 */
-               ahc_linux_release_simq((u_long)ahc);
-       }
-       up(&ahc->platform_data->eh_sem);
-       return (0);
-}
+               hscb->control = 0;
+               hscb->scsiid = BUILD_SCSIID(ahc, cmd);
+               hscb->lun = cmd->device->lun;
+               mask = SCB_GET_TARGET_MASK(ahc, scb);
+               tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
+                                           SCB_GET_OUR_ID(scb),
+                                           SCB_GET_TARGET(ahc, scb), &tstate);
+               hscb->scsirate = tinfo->scsirate;
+               hscb->scsioffset = tinfo->curr.offset;
+               if ((tstate->ultraenb & mask) != 0)
+                       hscb->control |= ULTRAENB;
 
-#define AHC_LINUX_DV_INQ_SHORT_LEN     36
-#define AHC_LINUX_DV_INQ_LEN           256
-#define AHC_LINUX_DV_TIMEOUT           (HZ / 4)
+               if ((ahc->user_discenable & mask) != 0)
+                       hscb->control |= DISCENB;
 
-#define AHC_SET_DV_STATE(ahc, targ, newstate) \
-       ahc_set_dv_state(ahc, targ, newstate, __LINE__)
+               if ((tstate->auto_negotiate & mask) != 0) {
+                       scb->flags |= SCB_AUTO_NEGOTIATE;
+                       scb->hscb->control |= MK_MESSAGE;
+               }
 
-static __inline void
-ahc_set_dv_state(struct ahc_softc *ahc, struct ahc_linux_target *targ,
-                ahc_dv_state newstate, u_int line)
-{
-       ahc_dv_state oldstate;
+               if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+                       int     msg_bytes;
+                       uint8_t tag_msgs[2];
 
-       oldstate = targ->dv_state;
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV)
-               printf("%s:%d: Going from state %d to state %d\n",
-                      ahc_name(ahc), line, oldstate, newstate);
+                       msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
+                       if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
+                               hscb->control |= tag_msgs[0];
+                               if (tag_msgs[0] == MSG_ORDERED_TASK)
+                                       dev->commands_since_idle_or_otag = 0;
+                       } else
 #endif
+                       if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
+                        && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
+                               hscb->control |= MSG_ORDERED_TASK;
+                               dev->commands_since_idle_or_otag = 0;
+                       } else {
+                               hscb->control |= MSG_SIMPLE_TASK;
+                       }
+               }
 
-       if (oldstate == newstate)
-               targ->dv_state_retry++;
-       else
-               targ->dv_state_retry = 0;
-       targ->dv_state = newstate;
-}
+               hscb->cdb_len = cmd->cmd_len;
+               if (hscb->cdb_len <= 12) {
+                       memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
+               } else {
+                       memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
+                       scb->flags |= SCB_CDB32_PTR;
+               }
 
-static void
-ahc_linux_dv_target(struct ahc_softc *ahc, u_int target_offset)
-{
-       struct   ahc_devinfo devinfo;
-       struct   ahc_linux_target *targ;
-       struct   scsi_cmnd *cmd;
-       struct   scsi_device *scsi_dev;
-       struct   scsi_sense_data *sense;
-       uint8_t *buffer;
-       u_long   s;
-       u_int    timeout;
-       int      echo_size;
+               scb->platform_data->xfer_len = 0;
+               ahc_set_residual(scb, 0);
+               ahc_set_sense_residual(scb, 0);
+               scb->sg_count = 0;
+               if (cmd->use_sg != 0) {
+                       struct  ahc_dma_seg *sg;
+                       struct  scatterlist *cur_seg;
+                       struct  scatterlist *end_seg;
+                       int     nseg;
 
-       sense = NULL;
-       buffer = NULL;
-       echo_size = 0;
-       ahc_lock(ahc, &s);
-       targ = ahc->platform_data->targets[target_offset];
-       if (targ == NULL || (targ->flags & AHC_DV_REQUIRED) == 0) {
-               ahc_unlock(ahc, &s);
-               return;
-       }
-       ahc_compile_devinfo(&devinfo,
-                           targ->channel == 0 ? ahc->our_id : ahc->our_id_b,
-                           targ->target, /*lun*/0, targ->channel + 'A',
-                           ROLE_INITIATOR);
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, &devinfo);
-               printf("Performing DV\n");
-       }
-#endif
+                       cur_seg = (struct scatterlist *)cmd->request_buffer;
+                       nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
+                           cmd->sc_data_direction);
+                       end_seg = cur_seg + nseg;
+                       /* Copy the segments into the SG list. */
+                       sg = scb->sg_list;
+                       /*
+                        * The sg_count may be larger than nseg if
+                        * a transfer crosses a 32bit page.
+                        */ 
+                       while (cur_seg < end_seg) {
+                               dma_addr_t addr;
+                               bus_size_t len;
+                               int consumed;
 
-       ahc_unlock(ahc, &s);
+                               addr = sg_dma_address(cur_seg);
+                               len = sg_dma_len(cur_seg);
+                               consumed = ahc_linux_map_seg(ahc, scb,
+                                                            sg, addr, len);
+                               sg += consumed;
+                               scb->sg_count += consumed;
+                               cur_seg++;
+                       }
+                       sg--;
+                       sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
 
-       cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
-       scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
-       scsi_dev->host = ahc->platform_data->host;
-       scsi_dev->id = devinfo.target;
-       scsi_dev->lun = devinfo.lun;
-       scsi_dev->channel = devinfo.channel - 'A';
-       ahc->platform_data->dv_scsi_dev = scsi_dev;
-
-       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_SHORT_ASYNC);
-
-       while (targ->dv_state != AHC_DV_STATE_EXIT) {
-               timeout = AHC_LINUX_DV_TIMEOUT;
-               switch (targ->dv_state) {
-               case AHC_DV_STATE_INQ_SHORT_ASYNC:
-               case AHC_DV_STATE_INQ_ASYNC:
-               case AHC_DV_STATE_INQ_ASYNC_VERIFY:
                        /*
-                        * Set things to async narrow to reduce the
-                        * chance that the INQ will fail.
-                        */
-                       ahc_lock(ahc, &s);
-                       ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
-                                        AHC_TRANS_GOAL, /*paused*/FALSE);
-                       ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
-                                     AHC_TRANS_GOAL, /*paused*/FALSE);
-                       ahc_unlock(ahc, &s);
-                       timeout = 10 * HZ;
-                       targ->flags &= ~AHC_INQ_VALID;
-                       /* FALLTHROUGH */
-               case AHC_DV_STATE_INQ_VERIFY:
-               {
-                       u_int inq_len;
-
-                       if (targ->dv_state == AHC_DV_STATE_INQ_SHORT_ASYNC)
-                               inq_len = AHC_LINUX_DV_INQ_SHORT_LEN;
-                       else
-                               inq_len = targ->inq_data->additional_length + 5;
-                       ahc_linux_dv_inq(ahc, cmd, &devinfo, targ, inq_len);
-                       break;
-               }
-               case AHC_DV_STATE_TUR:
-               case AHC_DV_STATE_BUSY:
-                       timeout = 5 * HZ;
-                       ahc_linux_dv_tur(ahc, cmd, &devinfo);
-                       break;
-               case AHC_DV_STATE_REBD:
-                       ahc_linux_dv_rebd(ahc, cmd, &devinfo, targ);
-                       break;
-               case AHC_DV_STATE_WEB:
-                       ahc_linux_dv_web(ahc, cmd, &devinfo, targ);
-                       break;
-
-               case AHC_DV_STATE_REB:
-                       ahc_linux_dv_reb(ahc, cmd, &devinfo, targ);
-                       break;
-
-               case AHC_DV_STATE_SU:
-                       ahc_linux_dv_su(ahc, cmd, &devinfo, targ);
-                       timeout = 50 * HZ;
-                       break;
-
-               default:
-                       ahc_print_devinfo(ahc, &devinfo);
-                       printf("Unknown DV state %d\n", targ->dv_state);
-                       goto out;
-               }
-
-               /* Queue the command and wait for it to complete */
-               /* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
-               init_timer(&cmd->eh_timeout);
-#ifdef AHC_DEBUG
-               if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
-                       /*
-                        * All of the printfs during negotiation
-                        * really slow down the negotiation.
-                        * Add a bit of time just to be safe.
-                        */
-                       timeout += HZ;
-#endif
-               scsi_add_timer(cmd, timeout, ahc_linux_dv_timeout);
-               /*
-                * In 2.5.X, it is assumed that all calls from the
-                * "midlayer" (which we are emulating) will have the
-                * ahc host lock held.  For other kernels, the
-                * io_request_lock must be held.
-                */
-#if AHC_SCSI_HAS_HOST_LOCK != 0
-               ahc_lock(ahc, &s);
-#else
-               spin_lock_irqsave(&io_request_lock, s);
-#endif
-               ahc_linux_queue(cmd, ahc_linux_dv_complete);
-#if AHC_SCSI_HAS_HOST_LOCK != 0
-               ahc_unlock(ahc, &s);
-#else
-               spin_unlock_irqrestore(&io_request_lock, s);
-#endif
-               down_interruptible(&ahc->platform_data->dv_cmd_sem);
-               /*
-                * Wait for the SIMQ to be released so that DV is the
-                * only reason the queue is frozen.
-                */
-               ahc_lock(ahc, &s);
-               while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
-                       ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
-                       ahc_unlock(ahc, &s);
-                       down_interruptible(&ahc->platform_data->dv_sem);
-                       ahc_lock(ahc, &s);
-               }
-               ahc_unlock(ahc, &s);
-
-               ahc_linux_dv_transition(ahc, cmd, &devinfo, targ);
-       }
-
-out:
-       if ((targ->flags & AHC_INQ_VALID) != 0
-        && ahc_linux_get_device(ahc, devinfo.channel - 'A',
-                                devinfo.target, devinfo.lun,
-                                /*alloc*/FALSE) == NULL) {
-               /*
-                * The DV state machine failed to configure this device.  
-                * This is normal if DV is disabled.  Since we have inquiry
-                * data, filter it and use the "optimistic" negotiation
-                * parameters found in the inquiry string.
-                */
-               ahc_linux_filter_inquiry(ahc, &devinfo);
-               if ((targ->flags & (AHC_BASIC_DV|AHC_ENHANCED_DV)) != 0) {
-                       ahc_print_devinfo(ahc, &devinfo);
-                       printf("DV failed to configure device.  "
-                              "Please file a bug report against "
-                              "this driver.\n");
-               }
-       }
-
-       if (cmd != NULL)
-               free(cmd, M_DEVBUF);
-
-       if (ahc->platform_data->dv_scsi_dev != NULL) {
-               free(ahc->platform_data->dv_scsi_dev, M_DEVBUF);
-               ahc->platform_data->dv_scsi_dev = NULL;
-       }
-
-       ahc_lock(ahc, &s);
-       if (targ->dv_buffer != NULL) {
-               free(targ->dv_buffer, M_DEVBUF);
-               targ->dv_buffer = NULL;
-       }
-       if (targ->dv_buffer1 != NULL) {
-               free(targ->dv_buffer1, M_DEVBUF);
-               targ->dv_buffer1 = NULL;
-       }
-       targ->flags &= ~AHC_DV_REQUIRED;
-       if (targ->refcount == 0)
-               ahc_linux_free_target(ahc, targ);
-       ahc_unlock(ahc, &s);
-}
-
-static void
-ahc_linux_dv_transition(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-                       struct ahc_devinfo *devinfo,
-                       struct ahc_linux_target *targ)
-{
-       u_int32_t status;
-
-       status = aic_error_action(cmd, targ->inq_data,
-                                 ahc_cmd_get_transaction_status(cmd),
-                                 ahc_cmd_get_scsi_status(cmd));
-       
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Entering ahc_linux_dv_transition, state= %d, "
-                      "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
-                      status, cmd->result);
-       }
-#endif
-
-       switch (targ->dv_state) {
-       case AHC_DV_STATE_INQ_SHORT_ASYNC:
-       case AHC_DV_STATE_INQ_ASYNC:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-               {
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
-                       break;
-               }
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               case SS_TUR:
-               case SS_RETRY:
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ)
-                               targ->dv_state_retry--;
-                       if ((status & SS_ERRMASK) == EBUSY)
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
-                       if (targ->dv_state_retry < 10)
-                               break;
-                       /* FALLTHROUGH */
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("Failed DV inquiry, skipping\n");
-                       }
-#endif
-                       break;
-               }
-               break;
-       case AHC_DV_STATE_INQ_ASYNC_VERIFY:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-               {
-                       u_int xportflags;
-                       u_int spi3data;
-
-                       if (memcmp(targ->inq_data, targ->dv_buffer,
-                                  AHC_LINUX_DV_INQ_LEN) != 0) {
-                               /*
-                                * Inquiry data must have changed.
-                                * Try from the top again.
-                                */
-                               AHC_SET_DV_STATE(ahc, targ,
-                                                AHC_DV_STATE_INQ_SHORT_ASYNC);
-                               break;
-                       }
-
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
-                       targ->flags |= AHC_INQ_VALID;
-                       if (ahc_linux_user_dv_setting(ahc) == 0)
-                               break;
-
-                       xportflags = targ->inq_data->flags;
-                       if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
-                               break;
-
-                       spi3data = targ->inq_data->spi3data;
-                       switch (spi3data & SID_SPI_CLOCK_DT_ST) {
-                       default:
-                       case SID_SPI_CLOCK_ST:
-                               /* Assume only basic DV is supported. */
-                               targ->flags |= AHC_BASIC_DV;
-                               break;
-                       case SID_SPI_CLOCK_DT:
-                       case SID_SPI_CLOCK_DT_ST:
-                               targ->flags |= AHC_ENHANCED_DV;
-                               break;
-                       }
-                       break;
-               }
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               case SS_TUR:
-               case SS_RETRY:
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ)
-                               targ->dv_state_retry--;
-
-                       if ((status & SS_ERRMASK) == EBUSY)
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
-                       if (targ->dv_state_retry < 10)
-                               break;
-                       /* FALLTHROUGH */
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("Failed DV inquiry, skipping\n");
-                       }
-#endif
-                       break;
-               }
-               break;
-       case AHC_DV_STATE_INQ_VERIFY:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-               {
-
-                       if (memcmp(targ->inq_data, targ->dv_buffer,
-                                  AHC_LINUX_DV_INQ_LEN) == 0) {
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                               break;
-                       }
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               int i;
-
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("Inquiry buffer mismatch:");
-                               for (i = 0; i < AHC_LINUX_DV_INQ_LEN; i++) {
-                                       if ((i & 0xF) == 0)
-                                               printf("\n        ");
-                                       printf("0x%x:0x0%x ",
-                                              ((uint8_t *)targ->inq_data)[i], 
-                                              targ->dv_buffer[i]);
-                               }
-                               printf("\n");
-                       }
-#endif
-
-                       if (ahc_linux_fallback(ahc, devinfo) != 0) {
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                               break;
-                       }
-                       /*
-                        * Do not count "falling back"
-                        * against our retries.
-                        */
-                       targ->dv_state_retry = 0;
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       break;
-               }
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               case SS_TUR:
-               case SS_RETRY:
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ) {
-                               targ->dv_state_retry--;
-                       } else if ((status & SSQ_FALLBACK) != 0) {
-                               if (ahc_linux_fallback(ahc, devinfo) != 0) {
-                                       AHC_SET_DV_STATE(ahc, targ,
-                                                        AHC_DV_STATE_EXIT);
-                                       break;
-                               }
-                               /*
-                                * Do not count "falling back"
-                                * against our retries.
-                                */
-                               targ->dv_state_retry = 0;
-                       } else if ((status & SS_ERRMASK) == EBUSY)
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
-                       if (targ->dv_state_retry < 10)
-                               break;
-                       /* FALLTHROUGH */
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("Failed DV inquiry, skipping\n");
-                       }
-#endif
-                       break;
-               }
-               break;
-
-       case AHC_DV_STATE_TUR:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-                       if ((targ->flags & AHC_BASIC_DV) != 0) {
-                               ahc_linux_filter_inquiry(ahc, devinfo);
-                               AHC_SET_DV_STATE(ahc, targ,
-                                                AHC_DV_STATE_INQ_VERIFY);
-                       } else if ((targ->flags & AHC_ENHANCED_DV) != 0) {
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REBD);
-                       } else {
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       }
-                       break;
-               case SS_RETRY:
-               case SS_TUR:
-                       if ((status & SS_ERRMASK) == EBUSY) {
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
-                               break;
-                       }
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ) {
-                               targ->dv_state_retry--;
-                       } else if ((status & SSQ_FALLBACK) != 0) {
-                               if (ahc_linux_fallback(ahc, devinfo) != 0) {
-                                       AHC_SET_DV_STATE(ahc, targ,
-                                                        AHC_DV_STATE_EXIT);
-                                       break;
-                               }
-                               /*
-                                * Do not count "falling back"
-                                * against our retries.
-                                */
-                               targ->dv_state_retry = 0;
-                       }
-                       if (targ->dv_state_retry >= 10) {
-#ifdef AHC_DEBUG
-                               if (ahc_debug & AHC_SHOW_DV) {
-                                       ahc_print_devinfo(ahc, devinfo);
-                                       printf("DV TUR retries exhausted\n");
-                               }
-#endif
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                               break;
-                       }
-                       if (status & SSQ_DELAY)
-                               ssleep(1);
-
-                       break;
-               case SS_START:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_SU);
-                       break;
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       break;
-               }
-               break;
-
-       case AHC_DV_STATE_REBD:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-               {
-                       uint32_t echo_size;
-
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
-                       echo_size = scsi_3btoul(&targ->dv_buffer[1]);
-                       echo_size &= 0x1FFF;
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("Echo buffer size= %d\n", echo_size);
-                       }
-#endif
-                       if (echo_size == 0) {
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                               break;
-                       }
-
-                       /* Generate the buffer pattern */
-                       targ->dv_echo_size = echo_size;
-                       ahc_linux_generate_dv_pattern(targ);
-                       /*
-                        * Setup initial negotiation values.
-                        */
-                       ahc_linux_filter_inquiry(ahc, devinfo);
-                       break;
-               }
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               case SS_RETRY:
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ)
-                               targ->dv_state_retry--;
-                       if (targ->dv_state_retry <= 10)
-                               break;
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("DV REBD retries exhausted\n");
-                       }
-#endif
-                       /* FALLTHROUGH */
-               case SS_FATAL:
-               default:
-                       /*
-                        * Setup initial negotiation values
-                        * and try level 1 DV.
-                        */
-                       ahc_linux_filter_inquiry(ahc, devinfo);
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_VERIFY);
-                       targ->dv_echo_size = 0;
-                       break;
-               }
-               break;
-
-       case AHC_DV_STATE_WEB:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REB);
-                       break;
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               case SS_RETRY:
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ) {
-                               targ->dv_state_retry--;
-                       } else if ((status & SSQ_FALLBACK) != 0) {
-                               if (ahc_linux_fallback(ahc, devinfo) != 0) {
-                                       AHC_SET_DV_STATE(ahc, targ,
-                                                        AHC_DV_STATE_EXIT);
-                                       break;
-                               }
-                               /*
-                                * Do not count "falling back"
-                                * against our retries.
-                                */
-                               targ->dv_state_retry = 0;
-                       }
-                       if (targ->dv_state_retry <= 10)
-                               break;
-                       /* FALLTHROUGH */
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("DV WEB retries exhausted\n");
-                       }
-#endif
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       break;
-               }
-               break;
-
-       case AHC_DV_STATE_REB:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-                       if (memcmp(targ->dv_buffer, targ->dv_buffer1,
-                                  targ->dv_echo_size) != 0) {
-                               if (ahc_linux_fallback(ahc, devinfo) != 0)
-                                       AHC_SET_DV_STATE(ahc, targ,
-                                                        AHC_DV_STATE_EXIT);
-                               else
-                                       AHC_SET_DV_STATE(ahc, targ,
-                                                        AHC_DV_STATE_WEB);
-                               break;
-                       }
-                       
-                       if (targ->dv_buffer != NULL) {
-                               free(targ->dv_buffer, M_DEVBUF);
-                               targ->dv_buffer = NULL;
-                       }
-                       if (targ->dv_buffer1 != NULL) {
-                               free(targ->dv_buffer1, M_DEVBUF);
-                               targ->dv_buffer1 = NULL;
-                       }
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       break;
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               case SS_RETRY:
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ) {
-                               targ->dv_state_retry--;
-                       } else if ((status & SSQ_FALLBACK) != 0) {
-                               if (ahc_linux_fallback(ahc, devinfo) != 0) {
-                                       AHC_SET_DV_STATE(ahc, targ,
-                                                        AHC_DV_STATE_EXIT);
-                                       break;
-                               }
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
-                       }
-                       if (targ->dv_state_retry <= 10) {
-                               if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
-                                       msleep(ahc->our_id*1000/10);
-                               break;
-                       }
-#ifdef AHC_DEBUG
-                       if (ahc_debug & AHC_SHOW_DV) {
-                               ahc_print_devinfo(ahc, devinfo);
-                               printf("DV REB retries exhausted\n");
-                       }
-#endif
-                       /* FALLTHROUGH */
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       break;
-               }
-               break;
-
-       case AHC_DV_STATE_SU:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       break;
-               }
-               break;
-
-       case AHC_DV_STATE_BUSY:
-               switch (status & SS_MASK) {
-               case SS_NOP:
-               case SS_INQ_REFRESH:
-                       AHC_SET_DV_STATE(ahc, targ,
-                                        AHC_DV_STATE_INQ_SHORT_ASYNC);
-                       break;
-               case SS_TUR:
-               case SS_RETRY:
-                       AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
-                       if (ahc_cmd_get_transaction_status(cmd)
-                        == CAM_REQUEUE_REQ) {
-                               targ->dv_state_retry--;
-                       } else if (targ->dv_state_retry < 60) {
-                               if ((status & SSQ_DELAY) != 0)
-                                       ssleep(1);
-                       } else {
-#ifdef AHC_DEBUG
-                               if (ahc_debug & AHC_SHOW_DV) {
-                                       ahc_print_devinfo(ahc, devinfo);
-                                       printf("DV BUSY retries exhausted\n");
-                               }
-#endif
-                               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       }
-                       break;
-               default:
-                       AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-                       break;
-               }
-               break;
-
-       default:
-               printf("%s: Invalid DV completion state %d\n", ahc_name(ahc),
-                      targ->dv_state);
-               AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
-               break;
-       }
-}
-
-static void
-ahc_linux_dv_fill_cmd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-                     struct ahc_devinfo *devinfo)
-{
-       memset(cmd, 0, sizeof(struct scsi_cmnd));
-       cmd->device = ahc->platform_data->dv_scsi_dev;
-       cmd->scsi_done = ahc_linux_dv_complete;
-}
-
-/*
- * Synthesize an inquiry command.  On the return trip, it'll be
- * sniffed and the device transfer settings set for us.
- */
-static void
-ahc_linux_dv_inq(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-                struct ahc_devinfo *devinfo, struct ahc_linux_target *targ,
-                u_int request_length)
-{
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Sending INQ\n");
-       }
-#endif
-       if (targ->inq_data == NULL)
-               targ->inq_data = malloc(AHC_LINUX_DV_INQ_LEN,
-                                       M_DEVBUF, M_WAITOK);
-       if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC) {
-               if (targ->dv_buffer != NULL)
-                       free(targ->dv_buffer, M_DEVBUF);
-               targ->dv_buffer = malloc(AHC_LINUX_DV_INQ_LEN,
-                                        M_DEVBUF, M_WAITOK);
-       }
-
-       ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_READ;
-       cmd->cmd_len = 6;
-       cmd->cmnd[0] = INQUIRY;
-       cmd->cmnd[4] = request_length;
-       cmd->request_bufflen = request_length;
-       if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC)
-               cmd->request_buffer = targ->dv_buffer;
-       else
-               cmd->request_buffer = targ->inq_data;
-       memset(cmd->request_buffer, 0, AHC_LINUX_DV_INQ_LEN);
-}
-
-static void
-ahc_linux_dv_tur(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-                struct ahc_devinfo *devinfo)
-{
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Sending TUR\n");
-       }
-#endif
-       /* Do a TUR to clear out any non-fatal transitional state */
-       ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_NONE;
-       cmd->cmd_len = 6;
-       cmd->cmnd[0] = TEST_UNIT_READY;
-}
-
-#define AHC_REBD_LEN 4
-
-static void
-ahc_linux_dv_rebd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-                struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
-{
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Sending REBD\n");
-       }
-#endif
-       if (targ->dv_buffer != NULL)
-               free(targ->dv_buffer, M_DEVBUF);
-       targ->dv_buffer = malloc(AHC_REBD_LEN, M_DEVBUF, M_WAITOK);
-       ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_READ;
-       cmd->cmd_len = 10;
-       cmd->cmnd[0] = READ_BUFFER;
-       cmd->cmnd[1] = 0x0b;
-       scsi_ulto3b(AHC_REBD_LEN, &cmd->cmnd[6]);
-       cmd->request_bufflen = AHC_REBD_LEN;
-       cmd->underflow = cmd->request_bufflen;
-       cmd->request_buffer = targ->dv_buffer;
-}
-
-static void
-ahc_linux_dv_web(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-                struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
-{
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Sending WEB\n");
-       }
-#endif
-       ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_WRITE;
-       cmd->cmd_len = 10;
-       cmd->cmnd[0] = WRITE_BUFFER;
-       cmd->cmnd[1] = 0x0a;
-       scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
-       cmd->request_bufflen = targ->dv_echo_size;
-       cmd->underflow = cmd->request_bufflen;
-       cmd->request_buffer = targ->dv_buffer;
-}
-
-static void
-ahc_linux_dv_reb(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-                struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
-{
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Sending REB\n");
-       }
-#endif
-       ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_READ;
-       cmd->cmd_len = 10;
-       cmd->cmnd[0] = READ_BUFFER;
-       cmd->cmnd[1] = 0x0a;
-       scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
-       cmd->request_bufflen = targ->dv_echo_size;
-       cmd->underflow = cmd->request_bufflen;
-       cmd->request_buffer = targ->dv_buffer1;
-}
-
-static void
-ahc_linux_dv_su(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
-               struct ahc_devinfo *devinfo,
-               struct ahc_linux_target *targ)
-{
-       u_int le;
-
-       le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Sending SU\n");
-       }
-#endif
-       ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
-       cmd->sc_data_direction = SCSI_DATA_NONE;
-       cmd->cmd_len = 6;
-       cmd->cmnd[0] = START_STOP_UNIT;
-       cmd->cmnd[4] = le | SSS_START;
-}
-
-static int
-ahc_linux_fallback(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
-{
-       struct  ahc_linux_target *targ;
-       struct  ahc_initiator_tinfo *tinfo;
-       struct  ahc_transinfo *goal;
-       struct  ahc_tmode_tstate *tstate;
-       struct  ahc_syncrate *syncrate;
-       u_long  s;
-       u_int   width;
-       u_int   period;
-       u_int   offset;
-       u_int   ppr_options;
-       u_int   cur_speed;
-       u_int   wide_speed;
-       u_int   narrow_speed;
-       u_int   fallback_speed;
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               ahc_print_devinfo(ahc, devinfo);
-               printf("Trying to fallback\n");
-       }
-#endif
-       ahc_lock(ahc, &s);
-       targ = ahc->platform_data->targets[devinfo->target_offset];
-       tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
-                                   devinfo->our_scsiid,
-                                   devinfo->target, &tstate);
-       goal = &tinfo->goal;
-       width = goal->width;
-       period = goal->period;
-       offset = goal->offset;
-       ppr_options = goal->ppr_options;
-       if (offset == 0)
-               period = AHC_ASYNC_XFER_PERIOD;
-       if (targ->dv_next_narrow_period == 0)
-               targ->dv_next_narrow_period = MAX(period, AHC_SYNCRATE_ULTRA2);
-       if (targ->dv_next_wide_period == 0)
-               targ->dv_next_wide_period = period;
-       if (targ->dv_max_width == 0)
-               targ->dv_max_width = width;
-       if (targ->dv_max_ppr_options == 0)
-               targ->dv_max_ppr_options = ppr_options;
-       if (targ->dv_last_ppr_options == 0)
-               targ->dv_last_ppr_options = ppr_options;
-
-       cur_speed = aic_calc_speed(width, period, offset, AHC_SYNCRATE_MIN);
-       wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
-                                         targ->dv_next_wide_period,
-                                         MAX_OFFSET,
-                                         AHC_SYNCRATE_MIN);
-       narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
-                                           targ->dv_next_narrow_period,
-                                           MAX_OFFSET,
-                                           AHC_SYNCRATE_MIN);
-       fallback_speed = aic_calc_speed(width, period+1, offset,
-                                       AHC_SYNCRATE_MIN);
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
-                      "fallback_speed= %d\n", cur_speed, wide_speed,
-                      narrow_speed, fallback_speed);
-       }
-#endif
-
-       if (cur_speed > 160000) {
-               /*
-                * Paced/DT/IU_REQ only transfer speeds.  All we
-                * can do is fallback in terms of syncrate.
-                */
-               period++;
-       } else if (cur_speed > 80000) {
-               if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
-                       /*
-                        * Try without IU_REQ as it may be confusing
-                        * an expander.
-                        */
-                       ppr_options &= ~MSG_EXT_PPR_IU_REQ;
-               } else {
-                       /*
-                        * Paced/DT only transfer speeds.  All we
-                        * can do is fallback in terms of syncrate.
-                        */
-                       period++;
-                       ppr_options = targ->dv_max_ppr_options;
-               }
-       } else if (cur_speed > 3300) {
-
-               /*
-                * In this range we have the following
-                * options, ordered from highest to
-                * lowest desirability:
-                *
-                * o Wide/DT
-                * o Wide/non-DT
-                * o Narrow at a potentially higher sync rate.
-                *
-                * All modes are tested with and without IU_REQ
-                * set since using IUs may confuse an expander.
-                */
-               if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
-
-                       ppr_options &= ~MSG_EXT_PPR_IU_REQ;
-               } else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
-                       /*
-                        * Try going non-DT.
-                        */
-                       ppr_options = targ->dv_max_ppr_options;
-                       ppr_options &= ~MSG_EXT_PPR_DT_REQ;
-               } else if (targ->dv_last_ppr_options != 0) {
-                       /*
-                        * Try without QAS or any other PPR options.
-                        * We may need a non-PPR message to work with
-                        * an expander.  We look at the "last PPR options"
-                        * so we will perform this fallback even if the
-                        * target responded to our PPR negotiation with
-                        * no option bits set.
-                        */
-                       ppr_options = 0;
-               } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
-                       /*
-                        * If the next narrow speed is greater than
-                        * the next wide speed, fallback to narrow.
-                        * Otherwise fallback to the next DT/Wide setting.
-                        * The narrow async speed will always be smaller
-                        * than the wide async speed, so handle this case
-                        * specifically.
-                        */
-                       ppr_options = targ->dv_max_ppr_options;
-                       if (narrow_speed > fallback_speed
-                        || period >= AHC_ASYNC_XFER_PERIOD) {
-                               targ->dv_next_wide_period = period+1;
-                               width = MSG_EXT_WDTR_BUS_8_BIT;
-                               period = targ->dv_next_narrow_period;
-                       } else {
-                               period++;
-                       }
-               } else if ((ahc->features & AHC_WIDE) != 0
-                       && targ->dv_max_width != 0
-                       && wide_speed >= fallback_speed
-                       && (targ->dv_next_wide_period <= AHC_ASYNC_XFER_PERIOD
-                        || period >= AHC_ASYNC_XFER_PERIOD)) {
-
-                       /*
-                        * We are narrow.  Try falling back
-                        * to the next wide speed with 
-                        * all supported ppr options set.
-                        */
-                       targ->dv_next_narrow_period = period+1;
-                       width = MSG_EXT_WDTR_BUS_16_BIT;
-                       period = targ->dv_next_wide_period;
-                       ppr_options = targ->dv_max_ppr_options;
-               } else {
-                       /* Only narrow fallback is allowed. */
-                       period++;
-                       ppr_options = targ->dv_max_ppr_options;
-               }
-       } else {
-               ahc_unlock(ahc, &s);
-               return (-1);
-       }
-       offset = MAX_OFFSET;
-       syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
-                                    AHC_SYNCRATE_DT);
-       ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, FALSE);
-       if (period == 0) {
-               period = 0;
-               offset = 0;
-               ppr_options = 0;
-               if (width == MSG_EXT_WDTR_BUS_8_BIT)
-                       targ->dv_next_narrow_period = AHC_ASYNC_XFER_PERIOD;
-               else
-                       targ->dv_next_wide_period = AHC_ASYNC_XFER_PERIOD;
-       }
-       ahc_set_syncrate(ahc, devinfo, syncrate, period, offset,
-                        ppr_options, AHC_TRANS_GOAL, FALSE);
-       targ->dv_last_ppr_options = ppr_options;
-       ahc_unlock(ahc, &s);
-       return (0);
-}
-
-static void
-ahc_linux_dv_timeout(struct scsi_cmnd *cmd)
-{
-       struct  ahc_softc *ahc;
-       struct  scb *scb;
-       u_long  flags;
-
-       ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
-       ahc_lock(ahc, &flags);
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV) {
-               printf("%s: Timeout while doing DV command %x.\n",
-                      ahc_name(ahc), cmd->cmnd[0]);
-               ahc_dump_card_state(ahc);
-       }
-#endif
-       
-       /*
-        * Guard against "done race".  No action is
-        * required if we just completed.
-        */
-       if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
-               ahc_unlock(ahc, &flags);
-               return;
-       }
-
-       /*
-        * Command has not completed.  Mark this
-        * SCB as having failing status prior to
-        * resetting the bus, so we get the correct
-        * error code.
-        */
-       if ((scb->flags & SCB_SENSE) != 0)
-               ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
-       else
-               ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
-       ahc_reset_channel(ahc, cmd->device->channel + 'A', /*initiate*/TRUE);
-
-       /*
-        * Add a minimal bus settle delay for devices that are slow to
-        * respond after bus resets.
-        */
-       ahc_linux_freeze_simq(ahc);
-       init_timer(&ahc->platform_data->reset_timer);
-       ahc->platform_data->reset_timer.data = (u_long)ahc;
-       ahc->platform_data->reset_timer.expires = jiffies + HZ / 2;
-       ahc->platform_data->reset_timer.function =
-           (ahc_linux_callback_t *)ahc_linux_release_simq;
-       add_timer(&ahc->platform_data->reset_timer);
-       if (ahc_linux_next_device_to_run(ahc) != NULL)
-               ahc_schedule_runq(ahc);
-       ahc_linux_run_complete_queue(ahc);
-       ahc_unlock(ahc, &flags);
-}
-
-static void
-ahc_linux_dv_complete(struct scsi_cmnd *cmd)
-{
-       struct ahc_softc *ahc;
-
-       ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
-
-       /* Delete the DV timer before it goes off! */
-       scsi_delete_timer(cmd);
-
-#ifdef AHC_DEBUG
-       if (ahc_debug & AHC_SHOW_DV)
-               printf("%s:%d:%d: Command completed, status= 0x%x\n",
-                      ahc_name(ahc), cmd->device->channel,
-                      cmd->device->id, cmd->result);
-#endif
-
-       /* Wake up the state machine */
-       up(&ahc->platform_data->dv_cmd_sem);
-}
-
-static void
-ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ)
-{
-       uint16_t b;
-       u_int    i;
-       u_int    j;
-
-       if (targ->dv_buffer != NULL)
-               free(targ->dv_buffer, M_DEVBUF);
-       targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
-       if (targ->dv_buffer1 != NULL)
-               free(targ->dv_buffer1, M_DEVBUF);
-       targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
-
-       i = 0;
-       b = 0x0001;
-       for (j = 0 ; i < targ->dv_echo_size; j++) {
-               if (j < 32) {
-                       /*
-                        * 32bytes of sequential numbers.
-                        */
-                       targ->dv_buffer[i++] = j & 0xff;
-               } else if (j < 48) {
-                       /*
-                        * 32bytes of repeating 0x0000, 0xffff.
-                        */
-                       targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
-               } else if (j < 64) {
-                       /*
-                        * 32bytes of repeating 0x5555, 0xaaaa.
-                        */
-                       targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
-               } else {
-                       /*
-                        * Remaining buffer is filled with a repeating
-                        * pattern of:
-                        *
-                        *       0xffff
-                        *      ~0x0001 << shifted once in each loop.
-                        */
-                       if (j & 0x02) {
-                               if (j & 0x01) {
-                                       targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
-                                       b <<= 1;
-                                       if (b == 0x0000)
-                                               b = 0x0001;
-                               } else {
-                                       targ->dv_buffer[i++] = (~b & 0xff);
-                               }
-                       } else {
-                               targ->dv_buffer[i++] = 0xff;
-                       }
-               }
-       }
-}
-
-static u_int
-ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
-{
-       static int warned_user;
-       u_int tags;
-
-       tags = 0;
-       if ((ahc->user_discenable & devinfo->target_mask) != 0) {
-               if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
-                       if (warned_user == 0) {
-
-                               printf(KERN_WARNING
-"aic7xxx: WARNING: Insufficient tag_info instances\n"
-"aic7xxx: for installed controllers. Using defaults\n"
-"aic7xxx: Please update the aic7xxx_tag_info array in\n"
-"aic7xxx: the aic7xxx_osm..c source file.\n");
-                               warned_user++;
-                       }
-                       tags = AHC_MAX_QUEUE;
-               } else {
-                       adapter_tag_info_t *tag_info;
-
-                       tag_info = &aic7xxx_tag_info[ahc->unit];
-                       tags = tag_info->tag_commands[devinfo->target_offset];
-                       if (tags > AHC_MAX_QUEUE)
-                               tags = AHC_MAX_QUEUE;
-               }
-       }
-       return (tags);
-}
-
-static u_int
-ahc_linux_user_dv_setting(struct ahc_softc *ahc)
-{
-       static int warned_user;
-       int dv;
-
-       if (ahc->unit >= NUM_ELEMENTS(aic7xxx_dv_settings)) {
-               if (warned_user == 0) {
-
-                       printf(KERN_WARNING
-"aic7xxx: WARNING: Insufficient dv settings instances\n"
-"aic7xxx: for installed controllers. Using defaults\n"
-"aic7xxx: Please update the aic7xxx_dv_settings array\n"
-"aic7xxx: in the aic7xxx_osm.c source file.\n");
-                       warned_user++;
-               }
-               dv = -1;
-       } else {
-
-               dv = aic7xxx_dv_settings[ahc->unit];
-       }
-
-       if (dv < 0) {
-               u_long s;
-
-               /*
-                * Apply the default.
-                */
-               /*
-                * XXX - Enable DV on non-U160 controllers once it
-                *       has been tested there.
-                */
-               ahc_lock(ahc, &s);
-               dv = (ahc->features & AHC_DT);
-               if (ahc->seep_config != 0
-                && ahc->seep_config->signature >= CFSIGNATURE2)
-                       dv = (ahc->seep_config->adapter_control & CFENABLEDV);
-               ahc_unlock(ahc, &s);
-       }
-       return (dv);
-}
-
-/*
- * Determines the queue depth for a given device.
- */
-static void
-ahc_linux_device_queue_depth(struct ahc_softc *ahc,
-                            struct ahc_linux_device *dev)
-{
-       struct  ahc_devinfo devinfo;
-       u_int   tags;
-
-       ahc_compile_devinfo(&devinfo,
-                           dev->target->channel == 0
-                         ? ahc->our_id : ahc->our_id_b,
-                           dev->target->target, dev->lun,
-                           dev->target->channel == 0 ? 'A' : 'B',
-                           ROLE_INITIATOR);
-       tags = ahc_linux_user_tagdepth(ahc, &devinfo);
-       if (tags != 0
-        && dev->scsi_device != NULL
-        && dev->scsi_device->tagged_supported != 0) {
-
-               ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
-               ahc_print_devinfo(ahc, &devinfo);
-               printf("Tagged Queuing enabled.  Depth %d\n", tags);
-       } else {
-               ahc_set_tags(ahc, &devinfo, AHC_QUEUE_NONE);
-       }
-}
-
-static void
-ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
-{
-       struct   ahc_cmd *acmd;
-       struct   scsi_cmnd *cmd;
-       struct   scb *scb;
-       struct   hardware_scb *hscb;
-       struct   ahc_initiator_tinfo *tinfo;
-       struct   ahc_tmode_tstate *tstate;
-       uint16_t mask;
-
-       if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
-               panic("running device on run list");
-
-       while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
-           && dev->openings > 0 && dev->qfrozen == 0) {
-
-               /*
-                * Schedule us to run later.  The only reason we are not
-                * running is because the whole controller Q is frozen.
-                */
-               if (ahc->platform_data->qfrozen != 0
-                && AHC_DV_SIMQ_FROZEN(ahc) == 0) {
-                       TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
-                                         dev, links);
-                       dev->flags |= AHC_DEV_ON_RUN_LIST;
-                       return;
-               }
-               /*
-                * Get an scb to use.
-                */
-               if ((scb = ahc_get_scb(ahc)) == NULL) {
-                       TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
-                                        dev, links);
-                       dev->flags |= AHC_DEV_ON_RUN_LIST;
-                       ahc->flags |= AHC_RESOURCE_SHORTAGE;
-                       return;
-               }
-               TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
-               cmd = &acmd_scsi_cmd(acmd);
-               scb->io_ctx = cmd;
-               scb->platform_data->dev = dev;
-               hscb = scb->hscb;
-               cmd->host_scribble = (char *)scb;
-
-               /*
-                * Fill out basics of the HSCB.
-                */
-               hscb->control = 0;
-               hscb->scsiid = BUILD_SCSIID(ahc, cmd);
-               hscb->lun = cmd->device->lun;
-               mask = SCB_GET_TARGET_MASK(ahc, scb);
-               tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
-                                           SCB_GET_OUR_ID(scb),
-                                           SCB_GET_TARGET(ahc, scb), &tstate);
-               hscb->scsirate = tinfo->scsirate;
-               hscb->scsioffset = tinfo->curr.offset;
-               if ((tstate->ultraenb & mask) != 0)
-                       hscb->control |= ULTRAENB;
-
-               if ((ahc->user_discenable & mask) != 0)
-                       hscb->control |= DISCENB;
-
-               if (AHC_DV_CMD(cmd) != 0)
-                       scb->flags |= SCB_SILENT;
-
-               if ((tstate->auto_negotiate & mask) != 0) {
-                       scb->flags |= SCB_AUTO_NEGOTIATE;
-                       scb->hscb->control |= MK_MESSAGE;
-               }
-
-               if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
-                       int     msg_bytes;
-                       uint8_t tag_msgs[2];
-
-                       msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
-                       if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
-                               hscb->control |= tag_msgs[0];
-                               if (tag_msgs[0] == MSG_ORDERED_TASK)
-                                       dev->commands_since_idle_or_otag = 0;
-                       } else
-#endif
-                       if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
-                        && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
-                               hscb->control |= MSG_ORDERED_TASK;
-                               dev->commands_since_idle_or_otag = 0;
-                       } else {
-                               hscb->control |= MSG_SIMPLE_TASK;
-                       }
-               }
-
-               hscb->cdb_len = cmd->cmd_len;
-               if (hscb->cdb_len <= 12) {
-                       memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
-               } else {
-                       memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
-                       scb->flags |= SCB_CDB32_PTR;
-               }
-
-               scb->platform_data->xfer_len = 0;
-               ahc_set_residual(scb, 0);
-               ahc_set_sense_residual(scb, 0);
-               scb->sg_count = 0;
-               if (cmd->use_sg != 0) {
-                       struct  ahc_dma_seg *sg;
-                       struct  scatterlist *cur_seg;
-                       struct  scatterlist *end_seg;
-                       int     nseg;
-
-                       cur_seg = (struct scatterlist *)cmd->request_buffer;
-                       nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
-                           scsi_to_pci_dma_dir(cmd->sc_data_direction));
-                       end_seg = cur_seg + nseg;
-                       /* Copy the segments into the SG list. */
-                       sg = scb->sg_list;
-                       /*
-                        * The sg_count may be larger than nseg if
-                        * a transfer crosses a 32bit page.
-                        */ 
-                       while (cur_seg < end_seg) {
-                               dma_addr_t addr;
-                               bus_size_t len;
-                               int consumed;
-
-                               addr = sg_dma_address(cur_seg);
-                               len = sg_dma_len(cur_seg);
-                               consumed = ahc_linux_map_seg(ahc, scb,
-                                                            sg, addr, len);
-                               sg += consumed;
-                               scb->sg_count += consumed;
-                               cur_seg++;
-                       }
-                       sg--;
-                       sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
-
-                       /*
-                        * Reset the sg list pointer.
+                        * Reset the sg list pointer.
                         */
                        scb->hscb->sgptr =
                            ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
@@ -3703,7 +2197,7 @@ ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
                        addr = pci_map_single(ahc->dev_softc,
                               cmd->request_buffer,
                               cmd->request_bufflen,
-                              scsi_to_pci_dma_dir(cmd->sc_data_direction));
+                              cmd->sc_data_direction);
                        scb->platform_data->buf_busaddr = addr;
                        scb->sg_count = ahc_linux_map_seg(ahc, scb,
                                                          sg, addr,
@@ -3805,7 +2299,6 @@ ahc_linux_alloc_target(struct ahc_softc *ahc, u_int channel, u_int target)
        targ->channel = channel;
        targ->target = target;
        targ->ahc = ahc;
-       targ->flags = AHC_DV_REQUIRED;
        ahc->platform_data->targets[target_offset] = targ;
        return (targ);
 }
@@ -3844,10 +2337,6 @@ ahc_linux_free_target(struct ahc_softc *ahc, struct ahc_linux_target *targ)
        ahc->platform_data->targets[target_offset] = NULL;
        if (targ->inq_data != NULL)
                free(targ->inq_data, M_DEVBUF);
-       if (targ->dv_buffer != NULL)
-               free(targ->dv_buffer, M_DEVBUF);
-       if (targ->dv_buffer1 != NULL)
-               free(targ->dv_buffer1, M_DEVBUF);
        free(targ, M_DEVBUF);
 }
 
@@ -3894,8 +2383,7 @@ __ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
        targ->devices[dev->lun] = NULL;
        free(dev, M_DEVBUF);
        targ->refcount--;
-       if (targ->refcount == 0
-        && (targ->flags & AHC_DV_REQUIRED) == 0)
+       if (targ->refcount == 0)
                ahc_linux_free_target(ahc, targ);
 }
 
@@ -4099,16 +2587,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
                ahc_linux_handle_scsi_status(ahc, dev, scb);
        } else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
                dev->flags |= AHC_DEV_UNCONFIGURED;
-               if (AHC_DV_CMD(cmd) == FALSE)
-                       dev->target->flags &= ~AHC_DV_REQUIRED;
        }
-       /*
-        * Start DV for devices that require it assuming the first command
-        * sent does not result in a selection timeout.
-        */
-       if (ahc_get_transaction_status(scb) != CAM_SEL_TIMEOUT
-        && (dev->target->flags & AHC_DV_REQUIRED) != 0)
-               ahc_linux_start_dv(ahc);
 
        if (dev->openings == 1
         && ahc_get_transaction_status(scb) == CAM_REQ_CMP
@@ -4152,13 +2631,6 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
 
        ahc_free_scb(ahc, scb);
        ahc_linux_queue_cmd_complete(ahc, cmd);
-
-       if ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_EMPTY) != 0
-        && LIST_FIRST(&ahc->pending_scbs) == NULL) {
-               ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_EMPTY;
-               up(&ahc->platform_data->dv_sem);
-       }
-               
 }
 
 static void
@@ -4335,7 +2807,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
         * full error information available when making
         * state change decisions.
         */
-       if (AHC_DV_CMD(cmd) == FALSE) {
+       {
                u_int new_status;
 
                switch (ahc_cmd_get_transaction_status(cmd)) {
@@ -4425,115 +2897,6 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
                TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
 }
 
-static void
-ahc_linux_filter_inquiry(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
-{
-       struct  scsi_inquiry_data *sid;
-       struct  ahc_initiator_tinfo *tinfo;
-       struct  ahc_transinfo *user;
-       struct  ahc_transinfo *goal;
-       struct  ahc_transinfo *curr;
-       struct  ahc_tmode_tstate *tstate;
-       struct  ahc_syncrate *syncrate;
-       struct  ahc_linux_device *dev;
-       u_int   maxsync;
-       u_int   width;
-       u_int   period;
-       u_int   offset;
-       u_int   ppr_options;
-       u_int   trans_version;
-       u_int   prot_version;
-
-       /*
-        * Determine if this lun actually exists.  If so,
-        * hold on to its corresponding device structure.
-        * If not, make sure we release the device and
-        * don't bother processing the rest of this inquiry
-        * command.
-        */
-       dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
-                                  devinfo->target, devinfo->lun,
-                                  /*alloc*/TRUE);
-
-       sid = (struct scsi_inquiry_data *)dev->target->inq_data;
-       if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
-
-               dev->flags &= ~AHC_DEV_UNCONFIGURED;
-       } else {
-               dev->flags |= AHC_DEV_UNCONFIGURED;
-               return;
-       }
-
-       /*
-        * Update our notion of this device's transfer
-        * negotiation capabilities.
-        */
-       tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
-                                   devinfo->our_scsiid,
-                                   devinfo->target, &tstate);
-       user = &tinfo->user;
-       goal = &tinfo->goal;
-       curr = &tinfo->curr;
-       width = user->width;
-       period = user->period;
-       offset = user->offset;
-       ppr_options = user->ppr_options;
-       trans_version = user->transport_version;
-       prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
-
-       /*
-        * Only attempt SPI3/4 once we've verified that
-        * the device claims to support SPI3/4 features.
-        */
-       if (prot_version < SCSI_REV_2)
-               trans_version = SID_ANSI_REV(sid);
-       else
-               trans_version = SCSI_REV_2;
-
-       if ((sid->flags & SID_WBus16) == 0)
-               width = MSG_EXT_WDTR_BUS_8_BIT;
-       if ((sid->flags & SID_Sync) == 0) {
-               period = 0;
-               offset = 0;
-               ppr_options = 0;
-       }
-       if ((sid->spi3data & SID_SPI_QAS) == 0)
-               ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
-       if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
-               ppr_options &= MSG_EXT_PPR_QAS_REQ;
-       if ((sid->spi3data & SID_SPI_IUS) == 0)
-               ppr_options &= (MSG_EXT_PPR_DT_REQ
-                             | MSG_EXT_PPR_QAS_REQ);
-
-       if (prot_version > SCSI_REV_2
-        && ppr_options != 0)
-               trans_version = user->transport_version;
-
-       ahc_validate_width(ahc, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
-       if ((ahc->features & AHC_ULTRA2) != 0)
-               maxsync = AHC_SYNCRATE_DT;
-       else if ((ahc->features & AHC_ULTRA) != 0)
-               maxsync = AHC_SYNCRATE_ULTRA;
-       else
-               maxsync = AHC_SYNCRATE_FAST;
-
-       syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
-       ahc_validate_offset(ahc, /*tinfo limit*/NULL, syncrate,
-                           &offset, width, ROLE_UNKNOWN);
-       if (offset == 0 || period == 0) {
-               period = 0;
-               offset = 0;
-               ppr_options = 0;
-       }
-       /* Apply our filtered user settings. */
-       curr->transport_version = trans_version;
-       curr->protocol_version = prot_version;
-       ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, /*paused*/FALSE);
-       ahc_set_syncrate(ahc, devinfo, syncrate, period,
-                        offset, ppr_options, AHC_TRANS_GOAL,
-                        /*paused*/FALSE);
-}
-
 static void
 ahc_linux_sem_timeout(u_long arg)
 {
@@ -4579,11 +2942,6 @@ ahc_linux_release_simq(u_long arg)
                ahc->platform_data->qfrozen--;
        if (ahc->platform_data->qfrozen == 0)
                unblock_reqs = 1;
-       if (AHC_DV_SIMQ_FROZEN(ahc)
-        && ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_RELEASE) != 0)) {
-               ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_RELEASE;
-               up(&ahc->platform_data->dv_sem);
-       }
        ahc_schedule_runq(ahc);
        ahc_unlock(ahc, &s);
        /*
@@ -4990,13 +3348,267 @@ ahc_platform_dump_card_state(struct ahc_softc *ahc)
 
 static void ahc_linux_exit(void);
 
+static void ahc_linux_get_period(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       spi_period(starget) = tinfo->curr.period;
+}
+
+static void ahc_linux_set_period(struct scsi_target *starget, int period)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       struct ahc_devinfo devinfo;
+       unsigned int ppr_options = tinfo->curr.ppr_options;
+       unsigned long flags;
+       unsigned long offset = tinfo->curr.offset;
+       struct ahc_syncrate *syncrate;
+
+       if (offset == 0)
+               offset = MAX_OFFSET;
+
+       ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+                           starget->channel + 'A', ROLE_INITIATOR);
+       syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+       ahc_lock(ahc, &flags);
+       ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
+                        ppr_options, AHC_TRANS_GOAL, FALSE);
+       ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_get_offset(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       spi_offset(starget) = tinfo->curr.offset;
+}
+
+static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       struct ahc_devinfo devinfo;
+       unsigned int ppr_options = 0;
+       unsigned int period = 0;
+       unsigned long flags;
+       struct ahc_syncrate *syncrate = NULL;
+
+       ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+                           starget->channel + 'A', ROLE_INITIATOR);
+       if (offset != 0) {
+               syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+               period = tinfo->curr.period;
+               ppr_options = tinfo->curr.ppr_options;
+       }
+       ahc_lock(ahc, &flags);
+       ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
+                        ppr_options, AHC_TRANS_GOAL, FALSE);
+       ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_get_width(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       spi_width(starget) = tinfo->curr.width;
+}
+
+static void ahc_linux_set_width(struct scsi_target *starget, int width)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_devinfo devinfo;
+       unsigned long flags;
+
+       ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+                           starget->channel + 'A', ROLE_INITIATOR);
+       ahc_lock(ahc, &flags);
+       ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
+       ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_get_dt(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ;
+}
+
+static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       struct ahc_devinfo devinfo;
+       unsigned int ppr_options = tinfo->curr.ppr_options
+               & ~MSG_EXT_PPR_DT_REQ;
+       unsigned int period = tinfo->curr.period;
+       unsigned long flags;
+       struct ahc_syncrate *syncrate;
+
+       ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+                           starget->channel + 'A', ROLE_INITIATOR);
+       syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+                                    dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
+       ahc_lock(ahc, &flags);
+       ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
+                        ppr_options, AHC_TRANS_GOAL, FALSE);
+       ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_get_qas(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ;
+}
+
+static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       struct ahc_devinfo devinfo;
+       unsigned int ppr_options = tinfo->curr.ppr_options
+               & ~MSG_EXT_PPR_QAS_REQ;
+       unsigned int period = tinfo->curr.period;
+       unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+       unsigned long flags;
+       struct ahc_syncrate *syncrate;
+
+       if (qas)
+               ppr_options |= MSG_EXT_PPR_QAS_REQ;
+
+       ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+                           starget->channel + 'A', ROLE_INITIATOR);
+       syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+                                    dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
+       ahc_lock(ahc, &flags);
+       ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
+                        ppr_options, AHC_TRANS_GOAL, FALSE);
+       ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_get_iu(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ;
+}
+
+static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+       struct ahc_tmode_tstate *tstate;
+       struct ahc_initiator_tinfo *tinfo 
+               = ahc_fetch_transinfo(ahc,
+                                     starget->channel + 'A',
+                                     shost->this_id, starget->id, &tstate);
+       struct ahc_devinfo devinfo;
+       unsigned int ppr_options = tinfo->curr.ppr_options
+               & ~MSG_EXT_PPR_IU_REQ;
+       unsigned int period = tinfo->curr.period;
+       unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+       unsigned long flags;
+       struct ahc_syncrate *syncrate;
+
+       if (iu)
+               ppr_options |= MSG_EXT_PPR_IU_REQ;
+
+       ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+                           starget->channel + 'A', ROLE_INITIATOR);
+       syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+                                    dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
+       ahc_lock(ahc, &flags);
+       ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
+                        ppr_options, AHC_TRANS_GOAL, FALSE);
+       ahc_unlock(ahc, &flags);
+}
+
+static struct spi_function_template ahc_linux_transport_functions = {
+       .get_offset     = ahc_linux_get_offset,
+       .set_offset     = ahc_linux_set_offset,
+       .show_offset    = 1,
+       .get_period     = ahc_linux_get_period,
+       .set_period     = ahc_linux_set_period,
+       .show_period    = 1,
+       .get_width      = ahc_linux_get_width,
+       .set_width      = ahc_linux_set_width,
+       .show_width     = 1,
+       .get_dt         = ahc_linux_get_dt,
+       .set_dt         = ahc_linux_set_dt,
+       .show_dt        = 1,
+       .get_iu         = ahc_linux_get_iu,
+       .set_iu         = ahc_linux_set_iu,
+       .show_iu        = 1,
+       .get_qas        = ahc_linux_get_qas,
+       .set_qas        = ahc_linux_set_qas,
+       .show_qas       = 1,
+};
+
+
+
 static int __init
 ahc_linux_init(void)
 {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+       ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions);
+       if (!ahc_linux_transport_template)
+               return -ENODEV;
        int rc = ahc_linux_detect(&aic7xxx_driver_template);
        if (rc)
                return rc;
+       spi_release_transport(ahc_linux_transport_template);
        ahc_linux_exit();
        return -ENODEV;
 #else
@@ -5014,19 +3626,6 @@ ahc_linux_init(void)
 static void
 ahc_linux_exit(void)
 {
-       struct ahc_softc *ahc;
-
-       /*
-        * Shutdown DV threads before going into the SCSI mid-layer.
-        * This avoids situations where the mid-layer locks the entire
-        * kernel so that waiting for our DV threads to exit leads
-        * to deadlock.
-        */
-       TAILQ_FOREACH(ahc, &ahc_tailq, links) {
-
-               ahc_linux_kill_dv_thread(ahc);
-       }
-
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        /*
         * In 2.4 we have to unregister from the PCI core _after_
@@ -5037,6 +3636,7 @@ ahc_linux_exit(void)
 #endif
        ahc_linux_pci_exit();
        ahc_linux_eisa_exit();
+       spi_release_transport(ahc_linux_transport_template);
 }
 
 module_init(ahc_linux_init);
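
The net effect of the aic7xxx_osm.c changes above is that the driver's private domain validation machinery is dropped and transfer parameters are instead published through the SPI transport class: per-parameter get/set handlers are collected in a spi_function_template, the template is attached at module load and released on unload. A minimal sketch of that lifecycle, using only the calls visible in this hunk (the example_* names, the placeholder handler bodies and the scsi_transport_template pointer type are assumptions for illustration, not the driver's code):

#include <linux/init.h>
#include <linux/module.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_spi.h>

/* Placeholder handlers with the signatures used by the patch above. */
static void example_get_period(struct scsi_target *starget)
{
	spi_period(starget) = 0;	/* report the negotiated period here */
}

static void example_set_period(struct scsi_target *starget, int period)
{
	/* renegotiate the period with the hardware; omitted in this sketch */
}

static struct spi_function_template example_transport_functions = {
	.get_period  = example_get_period,
	.set_period  = example_set_period,
	.show_period = 1,		/* expose the attribute via the class */
};

static struct scsi_transport_template *example_transport_template;

static int __init example_init(void)
{
	example_transport_template =
	    spi_attach_transport(&example_transport_functions);
	if (!example_transport_template)
		return -ENODEV;
	/* ... register the host driver here ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* ... unregister the host driver ... */
	spi_release_transport(example_transport_template);
}

module_init(example_init);
module_exit(example_exit);
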
index db3bd6321dd4b219ba5fa9b5d89ad173d0e810fc..c401537067b65c25788cecf6b3c80be88272fc1c 100644 (file)
@@ -424,27 +424,9 @@ struct ahc_linux_device {
 };
 
 typedef enum {
-       AHC_DV_REQUIRED          = 0x01,
        AHC_INQ_VALID            = 0x02,
-       AHC_BASIC_DV             = 0x04,
-       AHC_ENHANCED_DV          = 0x08
 } ahc_linux_targ_flags;
 
-/* DV States */
-typedef enum {
-       AHC_DV_STATE_EXIT = 0,
-       AHC_DV_STATE_INQ_SHORT_ASYNC,
-       AHC_DV_STATE_INQ_ASYNC,
-       AHC_DV_STATE_INQ_ASYNC_VERIFY,
-       AHC_DV_STATE_TUR,
-       AHC_DV_STATE_REBD,
-       AHC_DV_STATE_INQ_VERIFY,
-       AHC_DV_STATE_WEB,
-       AHC_DV_STATE_REB,
-       AHC_DV_STATE_SU,
-       AHC_DV_STATE_BUSY
-} ahc_dv_state;
-
 struct ahc_linux_target {
        struct ahc_linux_device  *devices[AHC_NUM_LUNS];
        int                       channel;
@@ -454,19 +436,6 @@ struct ahc_linux_target {
        struct ahc_softc         *ahc;
        ahc_linux_targ_flags      flags;
        struct scsi_inquiry_data *inq_data;
-       /*
-        * The next "fallback" period to use for narrow/wide transfers.
-        */
-       uint8_t                   dv_next_narrow_period;
-       uint8_t                   dv_next_wide_period;
-       uint8_t                   dv_max_width;
-       uint8_t                   dv_max_ppr_options;
-       uint8_t                   dv_last_ppr_options;
-       u_int                     dv_echo_size;
-       ahc_dv_state              dv_state;
-       u_int                     dv_state_retry;
-       char                     *dv_buffer;
-       char                     *dv_buffer1;
 };
 
 /********************* Definitions Required by the Core ***********************/
@@ -511,10 +480,6 @@ struct scb_platform_data {
  * this driver.
  */
 typedef enum {
-       AHC_DV_WAIT_SIMQ_EMPTY   = 0x01,
-       AHC_DV_WAIT_SIMQ_RELEASE = 0x02,
-       AHC_DV_ACTIVE            = 0x04,
-       AHC_DV_SHUTDOWN          = 0x08,
        AHC_RUN_CMPLT_Q_TIMER    = 0x10
 } ahc_linux_softc_flags;
 
@@ -937,11 +902,6 @@ int        ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
 #endif
 
 /*************************** Domain Validation ********************************/
-#define AHC_DV_CMD(cmd) ((cmd)->scsi_done == ahc_linux_dv_complete)
-#define AHC_DV_SIMQ_FROZEN(ahc)                                        \
-       ((((ahc)->platform_data->flags & AHC_DV_ACTIVE) != 0)   \
-        && (ahc)->platform_data->qfrozen == 1)
-
 /*********************** Transaction Access Wrappers *************************/
 static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
 static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
index d40ba0760c765a28503df3e566e3b0154b25b925..26f17e3fc45c6113b99e138dde03138502e85b4d 100644 (file)
@@ -103,9 +103,9 @@ typedef enum {
 } ac_code;
 
 typedef enum {
-       CAM_DIR_IN              = SCSI_DATA_READ,
-       CAM_DIR_OUT             = SCSI_DATA_WRITE,
-       CAM_DIR_NONE            = SCSI_DATA_NONE
+       CAM_DIR_IN              = DMA_FROM_DEVICE,
+       CAM_DIR_OUT             = DMA_TO_DEVICE,
+       CAM_DIR_NONE            = DMA_NONE,
 } ccb_flags;
 
 #endif /* _AIC7XXX_CAM_H */
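
The cam.h hunk above shows the pattern repeated throughout this diff: the old SCSI_DATA_* direction values and the scsi_to_pci_dma_dir() wrapper give way to the generic DMA API directions (DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE, DMA_BIDIRECTIONAL), so a command's sc_data_direction can be handed straight to the PCI mapping helpers. A hedged sketch of the resulting mapping path (the example_* name is illustrative; the later unmap call must use the same direction, as the surrounding hunks do):

#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>

/* Map a command's data buffer, passing sc_data_direction through
 * unchanged; returns the number of mapped segments. */
static int example_map_data(struct pci_dev *pdev, struct scsi_cmnd *cmd,
			    dma_addr_t *busaddr)
{
	if (cmd->use_sg) {
		struct scatterlist *sgl =
		    (struct scatterlist *)cmd->request_buffer;

		return pci_map_sg(pdev, sgl, cmd->use_sg,
				  cmd->sc_data_direction);
	}

	*busaddr = pci_map_single(pdev, cmd->request_buffer,
				  cmd->request_bufflen,
				  cmd->sc_data_direction);
	return 1;
}
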
index a6e7bb0d53f4a7becb3b6bbbb897af5ab8e20f79..9e9d0c40187e50ed1af8e5199514611e0903f26f 100644 (file)
@@ -2700,12 +2700,12 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
     struct scatterlist *sg;
 
     sg = (struct scatterlist *)cmd->request_buffer;
-    pci_unmap_sg(p->pdev, sg, cmd->use_sg, scsi_to_pci_dma_dir(cmd->sc_data_direction));
+    pci_unmap_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
   }
   else if (cmd->request_bufflen)
     pci_unmap_single(p->pdev, aic7xxx_mapping(cmd),
                     cmd->request_bufflen,
-                     scsi_to_pci_dma_dir(cmd->sc_data_direction));
+                     cmd->sc_data_direction);
   if (scb->flags & SCB_SENSE)
   {
     pci_unmap_single(p->pdev,
@@ -10228,7 +10228,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
 
     sg = (struct scatterlist *)cmd->request_buffer;
     scb->sg_length = 0;
-    use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, scsi_to_pci_dma_dir(cmd->sc_data_direction));
+    use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
     /*
      * Copy the segments into the SG array.  NOTE!!! - We used to
      * have the first entry both in the data_pointer area and the first
@@ -10256,7 +10256,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
     {
       unsigned int address = pci_map_single(p->pdev, cmd->request_buffer,
                                            cmd->request_bufflen,
-                                            scsi_to_pci_dma_dir(cmd->sc_data_direction));
+                                            cmd->sc_data_direction);
       aic7xxx_mapping(cmd) = address;
       scb->sg_list[0].address = cpu_to_le32(address);
       scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
index 0a172c1e9f7e6934beb7ddeb316d60919c4dcc5e..3838f88e1fe01ee38166b9bba6093520d6cf0728 100644 (file)
@@ -2117,7 +2117,7 @@ request_sense:
        SCpnt->SCp.Message = 0;
        SCpnt->SCp.Status = 0;
        SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
-       SCpnt->sc_data_direction = SCSI_DATA_READ;
+       SCpnt->sc_data_direction = DMA_FROM_DEVICE;
        SCpnt->use_sg = 0;
        SCpnt->tag = 0;
        SCpnt->host_scribble = (void *)fas216_rq_sns_done;
index 2eeb493f5a2bf75d438c1301e28edd3fa7d5725b..5674ada6d5c249ca58fb1d39ffba9355171d1b18 100644 (file)
@@ -642,12 +642,12 @@ int cpqfcTS_ioctl( struct scsi_device *ScsiDev, int Cmnd, void *arg)
                                return( -EFAULT);
                        }
                }
-               ScsiPassThruReq->sr_data_direction = SCSI_DATA_WRITE; 
+               ScsiPassThruReq->sr_data_direction = DMA_TO_DEVICE; 
        } else if (vendor_cmd->rw_flag == VENDOR_READ_OPCODE) {
-               ScsiPassThruReq->sr_data_direction = SCSI_DATA_READ; 
+               ScsiPassThruReq->sr_data_direction = DMA_FROM_DEVICE;
        } else
                // maybe this means a bug in the user app
-               ScsiPassThruReq->sr_data_direction = SCSI_DATA_NONE;
+               ScsiPassThruReq->sr_data_direction = DMA_BIDIRECTIONAL;
            
        ScsiPassThruReq->sr_cmd_len = 0; // set correctly by scsi_do_req()
        ScsiPassThruReq->sr_sense_buffer[0] = 0;
index a5fd7427e9dafbc3f50a3d4b0234448fa2403d8f..d822ddcc52b2cad83260fa8eed310542ec09933c 100644 (file)
@@ -5129,7 +5129,7 @@ cpqfc_undo_SEST_mappings(struct pci_dev *pcidev,
        for (i=*sgPages_head; i != NULL ;i = next)
        {
                pci_unmap_single(pcidev, i->busaddr, i->maplen, 
-                       scsi_to_pci_dma_dir(PCI_DMA_TODEVICE));
+                       PCI_DMA_TODEVICE);
                i->busaddr = (dma_addr_t) NULL; 
                i->maplen = 0L; 
                next = i->next;
@@ -5195,7 +5195,7 @@ static ULONG build_SEST_sgList(
                        contigaddr = ulBuff = pci_map_single(pcidev, 
                                Cmnd->request_buffer, 
                                Cmnd->request_bufflen,
-                               scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                               Cmnd->sc_data_direction);
                        // printk("ms %p ", ulBuff);
                }
                else {
@@ -5224,7 +5224,7 @@ static ULONG build_SEST_sgList(
                unsigned long btg;
                contigaddr = pci_map_single(pcidev, Cmnd->request_buffer, 
                                Cmnd->request_bufflen,
-                               scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                               Cmnd->sc_data_direction);
 
                // printk("contigaddr = %p, len = %d\n", 
                //      (void *) contigaddr, bytes_to_go);
@@ -5247,7 +5247,7 @@ static ULONG build_SEST_sgList(
  
        sgl = (struct scatterlist*)Cmnd->request_buffer;  
        sg_count = pci_map_sg(pcidev, sgl, Cmnd->use_sg, 
-               scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+               Cmnd->sc_data_direction);
        if( sg_count <= 3 ) {
 
        // we need to be careful here that no individual mapping
@@ -5400,7 +5400,7 @@ static ULONG build_SEST_sgList(
 
                cpqfc_undo_SEST_mappings(pcidev, contigaddr, 
                        Cmnd->request_bufflen,
-                       scsi_to_pci_dma_dir(Cmnd->sc_data_direction),
+                       Cmnd->sc_data_direction,
                        sgl, Cmnd->use_sg, sgPages_head, AllocatedPages+1);
 
                // FIXME: testing shows that if we get here, 
@@ -5946,7 +5946,7 @@ cpqfc_pci_unmap_extended_sg(struct pci_dev *pcidev,
        // for each extended scatter gather region needing unmapping... 
        for (i=fcChip->SEST->sgPages[x_ID] ; i != NULL ; i = i->next)
                pci_unmap_single(pcidev, i->busaddr, i->maplen,
-                       scsi_to_pci_dma_dir(PCI_DMA_TODEVICE));
+                       PCI_DMA_TODEVICE);
 }
 
 // Called also from cpqfcTScontrol.o, so can't be static
@@ -5960,14 +5960,14 @@ cpqfc_pci_unmap(struct pci_dev *pcidev,
        if (cmd->use_sg) {      // Used scatter gather list for data buffer?
                cpqfc_pci_unmap_extended_sg(pcidev, fcChip, x_ID);
                pci_unmap_sg(pcidev, cmd->buffer, cmd->use_sg,
-                       scsi_to_pci_dma_dir(cmd->sc_data_direction));
+                       cmd->sc_data_direction);
                // printk("umsg %d\n", cmd->use_sg);
        }
        else if (cmd->request_bufflen) {
                // printk("ums %p ", fcChip->SEST->u[ x_ID ].IWE.GAddr1);
                pci_unmap_single(pcidev, fcChip->SEST->u[ x_ID ].IWE.GAddr1,
                        cmd->request_bufflen,
-                       scsi_to_pci_dma_dir(cmd->sc_data_direction));
+                       cmd->sc_data_direction);
        }        
 }
 
index cc0cb246b1e4fda452cd6b9c9fd232cd4ee7f2c4..a9eaab9fbd5ea7761e53d436dee64654a1402236 100644 (file)
@@ -4034,7 +4034,7 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
 }
 
 #ifdef GDTH_STATISTICS
-void gdth_timeout(ulong data)
+static void gdth_timeout(ulong data)
 {
     ulong32 i;
     Scsi_Cmnd *nscp;
@@ -4062,7 +4062,7 @@ void gdth_timeout(ulong data)
 }
 #endif
 
-void __init internal_setup(char *str,int *ints)
+static void __init internal_setup(char *str,int *ints)
 {
     int i, argc;
     char *cur_str, *argv;
@@ -4153,7 +4153,7 @@ int __init option_setup(char *str)
     return 1;
 }
 
-int __init gdth_detect(Scsi_Host_Template *shtp)
+static int __init gdth_detect(Scsi_Host_Template *shtp)
 {
     struct Scsi_Host *shp;
     gdth_pci_str pcistr[MAXHA];
@@ -4604,7 +4604,7 @@ int __init gdth_detect(Scsi_Host_Template *shtp)
 }
 
 
-int gdth_release(struct Scsi_Host *shp)
+static int gdth_release(struct Scsi_Host *shp)
 {
     int hanum;
     gdth_ha_str *ha;
@@ -4691,7 +4691,7 @@ static const char *gdth_ctr_name(int hanum)
     return("");
 }
 
-const char *gdth_info(struct Scsi_Host *shp)
+static const char *gdth_info(struct Scsi_Host *shp)
 {
     int hanum;
     gdth_ha_str *ha;
@@ -4704,19 +4704,19 @@ const char *gdth_info(struct Scsi_Host *shp)
 }
 
 /* new error handling */
-int gdth_eh_abort(Scsi_Cmnd *scp)
+static int gdth_eh_abort(Scsi_Cmnd *scp)
 {
     TRACE2(("gdth_eh_abort()\n"));
     return FAILED;
 }
 
-int gdth_eh_device_reset(Scsi_Cmnd *scp)
+static int gdth_eh_device_reset(Scsi_Cmnd *scp)
 {
     TRACE2(("gdth_eh_device_reset()\n"));
     return FAILED;
 }
 
-int gdth_eh_bus_reset(Scsi_Cmnd *scp)
+static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
 {
     int i, hanum;
     gdth_ha_str *ha;
@@ -4770,7 +4770,7 @@ int gdth_eh_bus_reset(Scsi_Cmnd *scp)
     return SUCCESS;
 }
 
-int gdth_eh_host_reset(Scsi_Cmnd *scp)
+static int gdth_eh_host_reset(Scsi_Cmnd *scp)
 {
     TRACE2(("gdth_eh_host_reset()\n"));
     return FAILED;
@@ -4778,9 +4778,9 @@ int gdth_eh_host_reset(Scsi_Cmnd *scp)
 
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
+static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
 #else
-int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
+static int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
 #endif
 {
     unchar b, t;
@@ -4818,7 +4818,7 @@ int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
 }
 
 
-int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *))
+static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *))
 {
     int hanum;
     int priority;
index bf269f05ea8ed2d49676d86ea764d6911868383e..c0f1e34115245f6f41d003faff9a17e8fa0385d1 100644 (file)
@@ -1029,51 +1029,10 @@ typedef struct {
 
 /* function prototyping */
 
-int gdth_detect(Scsi_Host_Template *);
-int gdth_release(struct Scsi_Host *);
-int gdth_queuecommand(Scsi_Cmnd *,void (*done)(Scsi_Cmnd *));
-const char *gdth_info(struct Scsi_Host *);
-
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-int gdth_bios_param(struct scsi_device *,struct block_device *,sector_t,int *);
 int gdth_proc_info(struct Scsi_Host *, char *,char **,off_t,int,int);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
-int gdth_bios_param(Disk *,kdev_t,int *);
-int gdth_proc_info(char *,char **,off_t,int,int,int);
 #else
-int gdth_bios_param(Disk *,kdev_t,int *);
-extern struct proc_dir_entry proc_scsi_gdth;
 int gdth_proc_info(char *,char **,off_t,int,int,int);
-int gdth_abort(Scsi_Cmnd *);
-int gdth_reset(Scsi_Cmnd *,unsigned int); 
-#define GDTH { proc_dir:        &proc_scsi_gdth,                 \
-               proc_info:       gdth_proc_info,                  \
-               name:            "GDT SCSI Disk Array Controller",\
-               detect:          gdth_detect,                     \
-               release:         gdth_release,                    \
-               info:            gdth_info,                       \
-               command:         NULL,                            \
-               queuecommand:    gdth_queuecommand,               \
-               eh_abort_handler: gdth_eh_abort,                  \
-               eh_device_reset_handler: gdth_eh_device_reset,    \
-               eh_bus_reset_handler: gdth_eh_bus_reset,          \
-               eh_host_reset_handler: gdth_eh_host_reset,        \
-               abort:           gdth_abort,                      \
-               reset:           gdth_reset,                      \
-               bios_param:      gdth_bios_param,                 \
-               can_queue:       GDTH_MAXCMDS,                    \
-               this_id:         -1,                              \
-               sg_tablesize:    GDTH_MAXSG,                      \
-               cmd_per_lun:     GDTH_MAXC_P_L,                   \
-               present:         0,                               \
-               unchecked_isa_dma: 1,                             \
-               use_clustering:  ENABLE_CLUSTERING,               \
-               use_new_eh_code: 1       /* use new error code */ }    
 #endif
 
-int gdth_eh_abort(Scsi_Cmnd *scp);
-int gdth_eh_device_reset(Scsi_Cmnd *scp);
-int gdth_eh_bus_reset(Scsi_Cmnd *scp);
-int gdth_eh_host_reset(Scsi_Cmnd *scp);
-
 #endif
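
The gdth changes make the driver's entry points static and retire the old GDTH host-template macro with its GNU-style "field: value" initializers, leaving the header to export only the proc handler. The equivalent modern pattern is a file-local template built with C99 designated initializers, roughly as sketched below (a style illustration assembled from a subset of the removed macro's fields; the name and exact definition in gdth.c are assumptions):

/* Fragment of a kernel driver file such as gdth.c; the entry points are
 * static in that file, so only this template needs to reference them. */
static Scsi_Host_Template gdth_template_example = {
	.name                    = "GDT SCSI Disk Array Controller",
	.detect                  = gdth_detect,
	.release                 = gdth_release,
	.info                    = gdth_info,
	.queuecommand            = gdth_queuecommand,
	.eh_abort_handler        = gdth_eh_abort,
	.eh_device_reset_handler = gdth_eh_device_reset,
	.eh_bus_reset_handler    = gdth_eh_bus_reset,
	.eh_host_reset_handler   = gdth_eh_host_reset,
	.bios_param              = gdth_bios_param,
	.can_queue               = GDTH_MAXCMDS,
	.this_id                 = -1,
	.sg_tablesize            = GDTH_MAXSG,
	.cmd_per_lun             = GDTH_MAXC_P_L,
	.unchecked_isa_dma       = 1,
	.use_clustering          = ENABLE_CLUSTERING,
};
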
index 47c263e5cd39d355e15b061d6564cd131b47c249..fbc2cb6667a1a349e81aa559311e472ad7a94f91 100644 (file)
@@ -231,9 +231,9 @@ module_param(ips, charp, 0);
 #endif
 
 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
-                         SCSI_DATA_NONE == scb->scsi_cmd->sc_data_direction) ? \
+                         DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
                          PCI_DMA_BIDIRECTIONAL : \
-                         scsi_to_pci_dma_dir(scb->scsi_cmd->sc_data_direction))
+                         scb->scsi_cmd->sc_data_direction)
 
 #ifdef IPS_DEBUG
 #define METHOD_TRACE(s, i)    if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
@@ -2849,8 +2849,7 @@ ips_next(ips_ha_t * ha, int intr)
 
                        sg = SC->request_buffer;
                        scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
-                                                  scsi_to_pci_dma_dir(SC->
-                                                                      sc_data_direction));
+                                                  SC->sc_data_direction);
                        scb->flags |= IPS_SCB_MAP_SG;
                        for (i = 0; i < scb->sg_count; i++) {
                                if (ips_fill_scb_sg_single
@@ -2865,8 +2864,7 @@ ips_next(ips_ha_t * ha, int intr)
                                    pci_map_single(ha->pcidev,
                                                   SC->request_buffer,
                                                   SC->request_bufflen,
-                                                  scsi_to_pci_dma_dir(SC->
-                                                                      sc_data_direction));
+                                                  SC->sc_data_direction);
                                scb->flags |= IPS_SCB_MAP_SINGLE;
                                ips_fill_scb_sg_single(ha, scb->data_busaddr,
                                                       scb, 0,
index 29f250c80b98fae57af1ae5ed056f8a75230fddf..4cbb6187cc441fc97132ea3a5b990fe932710a24 100644 (file)
@@ -131,6 +131,7 @@ lasi700_probe(struct parisc_device *dev)
        if (!host)
                goto out_kfree;
        host->this_id = 7;
+       host->base = base;
        host->irq = dev->irq;
        if(request_irq(dev->irq, NCR_700_intr, SA_SHIRQ, "lasi700", host)) {
                printk(KERN_ERR "lasi700: request_irq failed!\n");
index 4e5e54a1564b571edd07e6ac5d2ccfa100696b82..4c96df060c3bad9af44ab4c4664d8cba38c63030 100644 (file)
@@ -305,7 +305,7 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
        sb[0] = 0x70;
        sb[2] = MEDIUM_ERROR;
        sb[7] = 0x0A;
-       if (cmd->sc_data_direction == SCSI_DATA_READ) {
+       if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                sb[12] = 0x11; /* "unrecovered read error" */
                sb[13] = 0x04;
        } else {
@@ -671,8 +671,8 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
                return;
 
        /* data is present; dma-map it */
-       if (cmd->sc_data_direction == SCSI_DATA_READ ||
-           cmd->sc_data_direction == SCSI_DATA_WRITE) {
+       if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+           cmd->sc_data_direction == DMA_TO_DEVICE) {
                if (unlikely(cmd->request_bufflen < 1)) {
                        printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
                               ap->id, dev->devno);
@@ -1304,7 +1304,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
        struct scsi_cmnd *cmd = qc->scsicmd;
        struct ata_device *dev = qc->dev;
        int using_pio = (dev->flags & ATA_DFLAG_PIO);
-       int nodata = (cmd->sc_data_direction == SCSI_DATA_NONE);
+       int nodata = (cmd->sc_data_direction == DMA_NONE);
 
        if (!using_pio)
                /* Check whether ATAPI DMA is safe */
@@ -1316,7 +1316,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
        qc->complete_fn = atapi_qc_complete;
 
        qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       if (cmd->sc_data_direction == SCSI_DATA_WRITE) {
+       if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                qc->tf.flags |= ATA_TFLAG_WRITE;
                DPRINTK("direction: write\n");
        }
@@ -1340,7 +1340,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 
 #ifdef ATAPI_ENABLE_DMADIR
                /* some SATA bridges need us to indicate data xfer direction */
-               if (cmd->sc_data_direction != SCSI_DATA_WRITE)
+               if (cmd->sc_data_direction != DMA_TO_DEVICE)
                        qc->tf.feature |= ATAPI_DMADIR;
 #endif
        }
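
One detail worth decoding in the ata_to_sense_error() hunk above is the hand-built sense buffer: the bytes follow the SCSI fixed-format sense layout. A small annotated sketch of the read-error case visible in the diff (the helper name is illustrative; the write branch, not shown here, fills in a write-error ASC/ASCQ pair instead):

#include <linux/types.h>

/* Fixed-format sense data for an unrecovered read error, matching the
 * byte offsets filled in the hunk above. */
static void example_fill_read_error_sense(u8 *sb)
{
	sb[0]  = 0x70;	/* response code: current error, fixed format */
	sb[2]  = 0x03;	/* sense key: MEDIUM ERROR                    */
	sb[7]  = 0x0a;	/* additional sense length                    */
	sb[12] = 0x11;	/* ASC:  unrecovered read error               */
	sb[13] = 0x04;	/* ASCQ: auto-reallocation failed             */
}
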
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
new file mode 100644 (file)
index 0000000..2b30985
--- /dev/null
@@ -0,0 +1,32 @@
+#/*******************************************************************
+# * This file is part of the Emulex Linux Device Driver for         *
+# * Enterprise Fibre Channel Host Bus Adapters.                     *
+# * Refer to the README file included with this package for         *
+# * driver version and adapter support.                             *
+# * Copyright (C) 2004 Emulex Corporation.                          *
+# * www.emulex.com                                                  *
+# *                                                                 *
+# * This program is free software; you can redistribute it and/or   *
+# * modify it under the terms of the GNU General Public License     *
+# * as published by the Free Software Foundation; either version 2  *
+# * of the License, or (at your option) any later version.          *
+# *                                                                 *
+# * This program is distributed in the hope that it will be useful, *
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+# * GNU General Public License for more details, a copy of which    *
+# * can be found in the file COPYING included with this package.    *
+# *******************************************************************/
+######################################################################
+
+#$Id: Makefile 1.58 2005/01/23 19:00:32EST sf_support Exp  $
+
+ifneq ($(GCOV),)
+  EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage
+  EXTRA_CFLAGS += -O0
+endif
+
+obj-$(CONFIG_SCSI_LPFC) := lpfc.o
+
+lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
+       lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
new file mode 100644 (file)
index 0000000..d78247c
--- /dev/null
@@ -0,0 +1,384 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc.h 1.167 2005/04/07 08:47:05EDT sf_support Exp  $
+ */
+
+struct lpfc_sli2_slim;
+
+#define LPFC_MAX_TARGET         256    /* max targets supported */
+#define LPFC_MAX_DISC_THREADS  64      /* max outstanding discovery els req */
+#define LPFC_MAX_NS_RETRY       3      /* max NameServer retries */
+
+#define LPFC_DFT_HBA_Q_DEPTH   2048    /* max cmds per hba */
+#define LPFC_LC_HBA_Q_DEPTH    1024    /* max cmds per low cost hba */
+#define LPFC_LP101_HBA_Q_DEPTH 128     /* max cmds per low cost hba */
+
+#define LPFC_CMD_PER_LUN       30      /* max outstanding cmds per lun */
+#define LPFC_SG_SEG_CNT                64      /* sg element count per scsi cmnd */
+#define LPFC_IOCB_LIST_CNT     2250    /* list of IOCBs for fast-path usage. */
+
+/* Define macros for 64 bit support */
+#define putPaddrLow(addr)    ((uint32_t) (0xffffffff & (u64)(addr)))
+#define putPaddrHigh(addr)   ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
+#define getPaddr(high, low)  ((dma_addr_t)( \
+                            (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
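/*
 * Worked example (not part of the patch): the three helpers above just
 * split and rejoin a 64-bit bus address.  For addr = 0x0000000123456780ULL:
 *
 *   putPaddrHigh(addr) == 0x00000001
 *   putPaddrLow(addr)  == 0x23456780
 *   getPaddr(0x00000001, 0x23456780) == addr
 *
 * since ((u64)(high) << 16) << 16 is equivalent to (u64)(high) << 32.
 */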
+/* Provide maximum configuration definitions. */
+#define LPFC_DRVR_TIMEOUT      16      /* driver iocb timeout value in sec */
+#define MAX_FCP_TARGET         256     /* max num of FCP targets supported */
+#define FC_MAX_ADPTMSG         64
+
+#define MAX_HBAEVT     32
+
+/* Provide DMA memory definitions the driver uses per port instance. */
+struct lpfc_dmabuf {
+       struct list_head list;
+       void *virt;             /* virtual address ptr */
+       dma_addr_t phys;        /* mapped address */
+};
+
+struct lpfc_dma_pool {
+       struct lpfc_dmabuf   *elements;
+       uint32_t    max_count;
+       uint32_t    current_count;
+};
+
+/* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
+#define MEM_PRI                0x100
+
+
+/****************************************************************************/
+/*      Device VPD save area                                                */
+/****************************************************************************/
+typedef struct lpfc_vpd {
+       uint32_t status;        /* vpd status value */
+       uint32_t length;        /* number of bytes actually returned */
+       struct {
+               uint32_t rsvd1; /* Revision numbers */
+               uint32_t biuRev;
+               uint32_t smRev;
+               uint32_t smFwRev;
+               uint32_t endecRev;
+               uint16_t rBit;
+               uint8_t fcphHigh;
+               uint8_t fcphLow;
+               uint8_t feaLevelHigh;
+               uint8_t feaLevelLow;
+               uint32_t postKernRev;
+               uint32_t opFwRev;
+               uint8_t opFwName[16];
+               uint32_t sli1FwRev;
+               uint8_t sli1FwName[16];
+               uint32_t sli2FwRev;
+               uint8_t sli2FwName[16];
+       } rev;
+} lpfc_vpd_t;
+
+struct lpfc_scsi_buf;
+
+
+/*
+ * lpfc stat counters
+ */
+struct lpfc_stats {
+       /* Statistics for ELS commands */
+       uint32_t elsLogiCol;
+       uint32_t elsRetryExceeded;
+       uint32_t elsXmitRetry;
+       uint32_t elsDelayRetry;
+       uint32_t elsRcvDrop;
+       uint32_t elsRcvFrame;
+       uint32_t elsRcvRSCN;
+       uint32_t elsRcvRNID;
+       uint32_t elsRcvFARP;
+       uint32_t elsRcvFARPR;
+       uint32_t elsRcvFLOGI;
+       uint32_t elsRcvPLOGI;
+       uint32_t elsRcvADISC;
+       uint32_t elsRcvPDISC;
+       uint32_t elsRcvFAN;
+       uint32_t elsRcvLOGO;
+       uint32_t elsRcvPRLO;
+       uint32_t elsRcvPRLI;
+       uint32_t elsRcvRRQ;
+       uint32_t elsXmitFLOGI;
+       uint32_t elsXmitPLOGI;
+       uint32_t elsXmitPRLI;
+       uint32_t elsXmitADISC;
+       uint32_t elsXmitLOGO;
+       uint32_t elsXmitSCR;
+       uint32_t elsXmitRNID;
+       uint32_t elsXmitFARP;
+       uint32_t elsXmitFARPR;
+       uint32_t elsXmitACC;
+       uint32_t elsXmitLSRJT;
+
+       uint32_t frameRcvBcast;
+       uint32_t frameRcvMulti;
+       uint32_t strayXmitCmpl;
+       uint32_t frameXmitDelay;
+       uint32_t xriCmdCmpl;
+       uint32_t xriStatErr;
+       uint32_t LinkUp;
+       uint32_t LinkDown;
+       uint32_t LinkMultiEvent;
+       uint32_t NoRcvBuf;
+       uint32_t fcpCmd;
+       uint32_t fcpCmpl;
+       uint32_t fcpRspErr;
+       uint32_t fcpRemoteStop;
+       uint32_t fcpPortRjt;
+       uint32_t fcpPortBusy;
+       uint32_t fcpError;
+       uint32_t fcpLocalErr;
+};
+
+enum sysfs_mbox_state {
+       SMBOX_IDLE,
+       SMBOX_WRITING,
+       SMBOX_READING
+};
+
+struct lpfc_sysfs_mbox {
+       enum sysfs_mbox_state state;
+       size_t                offset;
+       struct lpfcMboxq *    mbox;
+};
+
+struct lpfc_hba {
+       struct list_head hba_list;      /* List of hbas/ports */
+       struct lpfc_sli sli;
+       struct lpfc_sli2_slim *slim2p;
+       dma_addr_t slim2p_mapping;
+       uint16_t pci_cfg_value;
+
+       uint32_t hba_state;
+
+#define LPFC_INIT_START           1    /* Initial state after board reset */
+#define LPFC_INIT_MBX_CMDS        2    /* Initialize HBA with mbox commands */
+#define LPFC_LINK_DOWN            3    /* HBA initialized, link is down */
+#define LPFC_LINK_UP              4    /* Link is up  - issue READ_LA */
+#define LPFC_LOCAL_CFG_LINK       5    /* local NPORT Id configured */
+#define LPFC_FLOGI                6    /* FLOGI sent to Fabric */
+#define LPFC_FABRIC_CFG_LINK      7    /* Fabric assigned NPORT Id
+                                          configured */
+#define LPFC_NS_REG               8    /* Register with NameServer */
+#define LPFC_NS_QRY               9    /* Query NameServer for NPort ID list */
+#define LPFC_BUILD_DISC_LIST      10   /* Build ADISC and PLOGI lists for
+                                        * device authentication / discovery */
+#define LPFC_DISC_AUTH            11   /* Processing ADISC list */
+#define LPFC_CLEAR_LA             12   /* authentication cmplt - issue
+                                          CLEAR_LA */
+#define LPFC_HBA_READY            32
+#define LPFC_HBA_ERROR            0xff
+
+       uint8_t fc_linkspeed;   /* Link speed after last READ_LA */
+
+       uint32_t fc_eventTag;   /* event tag for link attention */
+       uint32_t fc_prli_sent;  /* cntr for outstanding PRLIs */
+
+       uint32_t num_disc_nodes;        /*in addition to hba_state */
+
+       struct timer_list fc_estabtmo;  /* link establishment timer */
+       struct timer_list fc_disctmo;   /* Discovery rescue timer */
+       struct timer_list fc_fdmitmo;   /* fdmi timer */
+       /* These fields used to be binfo */
+       struct lpfc_name fc_nodename;   /* fc nodename */
+       struct lpfc_name fc_portname;   /* fc portname */
+       uint32_t fc_pref_DID;   /* preferred D_ID */
+       uint8_t fc_pref_ALPA;   /* preferred AL_PA */
+       uint32_t fc_edtov;      /* E_D_TOV timer value */
+       uint32_t fc_arbtov;     /* ARB_TOV timer value */
+       uint32_t fc_ratov;      /* R_A_TOV timer value */
+       uint32_t fc_rttov;      /* R_T_TOV timer value */
+       uint32_t fc_altov;      /* AL_TOV timer value */
+       uint32_t fc_crtov;      /* C_R_TOV timer value */
+       uint32_t fc_citov;      /* C_I_TOV timer value */
+       uint32_t fc_myDID;      /* fibre channel S_ID */
+       uint32_t fc_prevDID;    /* previous fibre channel S_ID */
+
+       struct serv_parm fc_sparam;     /* buffer for our service parameters */
+       struct serv_parm fc_fabparam;   /* fabric service parameters buffer */
+       uint8_t alpa_map[128];  /* AL_PA map from READ_LA */
+
+       uint8_t fc_ns_retry;    /* retries for fabric nameserver */
+       uint32_t fc_nlp_cnt;    /* outstanding NODELIST requests */
+       uint32_t fc_rscn_id_cnt;        /* count of RSCNs payloads in list */
+       struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+       uint32_t lmt;
+       uint32_t fc_flag;       /* FC flags */
+#define FC_PT2PT                0x1    /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI          0x2    /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO             0x4    /* Discovery timer running */
+#define FC_PUBLIC_LOOP          0x8    /* Public loop */
+#define FC_LBIT                 0x10   /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE            0x20   /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE             0x40   /* More node to process in node tbl */
+#define FC_OFFLINE_MODE         0x80   /* Interface is offline for diag */
+#define FC_FABRIC               0x100  /* We are fabric attached */
+#define FC_ESTABLISH_LINK       0x200  /* Reestablish Link */
+#define FC_RSCN_DISCOVERY       0x400  /* Authenticate all devices after RSCN*/
+#define FC_LOADING             0x1000  /* HBA in process of loading drvr */
+#define FC_UNLOADING           0x2000  /* HBA in process of unloading drvr */
+#define FC_SCSI_SCAN_TMO        0x4000 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY      0x8000 /* we want to abort discovery */
+#define FC_NDISC_ACTIVE         0x10000        /* NPort discovery active */
+
+       uint32_t fc_topology;   /* link topology, from LINK INIT */
+
+       struct lpfc_stats fc_stat;
+
+       /* These are the head/tail pointers for the bind, plogi, adisc, unmap,
+        *  and map lists.  Their counters are immediately following.
+        */
+       struct list_head fc_plogi_list;
+       struct list_head fc_adisc_list;
+       struct list_head fc_reglogin_list;
+       struct list_head fc_prli_list;
+       struct list_head fc_nlpunmap_list;
+       struct list_head fc_nlpmap_list;
+       struct list_head fc_npr_list;
+       struct list_head fc_unused_list;
+
+       /* Keep counters for the number of entries in each list. */
+       uint16_t fc_plogi_cnt;
+       uint16_t fc_adisc_cnt;
+       uint16_t fc_reglogin_cnt;
+       uint16_t fc_prli_cnt;
+       uint16_t fc_unmap_cnt;
+       uint16_t fc_map_cnt;
+       uint16_t fc_npr_cnt;
+       uint16_t fc_unused_cnt;
+       struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
+       uint32_t nport_event_cnt;       /* timestamp for nlplist entry */
+
+#define LPFC_RPI_HASH_SIZE     64
+#define LPFC_RPI_HASH_FUNC(x)  ((x) & (0x3f))
+       /* ptr to active D_ID / RPIs */
+       struct lpfc_nodelist *fc_nlplookup[LPFC_RPI_HASH_SIZE];
+       uint32_t wwnn[2];
+       uint32_t RandomData[7];
+
+       uint32_t cfg_log_verbose;
+       uint32_t cfg_lun_queue_depth;
+       uint32_t cfg_nodev_tmo;
+       uint32_t cfg_hba_queue_depth;
+       uint32_t cfg_fcp_class;
+       uint32_t cfg_use_adisc;
+       uint32_t cfg_ack0;
+       uint32_t cfg_topology;
+       uint32_t cfg_scan_down;
+       uint32_t cfg_link_speed;
+       uint32_t cfg_cr_delay;
+       uint32_t cfg_cr_count;
+       uint32_t cfg_fdmi_on;
+       uint32_t cfg_fcp_bind_method;
+       uint32_t cfg_discovery_threads;
+       uint32_t cfg_max_luns;
+       uint32_t cfg_sg_seg_cnt;
+       uint32_t cfg_sg_dma_buf_size;
+
+       lpfc_vpd_t vpd;         /* vital product data */
+
+       struct Scsi_Host *host;
+       struct pci_dev *pcidev;
+       struct list_head      work_list;
+       uint32_t              work_ha;      /* Host Attention Bits for WT */
+       uint32_t              work_ha_mask; /* HA Bits owned by WT        */
+       uint32_t              work_hs;      /* HS stored in case of ERRAT */
+       uint32_t              work_status[2]; /* Extra status from SLIM */
+       uint32_t              work_hba_events; /* Timeout to be handled  */
+#define WORKER_DISC_TMO                0x1     /* Discovery timeout */
+#define WORKER_ELS_TMO                 0x2     /* ELS timeout */
+#define WORKER_MBOX_TMO                0x4     /* MBOX timeout */
+#define WORKER_FDMI_TMO                0x8     /* FDMI timeout */
+
+       wait_queue_head_t    *work_wait;
+       struct task_struct   *worker_thread;
+
+       unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+       unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
+       void __iomem *slim_memmap_p;    /* Kernel memory mapped address for
+                                          PCI BAR0 */
+       void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
+                                           PCI BAR2 */
+
+       void __iomem *MBslimaddr;       /* virtual address for mbox cmds */
+       void __iomem *HAregaddr;        /* virtual address for host attn reg */
+       void __iomem *CAregaddr;        /* virtual address for chip attn reg */
+       void __iomem *HSregaddr;        /* virtual address for host status
+                                          reg */
+       void __iomem *HCregaddr;        /* virtual address for host ctl reg */
+
+       int brd_no;                     /* FC board number */
+
+       char SerialNumber[32];          /* adapter Serial Number */
+       char OptionROMVersion[32];      /* adapter BIOS / Fcode version */
+       char ModelDesc[256];            /* Model Description */
+       char ModelName[80];             /* Model Name */
+       char ProgramType[256];          /* Program Type */
+       char Port[20];                  /* Port No */
+       uint8_t vpd_flag;               /* VPD data flag */
+
+#define VPD_MODEL_DESC      0x1         /* valid vpd model description */
+#define VPD_MODEL_NAME      0x2         /* valid vpd model name */
+#define VPD_PROGRAM_TYPE    0x4         /* valid vpd program type */
+#define VPD_PORT            0x8         /* valid vpd port data */
+#define VPD_MASK            0xf         /* mask for any vpd data */
+
+       struct timer_list els_tmofunc;
+
+       void *link_stats;
+
+       /*
+        * stat  counters
+        */
+       uint64_t fc4InputRequests;
+       uint64_t fc4OutputRequests;
+       uint64_t fc4ControlRequests;
+
+       struct lpfc_sysfs_mbox sysfs_mbox;
+
+       /* fastpath list. */
+       struct list_head lpfc_scsi_buf_list;
+       uint32_t total_scsi_bufs;
+       struct list_head lpfc_iocb_list;
+       uint32_t total_iocbq_bufs;
+
+       /* pci_mem_pools */
+       struct pci_pool *lpfc_scsi_dma_buf_pool;
+       struct pci_pool *lpfc_mbuf_pool;
+       struct lpfc_dma_pool lpfc_mbuf_safety_pool;
+
+       mempool_t *mbox_mem_pool;
+       mempool_t *nlp_mem_pool;
+       struct list_head freebufList;
+       struct list_head ctrspbuflist;
+       struct list_head rnidrspbuflist;
+};
+
+
+struct rnidrsp {
+       void *buf;
+       uint32_t uniqueid;
+       struct list_head list;
+       uint32_t data;
+};
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
new file mode 100644 (file)
index 0000000..1276bd7
--- /dev/null
@@ -0,0 +1,1291 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_attr.c 1.24 2005/04/13 11:58:55EDT sf_support Exp  $
+ */
+
+#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_crtn.h"
+
+
+static void
+lpfc_jedec_to_ascii(int incr, char hdw[])
+{
+       int i, j;
+       for (i = 0; i < 8; i++) {
+               j = (incr & 0xf);
+               if (j <= 9)
+                       hdw[7 - i] = 0x30 +  j;
+                else
+                       hdw[7 - i] = 0x61 + j - 10;
+               incr = (incr >> 4);
+       }
+       hdw[8] = 0;
+       return;
+}
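The loop above simply renders the 32-bit JEDEC value as eight lower-case hexadecimal digits plus a NUL terminator. A small, hypothetical example of the conversion:

        char hdw[9];

        lpfc_jedec_to_ascii(0x12ab34cd, hdw);   /* hdw now holds "12ab34cd" */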
+
+static ssize_t
+lpfc_drvr_version_show(struct class_device *cdev, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+}
+
+static ssize_t
+management_version_show(struct class_device *cdev, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, DFC_API_VERSION "\n");
+}
+
+static ssize_t
+lpfc_info_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+}
+
+static ssize_t
+lpfc_serialnum_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+}
+
+static ssize_t
+lpfc_modeldesc_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+}
+
+static ssize_t
+lpfc_modelname_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+}
+
+static ssize_t
+lpfc_programtype_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+}
+
+static ssize_t
+lpfc_portnum_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+}
+
+static ssize_t
+lpfc_fwrev_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       char fwrev[32];
+       lpfc_decode_firmware_rev(phba, fwrev, 1);
+       return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
+}
+
+static ssize_t
+lpfc_hdw_show(struct class_device *cdev, char *buf)
+{
+       char hdw[9];
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       lpfc_vpd_t *vp = &phba->vpd;
+       lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
+       return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+}
+static ssize_t
+lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+}
+static ssize_t
+lpfc_state_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       int len = 0;
+       switch (phba->hba_state) {
+       case LPFC_INIT_START:
+       case LPFC_INIT_MBX_CMDS:
+       case LPFC_LINK_DOWN:
+               len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
+               break;
+       case LPFC_LINK_UP:
+       case LPFC_LOCAL_CFG_LINK:
+               len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
+               break;
+       case LPFC_FLOGI:
+       case LPFC_FABRIC_CFG_LINK:
+       case LPFC_NS_REG:
+       case LPFC_NS_QRY:
+       case LPFC_BUILD_DISC_LIST:
+       case LPFC_DISC_AUTH:
+       case LPFC_CLEAR_LA:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "Link Up - Discovery\n");
+               break;
+       case LPFC_HBA_READY:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "Link Up - Ready:\n");
+               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       if (phba->fc_flag & FC_PUBLIC_LOOP)
+                               len += snprintf(buf + len, PAGE_SIZE-len,
+                                               "   Public Loop\n");
+                       else
+                               len += snprintf(buf + len, PAGE_SIZE-len,
+                                               "   Private Loop\n");
+               } else {
+                       if (phba->fc_flag & FC_FABRIC)
+                               len += snprintf(buf + len, PAGE_SIZE-len,
+                                               "   Fabric\n");
+                       else
+                               len += snprintf(buf + len, PAGE_SIZE-len,
+                                               "   Point-2-Point\n");
+               }
+       }
+       return len;
+}
+
+static ssize_t
+lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt +
+                                                       phba->fc_unmap_cnt);
+}
+
+
+static ssize_t
+lpfc_issue_lip (struct class_device *cdev, const char *buf, size_t count)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
+       int val = 0;
+       LPFC_MBOXQ_t *pmboxq;
+       int mbxstatus = MBXERR_ERROR;
+
+       if ((sscanf(buf, "%d", &val) != 1) ||
+           (val != 1))
+               return -EINVAL;
+
+       if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+           (phba->hba_state != LPFC_HBA_READY))
+               return -EPERM;
+
+       pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
+
+       if (!pmboxq)
+               return -ENOMEM;
+
+       memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+       lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
+       mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+       if (mbxstatus == MBX_TIMEOUT)
+               pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       else
+               mempool_free( pmboxq, phba->mbox_mem_pool);
+
+       if (mbxstatus == MBXERR_ERROR)
+               return -EIO;
+
+       return strlen(buf);
+}
+
+static ssize_t
+lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+}
+
+static ssize_t
+lpfc_board_online_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+
+       if (!phba) return 0;
+
+       if (phba->fc_flag & FC_OFFLINE_MODE)
+               return snprintf(buf, PAGE_SIZE, "0\n");
+       else
+               return snprintf(buf, PAGE_SIZE, "1\n");
+}
+
+static ssize_t
+lpfc_board_online_store(struct class_device *cdev, const char *buf,
+                                                               size_t count)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       struct completion online_compl;
+       int val=0, status=0;
+
+       if (sscanf(buf, "%d", &val) != 1)
+               return 0;
+
+       init_completion(&online_compl);
+
+       if (val)
+               lpfc_workq_post_event(phba, &status, &online_compl,
+                                                       LPFC_EVT_ONLINE);
+       else
+               lpfc_workq_post_event(phba, &status, &online_compl,
+                                                       LPFC_EVT_OFFLINE);
+       wait_for_completion(&online_compl);
+       if (!status)
+               return strlen(buf);
+       else
+               return 0;
+}
+
+
+#define lpfc_param_show(attr)  \
+static ssize_t \
+lpfc_##attr##_show(struct class_device *cdev, char *buf) \
+{ \
+       struct Scsi_Host *host = class_to_shost(cdev);\
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
+       int val = 0;\
+       if (phba){\
+               val = phba->cfg_##attr;\
+               return snprintf(buf, PAGE_SIZE, "%d\n",\
+                               phba->cfg_##attr);\
+       }\
+       return 0;\
+}
+
+#define lpfc_param_store(attr, minval, maxval) \
+static ssize_t \
+lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
+{ \
+       struct Scsi_Host *host = class_to_shost(cdev);\
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
+       int val = 0;\
+       if (!isdigit(buf[0]))\
+               return -EINVAL;\
+       if (sscanf(buf, "0x%x", &val) != 1)\
+               if (sscanf(buf, "%d", &val) != 1)\
+                       return -EINVAL;\
+       if (phba){\
+               if (val >= minval && val <= maxval) {\
+                       phba->cfg_##attr = val;\
+                       return strlen(buf);\
+               }\
+       }\
+       return 0;\
+}
+
+#define LPFC_ATTR_R_NOINIT(name, desc) \
+extern int lpfc_##name;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
+static int lpfc_##name = defval;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
+static int lpfc_##name = defval;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_store(name, minval, maxval)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+                        lpfc_##name##_show, lpfc_##name##_store)
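As a rough sketch of what a single invocation expands to (using nodev_tmo as the example and eliding the generated function bodies), each LPFC_ATTR_RW() yields a module parameter plus a readable and writable class device attribute backed by the show/store pair:

        static int lpfc_nodev_tmo = 30;
        module_param(lpfc_nodev_tmo, int, 0);
        MODULE_PARM_DESC(lpfc_nodev_tmo, "...");
        static ssize_t lpfc_nodev_tmo_show(struct class_device *, char *);
        static ssize_t lpfc_nodev_tmo_store(struct class_device *, const char *, size_t);
        static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
                                 lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);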
+
+static CLASS_DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
+static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
+static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
+static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
+static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
+static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL);
+static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
+static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
+static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
+static CLASS_DEVICE_ATTR(option_rom_version, S_IRUGO,
+                                       lpfc_option_rom_version_show, NULL);
+static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO,
+                                       lpfc_num_discovered_ports_show, NULL);
+static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
+static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
+                        NULL);
+static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
+                        NULL);
+static CLASS_DEVICE_ATTR(issue_lip, S_IWUSR, NULL, lpfc_issue_lip);
+static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
+                        lpfc_board_online_show, lpfc_board_online_store);
+
+
+/*
+# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
+# deluged with LOTS of information.
+# You can set a bit mask to record specific types of verbose messages:
+#
+# LOG_ELS                       0x1        ELS events
+# LOG_DISCOVERY                 0x2        Link discovery events
+# LOG_MBOX                      0x4        Mailbox events
+# LOG_INIT                      0x8        Initialization events
+# LOG_LINK_EVENT                0x10       Link events
+# LOG_IP                        0x20       IP traffic history
+# LOG_FCP                       0x40       FCP traffic history
+# LOG_NODE                      0x80       Node table events
+# LOG_MISC                      0x400      Miscellaneous events
+# LOG_SLI                       0x800      SLI events
+# LOG_CHK_COND                  0x1000     FCP Check condition flag
+# LOG_LIBDFC                    0x2000     LIBDFC events
+# LOG_ALL_MSG                   0xffff     LOG all messages
+*/
+LPFC_ATTR_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
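For example, a mask of 0x403 records ELS, discovery and miscellaneous events (0x1 | 0x2 | 0x400). The value is normally supplied through the lpfc_log_verbose module parameter or sysfs attribute rather than assigned in code, but in terms of the flags listed above it amounts to:

        /* hypothetical example: log ELS, discovery and miscellaneous events */
        phba->cfg_log_verbose = LOG_ELS | LOG_DISCOVERY | LOG_MISC;    /* 0x403 */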
+
+/*
+# lun_queue_depth:  This parameter is used to limit the number of outstanding
+# commands per FCP LUN. Value range is [1,128]. Default value is 30.
+*/
+LPFC_ATTR_R(lun_queue_depth, 30, 1, 128,
+           "Max number of FCP commands we can queue to a specific LUN");
+
+/*
+# Some disk devices have a "select ID" or "select Target" capability.
+# From a protocol standpoint "select ID" usually means select the
+# Fibre channel "ALPA".  In the FC-AL Profile there is an "informative
+# annex" which contains a table that maps a "select ID" (a number
+# between 0 and 7F) to an ALPA.  By default, for compatibility with
+# older drivers, the lpfc driver scans this table from low ALPA to high
+# ALPA.
+#
+# Turning on the scan-down variable (on  = 1, off = 0) will
+# cause the lpfc driver to use an inverted table, effectively
+# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
+#
+# (Note: This "select ID" functionality is a LOOP ONLY characteristic
+# and will not work across a fabric. Also, this parameter takes effect
+# only when an ALPA map is not available.)
+*/
+LPFC_ATTR_R(scan_down, 1, 0, 1,
+            "Start scanning for devices from highest ALPA to lowest");
+
+/*
+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
+# until the timer expires. Value range is [0,255]. Default value is 30.
+# NOTE: this MUST be less than the SCSI Layer command timeout - 1.
+*/
+LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
+            "Seconds driver will hold I/O waiting for a device to come back");
+
+/*
+# lpfc_topology:  link topology for init link
+#            0x0  = attempt loop mode then point-to-point
+#            0x02 = attempt point-to-point mode only
+#            0x04 = attempt loop mode only
+#            0x06 = attempt point-to-point mode then loop
+# Set point-to-point mode if you want to run as an N_Port.
+# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
+# Default value is 0.
+*/
+LPFC_ATTR_R(topology, 0, 0, 6, "Select Fibre Channel topology");
+
+/*
+# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
+# connection.
+#       0  = auto select (default)
+#       1  = 1 Gigabaud
+#       2  = 2 Gigabaud
+#       4  = 4 Gigabaud
+# Value range is [0,4]. Default value is 0.
+*/
+LPFC_ATTR_R(link_speed, 0, 0, 4, "Select link speed");
+
+/*
+# lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
+# Value range is [2,3]. Default value is 3.
+*/
+LPFC_ATTR_R(fcp_class, 3, 2, 3,
+            "Select Fibre Channel class of service for FCP sequences");
+
+/*
+# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
+# is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_RW(use_adisc, 0, 0, 1,
+            "Use ADISC on rediscovery to authenticate FCP devices");
+
+/*
+# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
+# range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
+
+/*
+# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
+# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
+# value [0,63]. cr_count can take value [0,255]. Default value of cr_delay
+# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
+# cr_delay is set to 0.
+*/
+static int lpfc_cr_delay = 0;
+module_param(lpfc_cr_delay, int , 0);
+MODULE_PARM_DESC(lpfc_cr_delay, "A count of milliseconds after which an "
+               "interrupt response is generated");
+
+static int lpfc_cr_count = 1;
+module_param(lpfc_cr_count, int, 0);
+MODULE_PARM_DESC(lpfc_cr_count, "A count of I/O completions after which an "
+               "interrupt response is generated");
+
+/*
+# lpfc_fdmi_on: controls FDMI support.
+#       0 = no FDMI support
+#       1 = support FDMI without attribute of hostname
+#       2 = support FDMI with attribute of hostname
+# Value range [0,2]. Default value is 0.
+*/
+LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
+
+/*
+# Specifies the maximum number of ELS cmds we can have outstanding (for
+# discovery). Value range is [1,64]. Default value = 32.
+*/
+static int lpfc_discovery_threads = 32;
+module_param(lpfc_discovery_threads, int, 0);
+MODULE_PARM_DESC(lpfc_discovery_threads, "Maximum number of ELS commands "
+                "during discovery");
+
+/*
+# lpfc_max_luns: maximum number of LUNs per target that the driver will support
+# Value range is [1,32768]. Default value is 256.
+# NOTE: The SCSI layer will scan each target for this many luns
+*/
+LPFC_ATTR_R(max_luns, 256, 1, 32768,
+            "Maximum number of LUNs per target driver will support");
+
+struct class_device_attribute *lpfc_host_attrs[] = {
+       &class_device_attr_info,
+       &class_device_attr_serialnum,
+       &class_device_attr_modeldesc,
+       &class_device_attr_modelname,
+       &class_device_attr_programtype,
+       &class_device_attr_portnum,
+       &class_device_attr_fwrev,
+       &class_device_attr_hdw,
+       &class_device_attr_option_rom_version,
+       &class_device_attr_state,
+       &class_device_attr_num_discovered_ports,
+       &class_device_attr_lpfc_drvr_version,
+       &class_device_attr_lpfc_log_verbose,
+       &class_device_attr_lpfc_lun_queue_depth,
+       &class_device_attr_lpfc_nodev_tmo,
+       &class_device_attr_lpfc_fcp_class,
+       &class_device_attr_lpfc_use_adisc,
+       &class_device_attr_lpfc_ack0,
+       &class_device_attr_lpfc_topology,
+       &class_device_attr_lpfc_scan_down,
+       &class_device_attr_lpfc_link_speed,
+       &class_device_attr_lpfc_fdmi_on,
+       &class_device_attr_lpfc_max_luns,
+       &class_device_attr_nport_evt_cnt,
+       &class_device_attr_management_version,
+       &class_device_attr_issue_lip,
+       &class_device_attr_board_online,
+       NULL,
+};
+
+static ssize_t
+sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+       size_t buf_off;
+       struct Scsi_Host *host = class_to_shost(container_of(kobj,
+                                            struct class_device, kobj));
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+
+       if ((off + count) > FF_REG_AREA_SIZE)
+               return -ERANGE;
+
+       if (count == 0) return 0;
+
+       if (off % 4 || count % 4 || (unsigned long)buf % 4)
+               return -EINVAL;
+
+       spin_lock_irq(phba->host->host_lock);
+
+       if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
+               spin_unlock_irq(phba->host->host_lock);
+               return -EPERM;
+       }
+
+       for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
+               writel(*((uint32_t *)(buf + buf_off)),
+                      phba->ctrl_regs_memmap_p + off + buf_off);
+
+       spin_unlock_irq(phba->host->host_lock);
+
+       return count;
+}
+
+static ssize_t
+sysfs_ctlreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+       size_t buf_off;
+       uint32_t * tmp_ptr;
+       struct Scsi_Host *host = class_to_shost(container_of(kobj,
+                                            struct class_device, kobj));
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+
+       if (off > FF_REG_AREA_SIZE)
+               return -ERANGE;
+
+       if ((off + count) > FF_REG_AREA_SIZE)
+               count = FF_REG_AREA_SIZE - off;
+
+       if (count == 0) return 0;
+
+       if (off % 4 || count % 4 || (unsigned long)buf % 4)
+               return -EINVAL;
+
+       spin_lock_irq(phba->host->host_lock);
+
+       for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
+               tmp_ptr = (uint32_t *)(buf + buf_off);
+               *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
+       }
+
+       spin_unlock_irq(phba->host->host_lock);
+
+       return count;
+}
+
+static struct bin_attribute sysfs_ctlreg_attr = {
+       .attr = {
+               .name = "ctlreg",
+               .mode = S_IRUSR | S_IWUSR,
+               .owner = THIS_MODULE,
+       },
+       .size = 256,
+       .read = sysfs_ctlreg_read,
+       .write = sysfs_ctlreg_write,
+};
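A hypothetical userspace sketch of reading through this attribute, assuming it appears under /sys/class/scsi_host/hostN/ and the usual <stdio.h>, <stdint.h>, <fcntl.h> and <unistd.h> includes; both the file offset and the length must be multiples of four, matching the checks above:

        uint32_t reg;
        int fd = open("/sys/class/scsi_host/host0/ctlreg", O_RDONLY);

        if (fd >= 0 && pread(fd, &reg, sizeof(reg), 0) == (ssize_t)sizeof(reg))
                printf("register image at offset 0: 0x%08x\n", reg);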
+
+
+static void
+sysfs_mbox_idle (struct lpfc_hba * phba)
+{
+       phba->sysfs_mbox.state = SMBOX_IDLE;
+       phba->sysfs_mbox.offset = 0;
+
+       if (phba->sysfs_mbox.mbox) {
+               mempool_free(phba->sysfs_mbox.mbox,
+                            phba->mbox_mem_pool);
+               phba->sysfs_mbox.mbox = NULL;
+       }
+}
+
+static ssize_t
+sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+       struct Scsi_Host * host =
+               class_to_shost(container_of(kobj, struct class_device, kobj));
+       struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata[0];
+       struct lpfcMboxq * mbox = NULL;
+
+       if ((count + off) > MAILBOX_CMD_SIZE)
+               return -ERANGE;
+
+       if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
+               return -EINVAL;
+
+       if (count == 0)
+               return 0;
+
+       if (off == 0) {
+               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!mbox)
+                       return -ENOMEM;
+
+       }
+
+       spin_lock_irq(host->host_lock);
+
+       if (off == 0) {
+               if (phba->sysfs_mbox.mbox)
+                       mempool_free(mbox, phba->mbox_mem_pool);
+               else
+                       phba->sysfs_mbox.mbox = mbox;
+               phba->sysfs_mbox.state = SMBOX_WRITING;
+       } else {
+               if (phba->sysfs_mbox.state  != SMBOX_WRITING ||
+                   phba->sysfs_mbox.offset != off           ||
+                   phba->sysfs_mbox.mbox   == NULL ) {
+                       sysfs_mbox_idle(phba);
+                       spin_unlock_irq(host->host_lock);
+                       return -EINVAL;
+               }
+       }
+
+       memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+              buf, count);
+
+       phba->sysfs_mbox.offset = off + count;
+
+       spin_unlock_irq(host->host_lock);
+
+       return count;
+}
+
+static ssize_t
+sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+       struct Scsi_Host *host =
+               class_to_shost(container_of(kobj, struct class_device,
+                                           kobj));
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+       int rc;
+
+       if (off > sizeof(MAILBOX_t))
+               return -ERANGE;
+
+       if ((count + off) > sizeof(MAILBOX_t))
+               count = sizeof(MAILBOX_t) - off;
+
+       if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
+               return -EINVAL;
+
+       if (off && count == 0)
+               return 0;
+
+       spin_lock_irq(phba->host->host_lock);
+
+       if (off == 0 &&
+           phba->sysfs_mbox.state  == SMBOX_WRITING &&
+           phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
+
+               switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+                       /* Offline only */
+               case MBX_WRITE_NV:
+               case MBX_INIT_LINK:
+               case MBX_DOWN_LINK:
+               case MBX_CONFIG_LINK:
+               case MBX_CONFIG_RING:
+               case MBX_RESET_RING:
+               case MBX_UNREG_LOGIN:
+               case MBX_CLEAR_LA:
+               case MBX_DUMP_CONTEXT:
+               case MBX_RUN_DIAGS:
+               case MBX_RESTART:
+               case MBX_FLASH_WR_ULA:
+               case MBX_SET_MASK:
+               case MBX_SET_SLIM:
+               case MBX_SET_DEBUG:
+                       if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
+                               printk(KERN_WARNING "mbox_read:Command 0x%x "
+                                      "is illegal in on-line state\n",
+                                      phba->sysfs_mbox.mbox->mb.mbxCommand);
+                               sysfs_mbox_idle(phba);
+                               spin_unlock_irq(phba->host->host_lock);
+                               return -EPERM;
+                       }
+               case MBX_LOAD_SM:
+               case MBX_READ_NV:
+               case MBX_READ_CONFIG:
+               case MBX_READ_RCONFIG:
+               case MBX_READ_STATUS:
+               case MBX_READ_XRI:
+               case MBX_READ_REV:
+               case MBX_READ_LNK_STAT:
+               case MBX_DUMP_MEMORY:
+               case MBX_DOWN_LOAD:
+               case MBX_UPDATE_CFG:
+               case MBX_LOAD_AREA:
+               case MBX_LOAD_EXP_ROM:
+                       break;
+               case MBX_READ_SPARM64:
+               case MBX_READ_LA:
+               case MBX_READ_LA64:
+               case MBX_REG_LOGIN:
+               case MBX_REG_LOGIN64:
+               case MBX_CONFIG_PORT:
+               case MBX_RUN_BIU_DIAG:
+                       printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
+                              phba->sysfs_mbox.mbox->mb.mbxCommand);
+                       sysfs_mbox_idle(phba);
+                       spin_unlock_irq(phba->host->host_lock);
+                       return -EPERM;
+               default:
+                       printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
+                              phba->sysfs_mbox.mbox->mb.mbxCommand);
+                       sysfs_mbox_idle(phba);
+                       spin_unlock_irq(phba->host->host_lock);
+                       return -EPERM;
+               }
+
+               if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+                   (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+
+                       spin_unlock_irq(phba->host->host_lock);
+                       rc = lpfc_sli_issue_mbox (phba,
+                                                 phba->sysfs_mbox.mbox,
+                                                 MBX_POLL);
+                       spin_lock_irq(phba->host->host_lock);
+
+               } else {
+                       spin_unlock_irq(phba->host->host_lock);
+                       rc = lpfc_sli_issue_mbox_wait (phba,
+                                                      phba->sysfs_mbox.mbox,
+                                                      phba->fc_ratov * 2);
+                       spin_lock_irq(phba->host->host_lock);
+               }
+
+               if (rc != MBX_SUCCESS) {
+                       sysfs_mbox_idle(phba);
+                       spin_unlock_irq(host->host_lock);
+                       return -ENODEV;
+               }
+               phba->sysfs_mbox.state = SMBOX_READING;
+       }
+       else if (phba->sysfs_mbox.offset != off ||
+                phba->sysfs_mbox.state  != SMBOX_READING) {
+               printk(KERN_WARNING  "mbox_read: Bad State\n");
+               sysfs_mbox_idle(phba);
+               spin_unlock_irq(host->host_lock);
+               return -EINVAL;
+       }
+
+       memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+
+       phba->sysfs_mbox.offset = off + count;
+
+       if (phba->sysfs_mbox.offset == sizeof(MAILBOX_t))
+               sysfs_mbox_idle(phba);
+
+       spin_unlock_irq(phba->host->host_lock);
+
+       return count;
+}
+
+static struct bin_attribute sysfs_mbox_attr = {
+       .attr = {
+               .name = "mbox",
+               .mode = S_IRUSR | S_IWUSR,
+               .owner = THIS_MODULE,
+       },
+       .size = sizeof(MAILBOX_t),
+       .read = sysfs_mbox_read,
+       .write = sysfs_mbox_write,
+};
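The intended flow, roughly: a management tool writes a mailbox command image starting at offset 0 (moving the state machine to SMBOX_WRITING), then reads the same region back; the first read issues the command to the HBA and returns the completed mailbox, and once sizeof(MAILBOX_t) bytes have been read the attribute drops back to idle. A hypothetical userspace sketch with error handling omitted, where fd is the opened mbox attribute and buf holds the MAILBOX_t image:

        pwrite(fd, buf, MAILBOX_CMD_SIZE, 0);     /* stage the command image       */
        pread(fd, buf, sizeof(MAILBOX_t), 0);     /* issue it and fetch the result */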
+
+int
+lpfc_alloc_sysfs_attr(struct lpfc_hba *phba)
+{
+       struct Scsi_Host *host = phba->host;
+       int error;
+
+       error = sysfs_create_bin_file(&host->shost_classdev.kobj,
+                                                       &sysfs_ctlreg_attr);
+       if (error)
+               goto out;
+
+       error = sysfs_create_bin_file(&host->shost_classdev.kobj,
+                                                       &sysfs_mbox_attr);
+       if (error)
+               goto out_remove_ctlreg_attr;
+
+       return 0;
+out_remove_ctlreg_attr:
+       sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+out:
+       return error;
+}
+
+void
+lpfc_free_sysfs_attr(struct lpfc_hba *phba)
+{
+       struct Scsi_Host *host = phba->host;
+
+       sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
+       sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+}
+
+
+/*
+ * Dynamic FC Host Attributes Support
+ */
+
+static void
+lpfc_get_host_port_id(struct Scsi_Host *shost)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+       /* note: fc_myDID already in cpu endianness */
+       fc_host_port_id(shost) = phba->fc_myDID;
+}
+
+static void
+lpfc_get_host_port_type(struct Scsi_Host *shost)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+
+       spin_lock_irq(shost->host_lock);
+
+       if (phba->hba_state == LPFC_HBA_READY) {
+               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       if (phba->fc_flag & FC_PUBLIC_LOOP)
+                               fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+                       else
+                               fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
+               } else {
+                       if (phba->fc_flag & FC_FABRIC)
+                               fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+                       else
+                               fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+               }
+       } else
+               fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+
+       spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_get_host_port_state(struct Scsi_Host *shost)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+
+       spin_lock_irq(shost->host_lock);
+
+       if (phba->fc_flag & FC_OFFLINE_MODE)
+               fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+       else {
+               switch (phba->hba_state) {
+               case LPFC_INIT_START:
+               case LPFC_INIT_MBX_CMDS:
+               case LPFC_LINK_DOWN:
+                       fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+                       break;
+               case LPFC_LINK_UP:
+               case LPFC_LOCAL_CFG_LINK:
+               case LPFC_FLOGI:
+               case LPFC_FABRIC_CFG_LINK:
+               case LPFC_NS_REG:
+               case LPFC_NS_QRY:
+               case LPFC_BUILD_DISC_LIST:
+               case LPFC_DISC_AUTH:
+               case LPFC_CLEAR_LA:
+               case LPFC_HBA_READY:
+                       /* Link is up; beyond this point, port_type reports the state */
+                       fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+                       break;
+               case LPFC_HBA_ERROR:
+                       fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+                       break;
+               default:
+                       fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+                       break;
+               }
+       }
+
+       spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_get_host_speed(struct Scsi_Host *shost)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+
+       spin_lock_irq(shost->host_lock);
+
+       if (phba->hba_state == LPFC_HBA_READY) {
+               switch(phba->fc_linkspeed) {
+                       case LA_1GHZ_LINK:
+                               fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+                       break;
+                       case LA_2GHZ_LINK:
+                               fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+                       break;
+                       case LA_4GHZ_LINK:
+                               fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+                       break;
+                       default:
+                               fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+                       break;
+               }
+       }
+
+       spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_get_host_fabric_name (struct Scsi_Host *shost)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+       u64 nodename;
+
+       spin_lock_irq(shost->host_lock);
+
+       if ((phba->fc_flag & FC_FABRIC) ||
+           ((phba->fc_topology == TOPOLOGY_LOOP) &&
+            (phba->fc_flag & FC_PUBLIC_LOOP)))
+               memcpy(&nodename, &phba->fc_fabparam.nodeName, sizeof(u64));
+       else
+               /* fabric is local port if there is no F/FL_Port */
+               memcpy(&nodename, &phba->fc_nodename, sizeof(u64));
+
+       spin_unlock_irq(shost->host_lock);
+
+       fc_host_fabric_name(shost) = be64_to_cpu(nodename);
+}
+
+
+static struct fc_host_statistics *
+lpfc_get_stats(struct Scsi_Host *shost)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
+       struct lpfc_sli *psli = &phba->sli;
+       struct fc_host_statistics *hs =
+                       (struct fc_host_statistics *)phba->link_stats;
+       LPFC_MBOXQ_t *pmboxq;
+       MAILBOX_t *pmb;
+       int rc=0;
+
+       pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmboxq)
+               return NULL;
+       memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+       pmb = &pmboxq->mb;
+       pmb->mbxCommand = MBX_READ_STATUS;
+       pmb->mbxOwner = OWN_HOST;
+       pmboxq->context1 = NULL;
+
+       if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+           (!(psli->sli_flag & LPFC_SLI2_ACTIVE))){
+               rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+       } else
+               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+       if (rc != MBX_SUCCESS) {
+               if (pmboxq) {
+                       if (rc == MBX_TIMEOUT)
+                               pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                       else
+                               mempool_free( pmboxq, phba->mbox_mem_pool);
+               }
+               return NULL;
+       }
+
+       hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+       hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
+       hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+       hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256);
+
+       memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+       pmb->mbxCommand = MBX_READ_LNK_STAT;
+       pmb->mbxOwner = OWN_HOST;
+       pmboxq->context1 = NULL;
+
+       if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+           (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) {
+               rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+       } else
+               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+       if (rc != MBX_SUCCESS) {
+               if (pmboxq) {
+                       if (rc == MBX_TIMEOUT)
+                               pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                       else
+                               mempool_free( pmboxq, phba->mbox_mem_pool);
+               }
+               return NULL;
+       }
+
+       hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+       hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+       hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+       hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+       hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+       hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+       hs->error_frames = pmb->un.varRdLnk.crcCnt;
+
+       if (phba->fc_topology == TOPOLOGY_LOOP) {
+               hs->lip_count = (phba->fc_eventTag >> 1);
+               hs->nos_count = -1;
+       } else {
+               hs->lip_count = -1;
+               hs->nos_count = (phba->fc_eventTag >> 1);
+       }
+
+       hs->dumped_frames = -1;
+
+/* FIX ME */
+       /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
+
+       return hs;
+}
+
+
+/*
+ * The LPFC driver treats linkdown handling as target loss events so there
+ * are no sysfs handlers for link_down_tmo.
+ */
+static void
+lpfc_get_starget_port_id(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
+       uint32_t did = -1;
+       struct lpfc_nodelist *ndlp = NULL;
+
+       spin_lock_irq(shost->host_lock);
+       /* Search the mapped list for this target ID */
+       list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+               if (starget->id == ndlp->nlp_sid) {
+                       did = ndlp->nlp_DID;
+                       break;
+               }
+       }
+       spin_unlock_irq(shost->host_lock);
+
+       fc_starget_port_id(starget) = did;
+}
+
+static void
+lpfc_get_starget_node_name(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
+       uint64_t node_name = 0;
+       struct lpfc_nodelist *ndlp = NULL;
+
+       spin_lock_irq(shost->host_lock);
+       /* Search the mapped list for this target ID */
+       list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+               if (starget->id == ndlp->nlp_sid) {
+                       memcpy(&node_name, &ndlp->nlp_nodename,
+                                               sizeof(struct lpfc_name));
+                       break;
+               }
+       }
+       spin_unlock_irq(shost->host_lock);
+
+       fc_starget_node_name(starget) = be64_to_cpu(node_name);
+}
+
+static void
+lpfc_get_starget_port_name(struct scsi_target *starget)
+{
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
+       uint64_t port_name = 0;
+       struct lpfc_nodelist *ndlp = NULL;
+
+       spin_lock_irq(shost->host_lock);
+       /* Search the mapped list for this target ID */
+       list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+               if (starget->id == ndlp->nlp_sid) {
+                       memcpy(&port_name, &ndlp->nlp_portname,
+                                               sizeof(struct lpfc_name));
+                       break;
+               }
+       }
+       spin_unlock_irq(shost->host_lock);
+
+       fc_starget_port_name(starget) = be64_to_cpu(port_name);
+}
+
+static void
+lpfc_get_rport_loss_tmo(struct fc_rport *rport)
+{
+       /*
+        * Return the driver's global value for device loss timeout plus
+        * five seconds to allow the driver's nodev timer to run.
+        */
+       rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
+}
+
+static void
+lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+       /*
+        * The driver doesn't have a per-target timeout setting.  Set
+        * this value globally. lpfc_nodev_tmo should be greater than 0.
+        */
+       if (timeout)
+               lpfc_nodev_tmo = timeout;
+       else
+               lpfc_nodev_tmo = 1;
+       rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
+}
+
+
+#define lpfc_rport_show_function(field, format_string, sz, cast)       \
+static ssize_t                                                         \
+lpfc_show_rport_##field (struct class_device *cdev, char *buf)         \
+{                                                                      \
+       struct fc_rport *rport = transport_class_to_rport(cdev);        \
+       struct lpfc_rport_data *rdata = rport->hostdata;                \
+       return snprintf(buf, sz, format_string,                         \
+               (rdata->target) ? cast rdata->target->field : 0);       \
+}
+
+#define lpfc_rport_rd_attr(field, format_string, sz)                   \
+       lpfc_rport_show_function(field, format_string, sz, )            \
+static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
+
+
+struct fc_function_template lpfc_transport_functions = {
+       /* fixed attributes the driver supports */
+       .show_host_node_name = 1,
+       .show_host_port_name = 1,
+       .show_host_supported_classes = 1,
+       .show_host_supported_fc4s = 1,
+       .show_host_symbolic_name = 1,
+       .show_host_supported_speeds = 1,
+       .show_host_maxframe_size = 1,
+
+       /* dynamic attributes the driver supports */
+       .get_host_port_id = lpfc_get_host_port_id,
+       .show_host_port_id = 1,
+
+       .get_host_port_type = lpfc_get_host_port_type,
+       .show_host_port_type = 1,
+
+       .get_host_port_state = lpfc_get_host_port_state,
+       .show_host_port_state = 1,
+
+       /* active_fc4s is shown but doesn't change (thus no get function) */
+       .show_host_active_fc4s = 1,
+
+       .get_host_speed = lpfc_get_host_speed,
+       .show_host_speed = 1,
+
+       .get_host_fabric_name = lpfc_get_host_fabric_name,
+       .show_host_fabric_name = 1,
+
+       /*
+        * The LPFC driver treats linkdown handling as target loss events
+        * so there are no sysfs handlers for link_down_tmo.
+        */
+
+       .get_fc_host_stats = lpfc_get_stats,
+
+       /* the LPFC driver doesn't support resetting stats yet */
+
+       .dd_fcrport_size = sizeof(struct lpfc_rport_data),
+       .show_rport_maxframe_size = 1,
+       .show_rport_supported_classes = 1,
+
+       .get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo,
+       .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+       .show_rport_dev_loss_tmo = 1,
+
+       .get_starget_port_id  = lpfc_get_starget_port_id,
+       .show_starget_port_id = 1,
+
+       .get_starget_node_name = lpfc_get_starget_node_name,
+       .show_starget_node_name = 1,
+
+       .get_starget_port_name = lpfc_get_starget_port_name,
+       .show_starget_port_name = 1,
+};
+
+void
+lpfc_get_cfgparam(struct lpfc_hba *phba)
+{
+       phba->cfg_log_verbose = lpfc_log_verbose;
+       phba->cfg_cr_delay = lpfc_cr_delay;
+       phba->cfg_cr_count = lpfc_cr_count;
+       phba->cfg_lun_queue_depth = lpfc_lun_queue_depth;
+       phba->cfg_fcp_class = lpfc_fcp_class;
+       phba->cfg_use_adisc = lpfc_use_adisc;
+       phba->cfg_ack0 = lpfc_ack0;
+       phba->cfg_topology = lpfc_topology;
+       phba->cfg_scan_down = lpfc_scan_down;
+       phba->cfg_nodev_tmo = lpfc_nodev_tmo;
+       phba->cfg_link_speed = lpfc_link_speed;
+       phba->cfg_fdmi_on = lpfc_fdmi_on;
+       phba->cfg_discovery_threads = lpfc_discovery_threads;
+       phba->cfg_max_luns = lpfc_max_luns;
+
+       /*
+        * The total number of segments is the configuration value plus 2
+        * since the IOCB needs a command and a response BDE.
+        */
+       phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2;
+
+       /*
+        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * Since sg_tablesize is a module parameter, the sg_dma_buf_size
+        * used to create the sg_dma_buf_pool must be calculated dynamically.
+       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64));
+
+       switch (phba->pcidev->device) {
+       case PCI_DEVICE_ID_LP101:
+       case PCI_DEVICE_ID_BSMB:
+       case PCI_DEVICE_ID_ZSMB:
+               phba->cfg_hba_queue_depth = LPFC_LP101_HBA_Q_DEPTH;
+               break;
+       case PCI_DEVICE_ID_RFLY:
+       case PCI_DEVICE_ID_PFLY:
+       case PCI_DEVICE_ID_BMID:
+       case PCI_DEVICE_ID_ZMID:
+       case PCI_DEVICE_ID_TFLY:
+               phba->cfg_hba_queue_depth = LPFC_LC_HBA_Q_DEPTH;
+               break;
+       default:
+               phba->cfg_hba_queue_depth = LPFC_DFT_HBA_Q_DEPTH;
+       }
+       return;
+}
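
For concreteness, the per-command DMA buffer sized above holds an FCP command, an FCP response, and one BDE per scatter-gather segment, where the segment count is the configured value plus the two command/response BDEs. A worked example of that arithmetic (editor's sketch; the structure sizes below are assumptions for illustration only, not taken from this commit):

#include <stdio.h>

int main(void)
{
	unsigned int fcp_cmnd_sz = 32;     /* assumed sizeof(struct fcp_cmnd) */
	unsigned int fcp_rsp_sz  = 160;    /* assumed sizeof(struct fcp_rsp) */
	unsigned int bde64_sz    = 12;     /* assumed sizeof(struct ulp_bde64) */
	unsigned int sg_seg_cnt  = 64 + 2; /* assumed LPFC_SG_SEG_CNT plus cmd/rsp BDEs */

	printf("cfg_sg_dma_buf_size = %u bytes\n",
	       fcp_cmnd_sz + fcp_rsp_sz + sg_seg_cnt * bde64_sz);
	return 0;
}
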
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
new file mode 100644 (file)
index 0000000..646649f
--- /dev/null
@@ -0,0 +1,97 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_compat.h 1.32 2005/01/25 17:51:45EST sf_support Exp  $
+ *
+ * This file provides macros to aid compilation in the Linux 2.4 kernel
+ * over various platform architectures.
+ */
+
+/*******************************************************************
+Note: the HBA's SLI memory contains little-endian longwords (LWs).
+Thus, to access it from a little-endian host,
+memcpy_toio() and memcpy_fromio() can be used.
+On a big-endian host, however, the data must be copied
+4 bytes at a time using writel() and readl().
+ *******************************************************************/
+
+#if __BIG_ENDIAN
+
+static inline void
+lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
+{
+       uint32_t __iomem *dest32;
+       uint32_t *src32;
+       unsigned int four_bytes;
+
+
+       dest32  = (uint32_t __iomem *) dest;
+       src32  = (uint32_t *) src;
+
+       /* write input bytes, 4 bytes at a time */
+       for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
+               writel( *src32, dest32);
+               readl(dest32); /* flush */
+               dest32++;
+               src32++;
+       }
+
+       return;
+}
+
+static inline void
+lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
+{
+       uint32_t *dest32;
+       uint32_t __iomem *src32;
+       unsigned int four_bytes;
+
+
+       dest32  = (uint32_t *) dest;
+       src32  = (uint32_t __iomem *) src;
+
+       /* read input bytes, 4 bytes at a time */
+       for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
+               *dest32 = readl( src32);
+               dest32++;
+               src32++;
+       }
+
+       return;
+}
+
+#else
+
+static inline void
+lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
+{
+       /* actually returns 1 byte past dest */
+       memcpy_toio( dest, src, bytes);
+}
+
+static inline void
+lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
+{
+       /* actually returns 1 byte past dest */
+       memcpy_fromio( dest, src, bytes);
+}
+
+#endif /* __BIG_ENDIAN */
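
A brief usage sketch of the helpers above (editor's illustration, not part of this commit): callers use the same functions regardless of host endianness. The scratch buffer is hypothetical, and slim_memmap_p is assumed to be the driver's mapped SLIM base.

static void example_slim_roundtrip(struct lpfc_hba *phba)
{
	uint32_t scratch[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
	uint32_t readback[4];
	void __iomem *slim = phba->slim_memmap_p;  /* assumed mapped SLIM base */

	/* Stage four words into SLIM, then read them back; on big-endian
	 * hosts this goes through writel()/readl(), otherwise through
	 * memcpy_toio()/memcpy_fromio(). */
	lpfc_memcpy_to_slim(slim, scratch, sizeof(scratch));
	lpfc_memcpy_from_slim(readback, slim, sizeof(readback));
}
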
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
new file mode 100644 (file)
index 0000000..c504477
--- /dev/null
@@ -0,0 +1,216 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_crtn.h 1.166 2005/04/07 08:46:47EDT sf_support Exp  $
+ */
+
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
+                struct lpfc_dmabuf *mp);
+void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
+                  uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+
+
+int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+
+void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_nlp_plogi(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_adisc(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_unmapped(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_list(struct lpfc_hba *, struct lpfc_nodelist *, int);
+void lpfc_set_disctmo(struct lpfc_hba *);
+int lpfc_can_disctmo(struct lpfc_hba *);
+int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
+                   struct lpfc_iocbq *, struct lpfc_nodelist *);
+int lpfc_nlp_remove(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
+struct lpfc_nodelist *lpfc_setup_rscn_node(struct lpfc_hba *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_hba *);
+void lpfc_disc_start(struct lpfc_hba *);
+void lpfc_disc_flush_list(struct lpfc_hba *);
+void lpfc_disc_timeout(unsigned long);
+void lpfc_scan_timeout(unsigned long);
+
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
+struct lpfc_nodelist *lpfc_findnode_remove_rpi(struct lpfc_hba * phba,
+                                              uint16_t rpi);
+void lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                     uint16_t rpi);
+
+int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
+int lpfc_do_work(void *);
+int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
+                           uint32_t);
+
+uint32_t lpfc_cmpl_prli_reglogin_issue(struct lpfc_hba *,
+                                      struct lpfc_nodelist *, void *,
+                                      uint32_t);
+uint32_t lpfc_cmpl_plogi_prli_issue(struct lpfc_hba *, struct lpfc_nodelist *,
+                                   void *, uint32_t);
+
+int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
+                    struct serv_parm *, uint32_t);
+int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp,
+                       int);
+int lpfc_els_abort_flogi(struct lpfc_hba *);
+int lpfc_initial_flogi(struct lpfc_hba *);
+int lpfc_issue_els_plogi(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t);
+int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+                    struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
+int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+                       struct lpfc_nodelist *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+                          struct lpfc_nodelist *);
+int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+                         struct lpfc_nodelist *);
+void lpfc_els_retry_delay(unsigned long);
+void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
+void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+                         struct lpfc_iocbq *);
+int lpfc_els_handle_rscn(struct lpfc_hba *);
+int lpfc_els_flush_rscn(struct lpfc_hba *);
+int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t);
+void lpfc_els_flush_cmd(struct lpfc_hba *);
+int lpfc_els_disc_adisc(struct lpfc_hba *);
+int lpfc_els_disc_plogi(struct lpfc_hba *);
+void lpfc_els_timeout(unsigned long);
+void lpfc_els_timeout_handler(struct lpfc_hba *);
+
+void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+                        struct lpfc_iocbq *);
+int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+void lpfc_fdmi_tmo(unsigned long);
+void lpfc_fdmi_tmo_handler(struct lpfc_hba *);
+
+int lpfc_config_port_prep(struct lpfc_hba *);
+int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_hba_down_prep(struct lpfc_hba *);
+void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
+void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
+uint8_t *lpfc_get_lpfchba_info(struct lpfc_hba *, uint8_t *);
+int lpfc_fcp_abort(struct lpfc_hba *, int, int, int);
+int lpfc_online(struct lpfc_hba *);
+int lpfc_offline(struct lpfc_hba *);
+
+
+int lpfc_sli_setup(struct lpfc_hba *);
+int lpfc_sli_queue_setup(struct lpfc_hba *);
+void lpfc_slim_access(struct lpfc_hba *);
+
+void lpfc_handle_eratt(struct lpfc_hba *);
+void lpfc_handle_latt(struct lpfc_hba *);
+irqreturn_t lpfc_intr_handler(int, void *, struct pt_regs *);
+
+void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
+void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+
+int lpfc_mem_alloc(struct lpfc_hba *);
+void lpfc_mem_free(struct lpfc_hba *);
+
+int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_hba_down(struct lpfc_hba *);
+int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+int lpfc_sli_handle_mb_event(struct lpfc_hba *);
+int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+                                   struct lpfc_sli_ring *, uint32_t);
+void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+                       struct lpfc_iocbq *, uint32_t);
+void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
+int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+                            struct lpfc_dmabuf *);
+struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
+                                            struct lpfc_sli_ring *,
+                                            dma_addr_t);
+int lpfc_sli_issue_abort_iotag32(struct lpfc_hba *, struct lpfc_sli_ring *,
+                                struct lpfc_iocbq *);
+int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
+                         uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
+                           uint64_t, uint32_t, lpfc_ctx_cmd);
+
+void lpfc_mbox_timeout(unsigned long);
+void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+void lpfc_map_fcp_cmnd_to_bpl(struct lpfc_hba *, struct lpfc_scsi_buf *);
+void lpfc_free_scsi_cmd(struct lpfc_scsi_buf *);
+uint32_t lpfc_os_timeout_transform(struct lpfc_hba *, uint32_t);
+
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order,
+                                       uint32_t did);
+
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+                        uint32_t timeout);
+
+int lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
+                                          struct lpfc_sli_ring * pring,
+                                          struct lpfc_iocbq * piocb,
+                                          uint32_t flag,
+                                          struct lpfc_iocbq * prspiocbq,
+                                          uint32_t timeout);
+void lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
+                                     struct lpfc_iocbq * queue1,
+                                     struct lpfc_iocbq * queue2);
+
+void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+
+/* Function prototypes. */
+const char* lpfc_info(struct Scsi_Host *);
+void lpfc_get_cfgparam(struct lpfc_hba *);
+int lpfc_alloc_sysfs_attr(struct lpfc_hba *);
+void lpfc_free_sysfs_attr(struct lpfc_hba *);
+extern struct class_device_attribute *lpfc_host_attrs[];
+extern struct scsi_host_template lpfc_template;
+extern struct fc_function_template lpfc_transport_functions;
+
+void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
+
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
+#define HBA_EVENT_RSCN                   5
+#define HBA_EVENT_LINK_UP                2
+#define HBA_EVENT_LINK_DOWN              3
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
new file mode 100644 (file)
index 0000000..c40cb23
--- /dev/null
@@ -0,0 +1,1237 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_ct.c 1.161 2005/04/13 11:59:01EDT sf_support Exp  $
+ *
+ * Fibre Channel SCSI LAN Device Driver CT support
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+
+#define HBA_PORTSPEED_UNKNOWN               0  /* Unknown - transceiver
+                                                * incapable of reporting */
+#define HBA_PORTSPEED_1GBIT                 1  /* 1 GBit/sec */
+#define HBA_PORTSPEED_2GBIT                 2  /* 2 GBit/sec */
+#define HBA_PORTSPEED_4GBIT                 8   /* 4 GBit/sec */
+#define HBA_PORTSPEED_8GBIT                16   /* 8 GBit/sec */
+#define HBA_PORTSPEED_10GBIT                4  /* 10 GBit/sec */
+#define HBA_PORTSPEED_NOT_NEGOTIATED        5  /* Speed not established */
+
+#define FOURBYTES      4
+
+
+static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+
+/*
+ * lpfc_ct_unsol_event
+ */
+void
+lpfc_ct_unsol_event(struct lpfc_hba * phba,
+                   struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
+{
+
+       struct lpfc_iocbq *next_piocbq;
+       struct lpfc_dmabuf *pmbuf = NULL;
+       struct lpfc_dmabuf *matp, *next_matp;
+       uint32_t ctx = 0, size = 0, cnt = 0;
+       IOCB_t *icmd = &piocbq->iocb;
+       IOCB_t *save_icmd = icmd;
+       int i, go_exit = 0;
+       struct list_head head;
+
+       if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+               ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+               /* Not enough posted buffers; Try posting more buffers */
+               phba->fc_stat.NoRcvBuf++;
+               lpfc_post_buffer(phba, pring, 0, 1);
+               return;
+       }
+
+       /* If there are no BDEs associated with this IOCB,
+        * there is nothing to do.
+        */
+       if (icmd->ulpBdeCount == 0)
+               return;
+
+       INIT_LIST_HEAD(&head);
+       list_add_tail(&head, &piocbq->list);
+
+       list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
+               icmd = &piocbq->iocb;
+               if (ctx == 0)
+                       ctx = (uint32_t) (icmd->ulpContext);
+               if (icmd->ulpBdeCount == 0)
+                       continue;
+
+               for (i = 0; i < icmd->ulpBdeCount; i++) {
+                       matp = lpfc_sli_ringpostbuf_get(phba, pring,
+                                                       getPaddr(icmd->un.
+                                                                cont64[i].
+                                                                addrHigh,
+                                                                icmd->un.
+                                                                cont64[i].
+                                                                addrLow));
+                       if (!matp) {
+                               /* Insert lpfc log message here */
+                               lpfc_post_buffer(phba, pring, cnt, 1);
+                               go_exit = 1;
+                               goto ct_unsol_event_exit_piocbq;
+                       }
+
+                       /* Typically for Unsolicited CT requests */
+                       if (!pmbuf) {
+                               pmbuf = matp;
+                               INIT_LIST_HEAD(&pmbuf->list);
+                       } else
+                               list_add_tail(&matp->list, &pmbuf->list);
+
+                       size += icmd->un.cont64[i].tus.f.bdeSize;
+                       cnt++;
+               }
+
+               icmd->ulpBdeCount = 0;
+       }
+
+       lpfc_post_buffer(phba, pring, cnt, 1);
+       if (save_icmd->ulpStatus) {
+               go_exit = 1;
+       }
+
+ct_unsol_event_exit_piocbq:
+       if (pmbuf) {
+               list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
+                       lpfc_mbuf_free(phba, matp->virt, matp->phys);
+                       list_del(&matp->list);
+                       kfree(matp);
+               }
+               lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
+               kfree(pmbuf);
+       }
+       return;
+}
+
+static void
+lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
+{
+       struct lpfc_dmabuf *mlast, *next_mlast;
+
+       list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
+               lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+               list_del(&mlast->list);
+               kfree(mlast);
+       }
+       lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+       kfree(mlist);
+       return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
+                 uint32_t size, int *entries)
+{
+       struct lpfc_dmabuf *mlist = NULL;
+       struct lpfc_dmabuf *mp;
+       int cnt, i = 0;
+
+       /* We get chunks of FCELSSIZE */
+       cnt = size > FCELSSIZE ? FCELSSIZE: size;
+
+       while (size) {
+               /* Allocate buffer for rsp payload */
+               mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+               if (!mp) {
+                       if (mlist)
+                               lpfc_free_ct_rsp(phba, mlist);
+                       return NULL;
+               }
+
+               INIT_LIST_HEAD(&mp->list);
+
+               if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
+                       mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+               else
+                       mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+
+               if (!mp->virt) {
+                       kfree(mp);
+                       lpfc_free_ct_rsp(phba, mlist);
+                       return NULL;
+               }
+
+               /* Queue it to a linked list */
+               if (!mlist)
+                       mlist = mp;
+               else
+                       list_add_tail(&mp->list, &mlist->list);
+
+               bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+               /* build buffer ptr list for IOCB */
+               bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+               bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+               bpl->tus.f.bdeSize = (uint16_t) cnt;
+               bpl->tus.w = le32_to_cpu(bpl->tus.w);
+               bpl++;
+
+               i++;
+               size -= cnt;
+       }
+
+       *entries = i;
+       return mlist;
+}
+
+static int
+lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
+            struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
+            void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+                    struct lpfc_iocbq *),
+            struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+            uint32_t tmo)
+{
+
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       IOCB_t *icmd;
+       struct lpfc_iocbq *geniocb = NULL;
+
+       /* Allocate buffer for  command iocb */
+       spin_lock_irq(phba->host->host_lock);
+       list_remove_head(lpfc_iocb_list, geniocb, struct lpfc_iocbq, list);
+       spin_unlock_irq(phba->host->host_lock);
+
+       if (geniocb == NULL)
+               return 1;
+       memset(geniocb, 0, sizeof (struct lpfc_iocbq));
+
+       icmd = &geniocb->iocb;
+       icmd->un.genreq64.bdl.ulpIoTag32 = 0;
+       icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+       icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+       icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+       icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
+
+       if (usr_flg)
+               geniocb->context3 = NULL;
+       else
+               geniocb->context3 = (uint8_t *) bmp;
+
+       /* Save for completion so we can release these resources */
+       geniocb->context1 = (uint8_t *) inp;
+       geniocb->context2 = (uint8_t *) outp;
+
+       /* Fill in payload, bp points to frame payload */
+       icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+
+       /* Fill in rest of iocb */
+       icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+       icmd->un.genreq64.w5.hcsw.Dfctl = 0;
+       icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
+       icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
+
+       if (!tmo)
+               tmo = (2 * phba->fc_ratov) + 1;
+       icmd->ulpTimeout = tmo;
+       icmd->ulpBdeCount = 1;
+       icmd->ulpLe = 1;
+       icmd->ulpClass = CLASS3;
+       icmd->ulpContext = ndlp->nlp_rpi;
+
+       /* Issue GEN REQ IOCB for NPORT <did> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0119 Issue GEN REQ IOCB for NPORT x%x "
+                       "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
+                       icmd->ulpIoTag, phba->hba_state);
+       geniocb->iocb_cmpl = cmpl;
+       geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+       spin_lock_irq(phba->host->host_lock);
+       if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
+               list_add_tail(&geniocb->list, lpfc_iocb_list);
+               spin_unlock_irq(phba->host->host_lock);
+               return 1;
+       }
+       spin_unlock_irq(phba->host->host_lock);
+
+       return 0;
+}
+
+static int
+lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
+           struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
+           void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+                         struct lpfc_iocbq *),
+           uint32_t rsp_size)
+{
+       struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
+       struct lpfc_dmabuf *outmp;
+       int cnt = 0, status;
+       int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
+               CommandResponse.bits.CmdRsp;
+
+       bpl++;                  /* Skip past ct request */
+
+       /* Put buffer(s) for ct rsp in bpl */
+       outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
+       if (!outmp)
+               return -ENOMEM;
+
+       status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
+                             cnt+1, 0);
+       if (status) {
+               lpfc_free_ct_rsp(phba, outmp);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static int
+lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
+{
+       struct lpfc_sli_ct_request *Response =
+               (struct lpfc_sli_ct_request *) mp->virt;
+       struct lpfc_nodelist *ndlp = NULL;
+       struct lpfc_dmabuf *mlast, *next_mp;
+       uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
+       uint32_t Did;
+       uint32_t CTentry;
+       int Cnt;
+       struct list_head head;
+
+       lpfc_set_disctmo(phba);
+
+       Cnt = Size  > FCELSSIZE ? FCELSSIZE : Size;
+
+       list_add_tail(&head, &mp->list);
+       list_for_each_entry_safe(mp, next_mp, &head, list) {
+               mlast = mp;
+
+               Size -= Cnt;
+
+               if (!ctptr)
+                       ctptr = (uint32_t *) mlast->virt;
+               else
+                       Cnt -= 16;      /* subtract length of CT header */
+
+               /* Loop through entire NameServer list of DIDs */
+               while (Cnt) {
+
+                       /* Get next DID from NameServer List */
+                       CTentry = *ctptr++;
+                       Did = ((be32_to_cpu(CTentry)) & Mask_DID);
+
+                       ndlp = NULL;
+                       if (Did != phba->fc_myDID) {
+                               /* Check for rscn processing or not */
+                               ndlp = lpfc_setup_disc_node(phba, Did);
+                       }
+                       /* Mark all node table entries that are in the
+                          Nameserver */
+                       if (ndlp) {
+                               /* NameServer Rsp */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                                               "%d:0238 Process x%x NameServer"
+                                               " Rsp Data: x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               Did, ndlp->nlp_flag,
+                                               phba->fc_flag,
+                                               phba->fc_rscn_id_cnt);
+                       } else {
+                               /* NameServer Rsp */
+                               lpfc_printf_log(phba,
+                                               KERN_INFO,
+                                               LOG_DISCOVERY,
+                                               "%d:0239 Skip x%x NameServer "
+                                               "Rsp Data: x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               Did, Size, phba->fc_flag,
+                                               phba->fc_rscn_id_cnt);
+                       }
+
+                       if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
+                               goto nsout1;
+                       Cnt -= sizeof (uint32_t);
+               }
+               ctptr = NULL;
+
+       }
+
+nsout1:
+       list_del(&head);
+
+       /* Here we are finished in the RSCN case */
+       if (phba->hba_state == LPFC_HBA_READY) {
+               lpfc_els_flush_rscn(phba);
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       return 0;
+}
+
+
+
+
+static void
+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                       struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp;
+       struct lpfc_sli *psli;
+       struct lpfc_dmabuf *bmp;
+       struct lpfc_dmabuf *inp;
+       struct lpfc_dmabuf *outp;
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_sli_ct_request *CTrsp;
+
+       psli = &phba->sli;
+       /* we pass cmdiocb to state machine which needs rspiocb as well */
+       cmdiocb->context_un.rsp_iocb = rspiocb;
+
+       inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+       outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+       bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
+
+       irsp = &rspiocb->iocb;
+       if (irsp->ulpStatus) {
+               if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                       ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
+                        (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
+                       goto out;
+               }
+
+               /* Check for retry */
+               if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+                       phba->fc_ns_retry++;
+                       /* CT command is being retried */
+                       ndlp =
+                           lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+                                             NameServer_DID);
+                       if (ndlp) {
+                               if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
+                                   0) {
+                                       goto out;
+                               }
+                       }
+               }
+       } else {
+               /* Good status, continue checking */
+               CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+               if (CTrsp->CommandResponse.bits.CmdRsp ==
+                   be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+                       lpfc_ns_rsp(phba, outp,
+                                   (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+               } else if (CTrsp->CommandResponse.bits.CmdRsp ==
+                          be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+                       /* NameServer Rsp Error */
+                       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                                       "%d:0240 NameServer Rsp Error "
+                                       "Data: x%x x%x x%x x%x\n",
+                                       phba->brd_no,
+                                       CTrsp->CommandResponse.bits.CmdRsp,
+                                       (uint32_t) CTrsp->ReasonCode,
+                                       (uint32_t) CTrsp->Explanation,
+                                       phba->fc_flag);
+               } else {
+                       /* NameServer Rsp Error */
+                       lpfc_printf_log(phba,
+                                       KERN_INFO,
+                                       LOG_DISCOVERY,
+                                       "%d:0241 NameServer Rsp Error "
+                                       "Data: x%x x%x x%x x%x\n",
+                                       phba->brd_no,
+                                       CTrsp->CommandResponse.bits.CmdRsp,
+                                       (uint32_t) CTrsp->ReasonCode,
+                                       (uint32_t) CTrsp->Explanation,
+                                       phba->fc_flag);
+               }
+       }
+       /* Link up / RSCN discovery */
+       lpfc_disc_start(phba);
+out:
+       lpfc_free_ct_rsp(phba, outp);
+       lpfc_mbuf_free(phba, inp->virt, inp->phys);
+       lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+       kfree(inp);
+       kfree(bmp);
+       spin_lock_irq(phba->host->host_lock);
+       list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+       spin_unlock_irq(phba->host->host_lock);
+       return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                       struct lpfc_iocbq * rspiocb)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_dmabuf *bmp;
+       struct lpfc_dmabuf *inp;
+       struct lpfc_dmabuf *outp;
+       IOCB_t *irsp;
+       struct lpfc_sli_ct_request *CTrsp;
+
+       psli = &phba->sli;
+       /* we pass cmdiocb to state machine which needs rspiocb as well */
+       cmdiocb->context_un.rsp_iocb = rspiocb;
+
+       inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+       outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+       bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
+       irsp = &rspiocb->iocb;
+
+       CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+
+       /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0209 RFT request completes ulpStatus x%x "
+                       "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus,
+                       CTrsp->CommandResponse.bits.CmdRsp);
+
+       lpfc_free_ct_rsp(phba, outp);
+       lpfc_mbuf_free(phba, inp->virt, inp->phys);
+       lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+       kfree(inp);
+       kfree(bmp);
+       spin_lock_irq(phba->host->host_lock);
+       list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+       spin_unlock_irq(phba->host->host_lock);
+       return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                       struct lpfc_iocbq * rspiocb)
+{
+       lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+       return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                        struct lpfc_iocbq * rspiocb)
+{
+       lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+       return;
+}
+
+void
+lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
+{
+       char fwrev[16];
+
+       lpfc_decode_firmware_rev(phba, fwrev, 0);
+
+       if (phba->Port[0]) {
+               sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
+                       phba->Port, fwrev, lpfc_release_version);
+       } else {
+               sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
+                       fwrev, lpfc_release_version);
+       }
+}
+
+/*
+ * lpfc_ns_cmd
+ * Description:
+ *    Issue a command to the NameServer, e.g.:
+ *       SLI_CTNS_GID_FT
+ *       SLI_CTNS_RFT_ID
+ */
+int
+lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+{
+       struct lpfc_dmabuf *mp, *bmp;
+       struct lpfc_sli_ct_request *CtReq;
+       struct ulp_bde64 *bpl;
+       void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+                     struct lpfc_iocbq *) = NULL;
+       uint32_t rsp_size = 1024;
+
+       /* fill in BDEs for command */
+       /* Allocate buffer for command payload */
+       mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+       if (!mp)
+               goto ns_cmd_exit;
+
+       INIT_LIST_HEAD(&mp->list);
+       mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+       if (!mp->virt)
+               goto ns_cmd_free_mp;
+
+       /* Allocate buffer for Buffer ptr list */
+       bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+       if (!bmp)
+               goto ns_cmd_free_mpvirt;
+
+       INIT_LIST_HEAD(&bmp->list);
+       bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
+       if (!bmp->virt)
+               goto ns_cmd_free_bmp;
+
+       /* NameServer Req */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_DISCOVERY,
+                       "%d:0236 NameServer Req Data: x%x x%x x%x\n",
+                       phba->brd_no, cmdcode, phba->fc_flag,
+                       phba->fc_rscn_id_cnt);
+
+       bpl = (struct ulp_bde64 *) bmp->virt;
+       memset(bpl, 0, sizeof(struct ulp_bde64));
+       bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+       bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+       bpl->tus.f.bdeFlags = 0;
+       if (cmdcode == SLI_CTNS_GID_FT)
+               bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+       else if (cmdcode == SLI_CTNS_RFT_ID)
+               bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
+       else if (cmdcode == SLI_CTNS_RNN_ID)
+               bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+       else if (cmdcode == SLI_CTNS_RSNN_NN)
+               bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+       else
+               bpl->tus.f.bdeSize = 0;
+       bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+       CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+       memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request));
+       CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+       CtReq->RevisionId.bits.InId = 0;
+       CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
+       CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
+       CtReq->CommandResponse.bits.Size = 0;
+       switch (cmdcode) {
+       case SLI_CTNS_GID_FT:
+               CtReq->CommandResponse.bits.CmdRsp =
+                   be16_to_cpu(SLI_CTNS_GID_FT);
+               CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
+               if (phba->hba_state < LPFC_HBA_READY)
+                       phba->hba_state = LPFC_NS_QRY;
+               lpfc_set_disctmo(phba);
+               cmpl = lpfc_cmpl_ct_cmd_gid_ft;
+               rsp_size = FC_MAX_NS_RSP;
+               break;
+
+       case SLI_CTNS_RFT_ID:
+               CtReq->CommandResponse.bits.CmdRsp =
+                   be16_to_cpu(SLI_CTNS_RFT_ID);
+               CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID);
+               CtReq->un.rft.fcpReg = 1;
+               cmpl = lpfc_cmpl_ct_cmd_rft_id;
+               break;
+
+       case SLI_CTNS_RNN_ID:
+               CtReq->CommandResponse.bits.CmdRsp =
+                   be16_to_cpu(SLI_CTNS_RNN_ID);
+               CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID);
+               memcpy(CtReq->un.rnn.wwnn,  &phba->fc_nodename,
+                      sizeof (struct lpfc_name));
+               cmpl = lpfc_cmpl_ct_cmd_rnn_id;
+               break;
+
+       case SLI_CTNS_RSNN_NN:
+               CtReq->CommandResponse.bits.CmdRsp =
+                   be16_to_cpu(SLI_CTNS_RSNN_NN);
+               memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename,
+                      sizeof (struct lpfc_name));
+               lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
+               CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
+               cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
+               break;
+       }
+
+       if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size))
+               /* On success, the cmpl function will free the buffers */
+               return 0;
+
+       lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ns_cmd_free_bmp:
+       kfree(bmp);
+ns_cmd_free_mpvirt:
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ns_cmd_free_mp:
+       kfree(mp);
+ns_cmd_exit:
+       return 1;
+}
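
Putting lpfc_ns_cmd() in context (editor's sketch of a typical call sequence, not part of this commit): error handling is elided, and ns_ndlp is assumed to be the already logged-in NameServer node.

static void example_nameserver_query(struct lpfc_hba *phba,
				     struct lpfc_nodelist *ns_ndlp)
{
	/* Register the FC4 types this port supports with the NameServer... */
	lpfc_ns_cmd(phba, ns_ndlp, SLI_CTNS_RFT_ID);

	/* ...then request every port that registered FCP support.  The
	 * completion handler lpfc_cmpl_ct_cmd_gid_ft() parses the reply via
	 * lpfc_ns_rsp() and kicks off discovery with lpfc_disc_start(). */
	lpfc_ns_cmd(phba, ns_ndlp, SLI_CTNS_GID_FT);
}
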
+
+static void
+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
+                     struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
+{
+       struct lpfc_dmabuf *bmp = cmdiocb->context3;
+       struct lpfc_dmabuf *inp = cmdiocb->context1;
+       struct lpfc_dmabuf *outp = cmdiocb->context2;
+       struct lpfc_sli_ct_request *CTrsp = outp->virt;
+       struct lpfc_sli_ct_request *CTcmd = inp->virt;
+       struct lpfc_nodelist *ndlp;
+       uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+       uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+
+       ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
+       if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+               /* FDMI rsp failed */
+               lpfc_printf_log(phba,
+                               KERN_INFO,
+                               LOG_DISCOVERY,
+                               "%d:0220 FDMI rsp failed Data: x%x\n",
+                               phba->brd_no,
+                              be16_to_cpu(fdmi_cmd));
+       }
+
+       switch (be16_to_cpu(fdmi_cmd)) {
+       case SLI_MGMT_RHBA:
+               lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA);
+               break;
+
+       case SLI_MGMT_RPA:
+               break;
+
+       case SLI_MGMT_DHBA:
+               lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT);
+               break;
+
+       case SLI_MGMT_DPRT:
+               lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA);
+               break;
+       }
+
+       lpfc_free_ct_rsp(phba, outp);
+       lpfc_mbuf_free(phba, inp->virt, inp->phys);
+       lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+       kfree(inp);
+       kfree(bmp);
+       spin_lock_irq(phba->host->host_lock);
+       list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+       spin_unlock_irq(phba->host->host_lock);
+       return;
+}
+int
+lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+{
+       struct lpfc_dmabuf *mp, *bmp;
+       struct lpfc_sli_ct_request *CtReq;
+       struct ulp_bde64 *bpl;
+       uint32_t size;
+       REG_HBA *rh;
+       PORT_ENTRY *pe;
+       REG_PORT_ATTRIBUTE *pab;
+       ATTRIBUTE_BLOCK *ab;
+       ATTRIBUTE_ENTRY *ae;
+       void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+                     struct lpfc_iocbq *);
+
+
+       /* fill in BDEs for command */
+       /* Allocate buffer for command payload */
+       mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+       if (!mp)
+               goto fdmi_cmd_exit;
+
+       mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+       if (!mp->virt)
+               goto fdmi_cmd_free_mp;
+
+       /* Allocate buffer for Buffer ptr list */
+       bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+       if (!bmp)
+               goto fdmi_cmd_free_mpvirt;
+
+       bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
+       if (!bmp->virt)
+               goto fdmi_cmd_free_bmp;
+
+       INIT_LIST_HEAD(&mp->list);
+       INIT_LIST_HEAD(&bmp->list);
+
+       /* FDMI request */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_DISCOVERY,
+                       "%d:0218 FDMI Request Data: x%x x%x x%x\n",
+                       phba->brd_no,
+                      phba->fc_flag, phba->hba_state, cmdcode);
+
+       CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+
+       memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
+       CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+       CtReq->RevisionId.bits.InId = 0;
+
+       CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+       CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
+       size = 0;
+
+       switch (cmdcode) {
+       case SLI_MGMT_RHBA:
+               {
+                       lpfc_vpd_t *vp = &phba->vpd;
+                       uint32_t i, j, incr;
+                       int len;
+
+                       CtReq->CommandResponse.bits.CmdRsp =
+                           be16_to_cpu(SLI_MGMT_RHBA);
+                       CtReq->CommandResponse.bits.Size = 0;
+                       rh = (REG_HBA *) & CtReq->un.PortID;
+                       memcpy(&rh->hi.PortName, &phba->fc_sparam.portName,
+                              sizeof (struct lpfc_name));
+                       /* One entry (port) per adapter */
+                       rh->rpl.EntryCnt = be32_to_cpu(1);
+                       memcpy(&rh->rpl.pe, &phba->fc_sparam.portName,
+                              sizeof (struct lpfc_name));
+
+                       /* point to the HBA attribute block */
+                       size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
+                       ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
+                       ab->EntryCnt = 0;
+
+                       /* Point to the beginning of the first HBA attribute
+                          entry */
+                       /* #1 HBA attribute entry */
+                       size += FOURBYTES;
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
+                       ae->ad.bits.AttrLen =  be16_to_cpu(FOURBYTES
+                                               + sizeof (struct lpfc_name));
+                       memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName,
+                              sizeof (struct lpfc_name));
+                       ab->EntryCnt++;
+                       size += FOURBYTES + sizeof (struct lpfc_name);
+
+                       /* #2 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
+                       strcpy(ae->un.Manufacturer, "Emulex Corporation");
+                       len = strlen(ae->un.Manufacturer);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #3 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
+                       strcpy(ae->un.SerialNumber, phba->SerialNumber);
+                       len = strlen(ae->un.SerialNumber);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #4 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(MODEL);
+                       strcpy(ae->un.Model, phba->ModelName);
+                       len = strlen(ae->un.Model);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #5 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
+                       strcpy(ae->un.ModelDescription, phba->ModelDesc);
+                       len = strlen(ae->un.ModelDescription);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #6 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
+                       /* Convert JEDEC ID to ascii for hardware version */
+                       incr = vp->rev.biuRev;
+                       for (i = 0; i < 8; i++) {
+                               j = (incr & 0xf);
+                               if (j <= 9)
+                                       ae->un.HardwareVersion[7 - i] =
+                                           (char)((uint8_t) 0x30 +
+                                                  (uint8_t) j);
+                               else
+                                       ae->un.HardwareVersion[7 - i] =
+                                           (char)((uint8_t) 0x61 +
+                                                  (uint8_t) (j - 10));
+                               incr = (incr >> 4);
+                       }
+                       ab->EntryCnt++;
+                       size += FOURBYTES + 8;
+
+                       /* #7 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
+                       strcpy(ae->un.DriverVersion, lpfc_release_version);
+                       len = strlen(ae->un.DriverVersion);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #8 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
+                       strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
+                       len = strlen(ae->un.OptionROMVersion);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #9 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
+                       lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
+                               1);
+                       len = strlen(ae->un.FirmwareVersion);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #10 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
+                       sprintf(ae->un.OsNameVersion, "%s %s %s",
+                               system_utsname.sysname, system_utsname.release,
+                               system_utsname.version);
+                       len = strlen(ae->un.OsNameVersion);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       /* #11 HBA attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+                       ae->un.MaxCTPayloadLen = (65 * 4096);
+                       ab->EntryCnt++;
+                       size += FOURBYTES + 4;
+
+                       ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
+                       /* Total size */
+                       size = GID_REQUEST_SZ - 4 + size;
+               }
+               break;
+
+       case SLI_MGMT_RPA:
+               {
+                       lpfc_vpd_t *vp;
+                       struct serv_parm *hsp;
+                       int len;
+
+                       vp = &phba->vpd;
+
+                       CtReq->CommandResponse.bits.CmdRsp =
+                           be16_to_cpu(SLI_MGMT_RPA);
+                       CtReq->CommandResponse.bits.Size = 0;
+                       pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
+                       size = sizeof (struct lpfc_name) + FOURBYTES;
+                       memcpy((uint8_t *) & pab->PortName,
+                              (uint8_t *) & phba->fc_sparam.portName,
+                              sizeof (struct lpfc_name));
+                       pab->ab.EntryCnt = 0;
+
+                       /* #1 Port attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
+                       ae->un.SupportFC4Types[2] = 1;
+                       ae->un.SupportFC4Types[7] = 1;
+                       pab->ab.EntryCnt++;
+                       size += FOURBYTES + 32;
+
+                       /* #2 Port attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+                       if (FC_JEDEC_ID(vp->rev.biuRev) == VIPER_JEDEC_ID)
+                               ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT;
+                       else if (FC_JEDEC_ID(vp->rev.biuRev) == HELIOS_JEDEC_ID)
+                               ae->un.SupportSpeed = HBA_PORTSPEED_4GBIT;
+                       else if ((FC_JEDEC_ID(vp->rev.biuRev) ==
+                                 CENTAUR_2G_JEDEC_ID)
+                                || (FC_JEDEC_ID(vp->rev.biuRev) ==
+                                    PEGASUS_JEDEC_ID)
+                                || (FC_JEDEC_ID(vp->rev.biuRev) ==
+                                    THOR_JEDEC_ID))
+                               ae->un.SupportSpeed = HBA_PORTSPEED_2GBIT;
+                       else
+                               ae->un.SupportSpeed = HBA_PORTSPEED_1GBIT;
+                       pab->ab.EntryCnt++;
+                       size += FOURBYTES + 4;
+
+                       /* #3 Port attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+                       switch(phba->fc_linkspeed) {
+                               case LA_1GHZ_LINK:
+                                       ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
+                               break;
+                               case LA_2GHZ_LINK:
+                                       ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
+                               break;
+                               case LA_4GHZ_LINK:
+                                       ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
+                               break;
+                               default:
+                                       ae->un.PortSpeed =
+                                               HBA_PORTSPEED_UNKNOWN;
+                               break;
+                       }
+                       pab->ab.EntryCnt++;
+                       size += FOURBYTES + 4;
+
+                       /* #4 Port attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+                       hsp = (struct serv_parm *) & phba->fc_sparam;
+                       ae->un.MaxFrameSize =
+                           (((uint32_t) hsp->cmn.
+                             bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
+                           bbRcvSizeLsb;
+                       pab->ab.EntryCnt++;
+                       size += FOURBYTES + 4;
+
+                       /* #5 Port attribute entry */
+                       ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+                       ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
+                       strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
+                       len = strlen((char *)ae->un.OsDeviceName);
+                       len += (len & 3) ? (4 - (len & 3)) : 4;
+                       ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+                       pab->ab.EntryCnt++;
+                       size += FOURBYTES + len;
+
+                       if (phba->cfg_fdmi_on == 2) {
+                               /* #6 Port attribute entry */
+                               ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
+                                                         size);
+                               ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
+                               sprintf(ae->un.HostName, "%s",
+                                       system_utsname.nodename);
+                               len = strlen(ae->un.HostName);
+                               len += (len & 3) ? (4 - (len & 3)) : 4;
+                               ae->ad.bits.AttrLen =
+                                   be16_to_cpu(FOURBYTES + len);
+                               pab->ab.EntryCnt++;
+                               size += FOURBYTES + len;
+                       }
+
+                       pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
+                       /* Total size */
+                       size = GID_REQUEST_SZ - 4 + size;
+               }
+               break;
+
+       case SLI_MGMT_DHBA:
+               CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
+               CtReq->CommandResponse.bits.Size = 0;
+               pe = (PORT_ENTRY *) & CtReq->un.PortID;
+               memcpy((uint8_t *) & pe->PortName,
+                      (uint8_t *) & phba->fc_sparam.portName,
+                      sizeof (struct lpfc_name));
+               size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+               break;
+
+       case SLI_MGMT_DPRT:
+               CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
+               CtReq->CommandResponse.bits.Size = 0;
+               pe = (PORT_ENTRY *) & CtReq->un.PortID;
+               memcpy((uint8_t *) & pe->PortName,
+                      (uint8_t *) & phba->fc_sparam.portName,
+                      sizeof (struct lpfc_name));
+               size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+               break;
+       }
+
+       bpl = (struct ulp_bde64 *) bmp->virt;
+       bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+       bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+       bpl->tus.f.bdeFlags = 0;
+       bpl->tus.f.bdeSize = size;
+       bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+       cmpl = lpfc_cmpl_ct_cmd_fdmi;
+
+       if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
+               return 0;
+
+       lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+fdmi_cmd_free_bmp:
+       kfree(bmp);
+fdmi_cmd_free_mpvirt:
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+fdmi_cmd_free_mp:
+       kfree(mp);
+fdmi_cmd_exit:
+       /* Issue FDMI request failed */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_DISCOVERY,
+                       "%d:0244 Issue FDMI request failed Data: x%x\n",
+                       phba->brd_no,
+                       cmdcode);
+       return 1;
+}
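The string attributes built above (the RHBA strings as well as OsDeviceName and
HostName in the RPA case) are padded out to a four-byte boundary before their
lengths are recorded; when the length is already a multiple of four, a full
extra word is added so the terminating NUL still fits inside the attribute. A
minimal sketch of that rounding, using a helper name that is purely
illustrative and not part of the driver:

    /* Illustrative only: round an FDMI string attribute length up to the
     * next multiple of four, always leaving room for the NUL terminator
     * (mirrors the "len += (len & 3) ? (4 - (len & 3)) : 4" lines above).
     */
    static int fdmi_pad_len(int len)
    {
            return len + ((len & 3) ? (4 - (len & 3)) : 4);
    }

    /* e.g. fdmi_pad_len(5) == 8, fdmi_pad_len(8) == 12 */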
+
+void
+lpfc_fdmi_tmo(unsigned long ptr)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+       unsigned long iflag;
+
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
+               phba->work_hba_events |= WORKER_FDMI_TMO;
+               if (phba->work_wait)
+                       wake_up(phba->work_wait);
+       }
+       spin_unlock_irqrestore(phba->host->host_lock,iflag);
+}
+
+void
+lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
+{
+       struct lpfc_nodelist *ndlp;
+
+       spin_lock_irq(phba->host->host_lock);
+       if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
+               spin_unlock_irq(phba->host->host_lock);
+               return;
+       }
+       ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
+       if (ndlp) {
+               if (system_utsname.nodename[0] != '\0') {
+                       lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
+               } else {
+                       mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
+               }
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       return;
+}
+
+
+void
+lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       lpfc_vpd_t *vp = &phba->vpd;
+       uint32_t b1, b2, b3, b4, i, rev;
+       char c;
+       uint32_t *ptr, str[4];
+       uint8_t *fwname;
+
+       if (vp->rev.rBit) {
+               if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+                       rev = vp->rev.sli2FwRev;
+               else
+                       rev = vp->rev.sli1FwRev;
+
+               b1 = (rev & 0x0000f000) >> 12;
+               b2 = (rev & 0x00000f00) >> 8;
+               b3 = (rev & 0x000000c0) >> 6;
+               b4 = (rev & 0x00000030) >> 4;
+
+               switch (b4) {
+               case 0:
+                       c = 'N';
+                       break;
+               case 1:
+                       c = 'A';
+                       break;
+               case 2:
+                       c = 'B';
+                       break;
+               default:
+                       c = 0;
+                       break;
+               }
+               b4 = (rev & 0x0000000f);
+
+               if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+                       fwname = vp->rev.sli2FwName;
+               else
+                       fwname = vp->rev.sli1FwName;
+
+               for (i = 0; i < 16; i++)
+                       if (fwname[i] == 0x20)
+                               fwname[i] = 0;
+
+               ptr = (uint32_t*)fwname;
+
+               for (i = 0; i < 3; i++)
+                       str[i] = be32_to_cpu(*ptr++);
+
+               if (c == 0) {
+                       if (flag)
+                               sprintf(fwrevision, "%d.%d%d (%s)",
+                                       b1, b2, b3, (char *)str);
+                       else
+                               sprintf(fwrevision, "%d.%d%d", b1,
+                                       b2, b3);
+               } else {
+                       if (flag)
+                               sprintf(fwrevision, "%d.%d%d%c%d (%s)",
+                                       b1, b2, b3, c,
+                                       b4, (char *)str);
+                       else
+                               sprintf(fwrevision, "%d.%d%d%c%d",
+                                       b1, b2, b3, c, b4);
+               }
+       } else {
+               rev = vp->rev.smFwRev;
+
+               b1 = (rev & 0xff000000) >> 24;
+               b2 = (rev & 0x00f00000) >> 20;
+               b3 = (rev & 0x000f0000) >> 16;
+               c  = (rev & 0x0000ff00) >> 8;
+               b4 = (rev & 0x000000ff);
+
+               sprintf(fwrevision, "%d.%d%d%c%d ", b1, b2, b3, c, b4);
+       }
+       return;
+}
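For reference, the rBit branch of lpfc_decode_firmware_rev() above unpacks the
revision from nibble-sized fields of one word: major in bits 15:12, minor in
bits 11:8, a fix digit in bits 7:6, a release-type code in bits 5:4 (N, A or
B), and a step number in bits 3:0. A stand-alone sketch of that decoding, with
an assumed helper name and example value:

    /* Illustrative only: decode an SLI firmware revision word the same way
     * lpfc_decode_firmware_rev() does when vp->rev.rBit is set.
     */
    static void decode_rev_word(uint32_t rev, char *buf)
    {
            static const char type[] = { 'N', 'A', 'B' };
            uint32_t b1 = (rev & 0x0000f000) >> 12;  /* major */
            uint32_t b2 = (rev & 0x00000f00) >> 8;   /* minor */
            uint32_t b3 = (rev & 0x000000c0) >> 6;   /* fix */
            uint32_t t  = (rev & 0x00000030) >> 4;   /* release type */
            uint32_t b4 = (rev & 0x0000000f);        /* step */

            if (t < 3)
                    sprintf(buf, "%d.%d%d%c%d", b1, b2, b3, type[t], b4);
            else
                    sprintf(buf, "%d.%d%d", b1, b2, b3);
    }

    /* e.g. rev 0x00002551 decodes to "2.51A1" */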
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
new file mode 100644 (file)
index 0000000..adccc99
--- /dev/null
@@ -0,0 +1,206 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_disc.h 1.61 2005/04/07 08:46:52EDT sf_support Exp  $
+ */
+
+#define FC_MAX_HOLD_RSCN     32              /* max number of deferred RSCNs */
+#define FC_MAX_NS_RSP        65536    /* max size NameServer rsp */
+#define FC_MAXLOOP           126      /* max devices supported on a fc loop */
+#define LPFC_DISC_FLOGI_TMO  10              /* Discovery FLOGI ratov */
+
+
+/* This is the protocol-dependent definition for a Node List Entry.
+ * It is used by the Fibre Channel protocol to support FCP.
+ */
+
+/* structure used to queue event to the discovery tasklet */
+struct lpfc_work_evt {
+       struct list_head      evt_listp;
+       void                * evt_arg1;
+       void                * evt_arg2;
+       uint32_t              evt;
+};
+
+#define LPFC_EVT_NODEV_TMO     0x1
+#define LPFC_EVT_ONLINE                0x2
+#define LPFC_EVT_OFFLINE       0x3
+#define LPFC_EVT_ELS_RETRY     0x4
+
+struct lpfc_nodelist {
+       struct list_head nlp_listp;
+       struct lpfc_name nlp_portname;          /* port name */
+       struct lpfc_name nlp_nodename;          /* node name */
+       uint32_t         nlp_flag;              /* entry  flags */
+       uint32_t         nlp_DID;               /* FC D_ID of entry */
+       uint32_t         nlp_last_elscmd;       /* Last ELS cmd sent */
+       uint16_t         nlp_type;
+#define NLP_FC_NODE        0x1                 /* entry is an FC node */
+#define NLP_FABRIC         0x4                 /* entry rep a Fabric entity */
+#define NLP_FCP_TARGET     0x8                 /* entry is an FCP target */
+#define NLP_FCP_INITIATOR  0x10                        /* entry is an FCP Initiator */
+
+       uint16_t        nlp_rpi;
+       uint16_t        nlp_state;              /* state transition indicator */
+       uint16_t        nlp_xri;                /* output exchange id for RPI */
+       uint16_t        nlp_sid;                /* scsi id */
+#define NLP_NO_SID             0xffff
+       uint16_t        nlp_maxframe;           /* Max RCV frame size */
+       uint8_t         nlp_class_sup;          /* Supported Classes */
+       uint8_t         nlp_retry;              /* used for ELS retries */
+       uint8_t         nlp_disc_refcnt;        /* used for DSM */
+       uint8_t         nlp_fcp_info;           /* class info, bits 0-3 */
+#define NLP_FCP_2_DEVICE   0x10                        /* FCP-2 device */
+
+       struct timer_list   nlp_delayfunc;      /* Used for delayed ELS cmds */
+       struct timer_list   nlp_tmofunc;        /* Used for nodev tmo */
+       struct fc_rport *rport;                 /* Corresponding FC transport
+                                                  port structure */
+       struct lpfc_nodelist *nlp_rpi_hash_next;
+       struct lpfc_hba      *nlp_phba;
+       struct lpfc_work_evt nodev_timeout_evt;
+       struct lpfc_work_evt els_retry_evt;
+};
+
+/* Defines for nlp_flag (uint32) */
+#define NLP_NO_LIST        0x0         /* Indicates immediately free node */
+#define NLP_UNUSED_LIST    0x1         /* Flg to indicate node will be freed */
+#define NLP_PLOGI_LIST     0x2         /* Flg to indicate sent PLOGI */
+#define NLP_ADISC_LIST     0x3         /* Flg to indicate sent ADISC */
+#define NLP_REGLOGIN_LIST  0x4         /* Flg to indicate sent REG_LOGIN */
+#define NLP_PRLI_LIST      0x5         /* Flg to indicate sent PRLI */
+#define NLP_UNMAPPED_LIST  0x6         /* Node is now unmapped */
+#define NLP_MAPPED_LIST    0x7         /* Node is now mapped */
+#define NLP_NPR_LIST       0x8         /* Node is in NPort Recovery state */
+#define NLP_JUST_DQ        0x9         /* just dequeue ndlp in lpfc_nlp_list */
+#define NLP_LIST_MASK      0xf         /* mask to see what list node is on */
+#define NLP_PLOGI_SND      0x20                /* sent PLOGI request for this entry */
+#define NLP_PRLI_SND       0x40                /* sent PRLI request for this entry */
+#define NLP_ADISC_SND      0x80                /* sent ADISC request for this entry */
+#define NLP_LOGO_SND       0x100       /* sent LOGO request for this entry */
+#define NLP_RNID_SND       0x400       /* sent RNID request for this entry */
+#define NLP_ELS_SND_MASK   0x7e0       /* sent ELS request for this entry */
+#define NLP_NODEV_TMO      0x10000     /* nodev timeout is running for node */
+#define NLP_DELAY_TMO      0x20000     /* delay timeout is running for node */
+#define NLP_NPR_2B_DISC    0x40000     /* node is included in num_disc_nodes */
+#define NLP_RCV_PLOGI      0x80000     /* Rcv'ed PLOGI from remote system */
+#define NLP_LOGO_ACC       0x100000    /* Process LOGO after ACC completes */
+#define NLP_TGT_NO_SCSIID  0x200000    /* good PRLI but no binding for scsid */
+#define NLP_ACC_REGLOGIN   0x1000000   /* Issue Reg Login after successful
+                                          ACC */
+#define NLP_NPR_ADISC      0x2000000   /* Issue ADISC when dq'ed from
+                                          NPR list */
+#define NLP_DELAY_REMOVE   0x4000000   /* Defer removal till end of DSM */
+
+/* Defines for list searches */
+#define NLP_SEARCH_MAPPED    0x1       /* search mapped */
+#define NLP_SEARCH_UNMAPPED  0x2       /* search unmapped */
+#define NLP_SEARCH_PLOGI     0x4       /* search plogi */
+#define NLP_SEARCH_ADISC     0x8       /* search adisc */
+#define NLP_SEARCH_REGLOGIN  0x10      /* search reglogin */
+#define NLP_SEARCH_PRLI      0x20      /* search prli */
+#define NLP_SEARCH_NPR       0x40      /* search npr */
+#define NLP_SEARCH_UNUSED    0x80      /* search unused */
+#define NLP_SEARCH_ALL       0xff      /* search all lists */
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
+ * when Link Up discovery or Registered State Change Notification (RSCN)
+ * processing is needed.  Each list holds the nodes that require a PLOGI or
+ * ADISC Extended Link Service (ELS) request.  These lists keep track of the
+ * nodes affected by an RSCN, or a Link Up (typically, all nodes are affected
+ * by a Link Up) event.  The unmapped_list contains all nodes that have
+ * successfully logged in at the Fibre Channel level.  The
+ * mapped_list will contain all nodes that are mapped FCP targets.
+ *
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+
+/* Defines for nlp_state */
+#define NLP_STE_UNUSED_NODE       0x0  /* node is just allocated */
+#define NLP_STE_PLOGI_ISSUE       0x1  /* PLOGI was sent to NL_PORT */
+#define NLP_STE_ADISC_ISSUE       0x2  /* ADISC was sent to NL_PORT */
+#define NLP_STE_REG_LOGIN_ISSUE   0x3  /* REG_LOGIN was issued for NL_PORT */
+#define NLP_STE_PRLI_ISSUE        0x4  /* PRLI was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE     0x5  /* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE       0x6  /* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE          0x7  /* NPort disappeared */
+#define NLP_STE_MAX_STATE         0x8
+#define NLP_STE_FREED_NODE        0xff /* node entry was freed to MEM_NLP */
+
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to PRLI_COMPL. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added to (DEVICE_ADD) or removed from
+ * (DEVICE_RM) the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are
+ * populated, we will first process the ADISC list.  32 entries are processed
+ * initially and ADISC is initiated for each one.  Completions / Events for
+ * each node are funneled through the state machine.  As each node finishes
+ * ADISC processing, it starts ADISC for any nodes waiting for ADISC
+ * processing. If no nodes are waiting, and the ADISC list count is
+ * identically 0, then we are done. For Link Up discovery, since all nodes on
+ * the PLOGI list are UNREG_LOGIN'ed, we can issue a CLEAR_LA and re-enable
+ * Link Events. Next we will process the PLOGI list.  32 entries are processed
+ * initially and PLOGI is initiated for each one.  Completions / Events for
+ * each node are funneled through the state machine.  As
+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
+/* Defines for Node List Entry Events that could happen */
+#define NLP_EVT_RCV_PLOGI         0x0  /* Rcv'd an ELS PLOGI command */
+#define NLP_EVT_RCV_PRLI          0x1  /* Rcv'd an ELS PRLI  command */
+#define NLP_EVT_RCV_LOGO          0x2  /* Rcv'd an ELS LOGO  command */
+#define NLP_EVT_RCV_ADISC         0x3  /* Rcv'd an ELS ADISC command */
+#define NLP_EVT_RCV_PDISC         0x4  /* Rcv'd an ELS PDISC command */
+#define NLP_EVT_RCV_PRLO          0x5  /* Rcv'd an ELS PRLO  command */
+#define NLP_EVT_CMPL_PLOGI        0x6  /* Sent an ELS PLOGI command */
+#define NLP_EVT_CMPL_PRLI         0x7  /* Sent an ELS PRLI  command */
+#define NLP_EVT_CMPL_LOGO         0x8  /* Sent an ELS LOGO  command */
+#define NLP_EVT_CMPL_ADISC        0x9  /* Sent an ELS ADISC command */
+#define NLP_EVT_CMPL_REG_LOGIN    0xa  /* REG_LOGIN mbox cmd completed */
+#define NLP_EVT_DEVICE_RM         0xb  /* Device not found in NS / ALPAmap */
+#define NLP_EVT_DEVICE_RECOVERY   0xc  /* Device existence unknown */
+#define NLP_EVT_MAX_EVENT         0xd
+
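Note that the low four bits of nlp_flag (NLP_LIST_MASK) encode which of the
lists above a node currently sits on, while the higher bits are independent
flags that can be set in parallel. A minimal sketch of querying that encoding;
the helper name is illustrative only:

    /* Illustrative only: the list code lives in the low nibble of nlp_flag;
     * everything above it is an independent flag bit.
     */
    static inline int nlp_on_mapped_list(struct lpfc_nodelist *ndlp)
    {
            return (ndlp->nlp_flag & NLP_LIST_MASK) == NLP_MAPPED_LIST;
    }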
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
new file mode 100644 (file)
index 0000000..68d1b77
--- /dev/null
@@ -0,0 +1,3258 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_els.c 1.186 2005/04/13 14:26:55EDT sf_support Exp  $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
+                         struct lpfc_iocbq *);
+static int lpfc_max_els_tries = 3;
+
+static int
+lpfc_els_chk_latt(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli;
+       LPFC_MBOXQ_t *mbox;
+       uint32_t ha_copy;
+       int rc;
+
+       psli = &phba->sli;
+
+       if ((phba->hba_state >= LPFC_HBA_READY) ||
+           (phba->hba_state == LPFC_LINK_DOWN))
+               return 0;
+
+       /* Read the HBA Host Attention Register */
+       spin_lock_irq(phba->host->host_lock);
+       ha_copy = readl(phba->HAregaddr);
+       spin_unlock_irq(phba->host->host_lock);
+
+       if (!(ha_copy & HA_LATT))
+               return 0;
+
+       /* Pending Link Event during Discovery */
+       lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
+                       "%d:0237 Pending Link Event during "
+                       "Discovery: State x%x\n",
+                       phba->brd_no, phba->hba_state);
+
+       /* CLEAR_LA should re-enable link attention events and
+        * we should then immediately take a LATT event. The
+        * LATT processing should call lpfc_linkdown() which
+        * will cleanup any left over in-progress discovery
+        * events.
+        */
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag |= FC_ABORT_DISCOVERY;
+       spin_unlock_irq(phba->host->host_lock);
+
+       if (phba->hba_state != LPFC_CLEAR_LA) {
+               if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+                       phba->hba_state = LPFC_CLEAR_LA;
+                       lpfc_clear_la(phba, mbox);
+                       mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+                       rc = lpfc_sli_issue_mbox (phba, mbox,
+                                                 (MBX_NOWAIT | MBX_STOP_IOCB));
+                       if (rc == MBX_NOT_FINISHED) {
+                               mempool_free(mbox, phba->mbox_mem_pool);
+                               phba->hba_state = LPFC_HBA_ERROR;
+                       }
+               }
+       }
+
+       return (1);
+
+}
+
+static struct lpfc_iocbq *
+lpfc_prep_els_iocb(struct lpfc_hba * phba,
+                  uint8_t expectRsp,
+                  uint16_t cmdSize,
+                  uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
+{
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_iocbq *elsiocb = NULL;
+       struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
+       struct ulp_bde64 *bpl;
+       IOCB_t *icmd;
+
+       pring = &phba->sli.ring[LPFC_ELS_RING];
+
+       if (phba->hba_state < LPFC_LINK_UP)
+               return  NULL;
+
+
+       /* Allocate buffer for  command iocb */
+       spin_lock_irq(phba->host->host_lock);
+       list_remove_head(lpfc_iocb_list, elsiocb, struct lpfc_iocbq, list);
+       spin_unlock_irq(phba->host->host_lock);
+
+       if (elsiocb == NULL)
+               return NULL;
+       memset(elsiocb, 0, sizeof (struct lpfc_iocbq));
+       icmd = &elsiocb->iocb;
+
+       /* fill in BDEs for command */
+       /* Allocate buffer for command payload */
+       if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
+           ((pcmd->virt = lpfc_mbuf_alloc(phba,
+                                          MEM_PRI, &(pcmd->phys))) == 0)) {
+               if (pcmd)
+                       kfree(pcmd);
+
+               list_add_tail(&elsiocb->list, lpfc_iocb_list);
+               return NULL;
+       }
+
+       INIT_LIST_HEAD(&pcmd->list);
+
+       /* Allocate buffer for response payload */
+       if (expectRsp) {
+               prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+               if (prsp)
+                       prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+                                                    &prsp->phys);
+               if (prsp == 0 || prsp->virt == 0) {
+                       if (prsp)
+                               kfree(prsp);
+                       lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+                       kfree(pcmd);
+                       list_add_tail(&elsiocb->list, lpfc_iocb_list);
+                       return NULL;
+               }
+               INIT_LIST_HEAD(&prsp->list);
+       } else {
+               prsp = NULL;
+       }
+
+       /* Allocate buffer for Buffer ptr list */
+       pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+       if (pbuflist)
+           pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+                                            &pbuflist->phys);
+       if (pbuflist == 0 || pbuflist->virt == 0) {
+               list_add_tail(&elsiocb->list, lpfc_iocb_list);
+               lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+               kfree(pcmd);
+               if (prsp) {
+                       /* prsp is only allocated when a response is expected */
+                       lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+                       kfree(prsp);
+               }
+               if (pbuflist)
+                       kfree(pbuflist);
+               return NULL;
+       }
+
+       INIT_LIST_HEAD(&pbuflist->list);
+
+       icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+       icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+       icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+       if (expectRsp) {
+               icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
+               icmd->un.elsreq64.remoteID = ndlp->nlp_DID;     /* DID */
+               icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+       } else {
+               icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
+               icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+       }
+
+       icmd->ulpBdeCount = 1;
+       icmd->ulpLe = 1;
+       icmd->ulpClass = CLASS3;
+
+       bpl = (struct ulp_bde64 *) pbuflist->virt;
+       bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
+       bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
+       bpl->tus.f.bdeSize = cmdSize;
+       bpl->tus.f.bdeFlags = 0;
+       bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+       if (expectRsp) {
+               bpl++;
+               bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
+               bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
+               bpl->tus.f.bdeSize = FCELSSIZE;
+               bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+               bpl->tus.w = le32_to_cpu(bpl->tus.w);
+       }
+
+       /* Save for completion so we can release these resources */
+       elsiocb->context1 = (uint8_t *) ndlp;
+       elsiocb->context2 = (uint8_t *) pcmd;
+       elsiocb->context3 = (uint8_t *) pbuflist;
+       elsiocb->retry = retry;
+       elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
+
+       if (prsp) {
+               list_add(&prsp->list, &pcmd->list);
+       }
+
+       if (expectRsp) {
+               /* Xmit ELS command <elsCmd> to remote NPORT <did> */
+               lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                               "%d:0116 Xmit ELS command x%x to remote "
+                               "NPORT x%x Data: x%x x%x\n",
+                               phba->brd_no, elscmd,
+                               ndlp->nlp_DID, icmd->ulpIoTag, phba->hba_state);
+       } else {
+               /* Xmit ELS response <elsCmd> to remote NPORT <did> */
+               lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                               "%d:0117 Xmit ELS response x%x to remote "
+                               "NPORT x%x Data: x%x x%x\n",
+                               phba->brd_no, elscmd,
+                               ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
+       }
+
+       return (elsiocb);
+}
+
+
+static int
+lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+               struct serv_parm *sp, IOCB_t *irsp)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc;
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag |= FC_FABRIC;
+       spin_unlock_irq(phba->host->host_lock);
+
+       phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
+       if (sp->cmn.edtovResolution)    /* E_D_TOV ticks are in nanoseconds */
+               phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
+
+       phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
+
+       if (phba->fc_topology == TOPOLOGY_LOOP) {
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag |= FC_PUBLIC_LOOP;
+               spin_unlock_irq(phba->host->host_lock);
+       } else {
+               /*
+                * If we are an N_Port connected to a Fabric, fix up sparams so
+                * logins to devices on remote loops work.
+                */
+               phba->fc_sparam.cmn.altBbCredit = 1;
+       }
+
+       phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+       memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
+       memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+       ndlp->nlp_class_sup = 0;
+       if (sp->cls1.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS1;
+       if (sp->cls2.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS2;
+       if (sp->cls3.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS3;
+       if (sp->cls4.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS4;
+       ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+                               sp->cmn.bbRcvSizeLsb;
+       memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               goto fail;
+
+       phba->hba_state = LPFC_FABRIC_CFG_LINK;
+       lpfc_config_link(phba, mbox);
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+       if (rc == MBX_NOT_FINISHED)
+               goto fail_free_mbox;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               goto fail;
+
+       if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
+               goto fail_free_mbox;
+
+       /*
+        * The set_slim mailbox command needs to execute first,
+        * so queue this command to be processed later.
+        */
+       mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
+       mbox->context2 = ndlp;
+
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+       if (rc == MBX_NOT_FINISHED)
+               goto fail_free_mbox;
+
+       return 0;
+
+ fail_free_mbox:
+       mempool_free(mbox, phba->mbox_mem_pool);
+ fail:
+       return -ENXIO;
+}
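The two divisions above convert the fabric's timeout values into the units the
driver keeps internally, rounding up so a partial unit still counts as a whole
one: E_D_TOV from nanoseconds to milliseconds when edtovResolution is set, and
R_A_TOV from milliseconds to seconds. A small sketch of that arithmetic, with
assumed helper names (fc_edtov appears to be held in milliseconds, fc_ratov in
seconds):

    /* Illustrative only: round-up unit conversions for the fabric timeouts. */
    static uint32_t edtov_ns_to_ms(uint32_t ns)
    {
            return (ns + 999999) / 1000000;   /* e.g. 2000001 ns -> 3 ms */
    }

    static uint32_t ratov_ms_to_s(uint32_t ms)
    {
            return (ms + 999) / 1000;         /* e.g. 10000 ms -> 10 s */
    }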
+
+/*
+ * We FLOGIed into an NPort, initiate pt2pt protocol
+ */
+static int
+lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+               struct serv_parm *sp)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc;
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+       spin_unlock_irq(phba->host->host_lock);
+
+       phba->fc_edtov = FF_DEF_EDTOV;
+       phba->fc_ratov = FF_DEF_RATOV;
+       rc = memcmp(&phba->fc_portname, &sp->portName,
+                       sizeof(struct lpfc_name));
+       if (rc >= 0) {
+               /* This side will initiate the PLOGI */
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag |= FC_PT2PT_PLOGI;
+               spin_unlock_irq(phba->host->host_lock);
+
+               /*
+                * N_Port ID cannot be 0; set ours to LocalID, and the other
+                * side will be RemoteID.
+                */
+
+               /* not equal */
+               if (rc)
+                       phba->fc_myDID = PT2PT_LocalID;
+
+               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!mbox)
+                       goto fail;
+
+               lpfc_config_link(phba, mbox);
+
+               mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               rc = lpfc_sli_issue_mbox(phba, mbox,
+                               MBX_NOWAIT | MBX_STOP_IOCB);
+               if (rc == MBX_NOT_FINISHED) {
+                       mempool_free(mbox, phba->mbox_mem_pool);
+                       goto fail;
+               }
+               mempool_free(ndlp, phba->nlp_mem_pool);
+
+               ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, PT2PT_RemoteID);
+               if (!ndlp) {
+                       /*
+                        * Cannot find an existing ndlp for the remote port,
+                        * so allocate a new one
+                        */
+                       ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+                       if (!ndlp)
+                               goto fail;
+
+                       lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
+               }
+
+               memcpy(&ndlp->nlp_portname, &sp->portName,
+                               sizeof(struct lpfc_name));
+               memcpy(&ndlp->nlp_nodename, &sp->nodeName,
+                               sizeof(struct lpfc_name));
+               ndlp->nlp_state = NLP_STE_NPR_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+       } else {
+               /* This side will wait for the PLOGI */
+               mempool_free( ndlp, phba->nlp_mem_pool);
+       }
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag |= FC_PT2PT;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Start discovery - this should just do CLEAR_LA */
+       lpfc_disc_start(phba);
+       return 0;
+ fail:
+       return -ENXIO;
+}
+
+static void
+lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
+                   struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp = &rspiocb->iocb;
+       struct lpfc_nodelist *ndlp = cmdiocb->context1;
+       struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+       struct serv_parm *sp;
+       int rc;
+
+       /* Check to see if link went down during discovery */
+       if (lpfc_els_chk_latt(phba)) {
+               lpfc_nlp_remove(phba, ndlp);
+               goto out;
+       }
+
+       if (irsp->ulpStatus) {
+               /* Check for retry */
+               if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+                       /* ELS command is being retried */
+                       goto out;
+               }
+               /* FLOGI failed, so there is no fabric */
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+               spin_unlock_irq(phba->host->host_lock);
+
+               /* If private loop, then allow max outstanding ELS to be
+                * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
+                * alpa map would take too long otherwise.
+                */
+               if (phba->alpa_map[0] == 0) {
+                       phba->cfg_discovery_threads =
+                           LPFC_MAX_DISC_THREADS;
+               }
+
+               /* FLOGI failure */
+               lpfc_printf_log(phba,
+                               KERN_INFO,
+                               LOG_ELS,
+                               "%d:0100 FLOGI failure Data: x%x x%x\n",
+                               phba->brd_no,
+                               irsp->ulpStatus, irsp->un.ulpWord[4]);
+               goto flogifail;
+       }
+
+       /*
+        * The FLOGI succeeded.  Sync the data for the CPU before
+        * accessing it.
+        */
+       prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+
+       sp = prsp->virt + sizeof(uint32_t);
+
+       /* FLOGI completes successfully */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0101 FLOGI completes successfully "
+                       "Data: x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+                       sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+
+       if (phba->hba_state == LPFC_FLOGI) {
+               /*
+                * If the Common Service Parameters indicate an Nport,
+                * we are point-to-point; if an Fport, we are attached to a Fabric.
+                */
+               if (sp->cmn.fPort)
+                       rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
+               else
+                       rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);
+
+               if (!rc)
+                       goto out;
+       }
+
+flogifail:
+       lpfc_nlp_remove(phba, ndlp);
+
+       if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+           (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
+            irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
+               /* FLOGI failed, so just use loop map to make discovery list */
+               lpfc_disc_list_loopmap(phba);
+
+               /* Start discovery */
+               lpfc_disc_start(phba);
+       }
+
+out:
+       lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+static int
+lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                    uint8_t retry)
+{
+       struct serv_parm *sp;
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       uint32_t tmo;
+       int rc;
+
+       pring = &phba->sli.ring[LPFC_ELS_RING];
+
+       cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+                                         ndlp, ELS_CMD_FLOGI)) == 0) {
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       /* For FLOGI request, remainder of payload is service parameters */
+       *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
+       pcmd += sizeof (uint32_t);
+       memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+       sp = (struct serv_parm *) pcmd;
+
+       /* Setup CSPs accordingly for Fabric */
+       sp->cmn.e_d_tov = 0;
+       sp->cmn.w2.r_a_tov = 0;
+       sp->cls1.classValid = 0;
+       sp->cls2.seqDelivery = 1;
+       sp->cls3.seqDelivery = 1;
+       if (sp->cmn.fcphLow < FC_PH3)
+               sp->cmn.fcphLow = FC_PH3;
+       if (sp->cmn.fcphHigh < FC_PH3)
+               sp->cmn.fcphHigh = FC_PH3;
+
+       tmo = phba->fc_ratov;
+       phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
+       lpfc_set_disctmo(phba);
+       phba->fc_ratov = tmo;
+
+       phba->fc_stat.elsXmitFLOGI++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+       spin_lock_irq(phba->host->host_lock);
+       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       spin_unlock_irq(phba->host->host_lock);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       return (0);
+}
+
+int
+lpfc_els_abort_flogi(struct lpfc_hba * phba)
+{
+       struct lpfc_sli_ring *pring;
+       struct lpfc_iocbq *iocb, *next_iocb;
+       struct lpfc_nodelist *ndlp;
+       IOCB_t *icmd;
+
+       /* Abort outstanding I/O on NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0201 Abort outstanding I/O on NPort x%x\n",
+                       phba->brd_no, Fabric_DID);
+
+       pring = &phba->sli.ring[LPFC_ELS_RING];
+
+       /*
+        * Check the txcmplq for an iocb that matches the nport the driver is
+        * searching for.
+        */
+       spin_lock_irq(phba->host->host_lock);
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+               icmd = &iocb->iocb;
+               if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+                       ndlp = (struct lpfc_nodelist *)(iocb->context1);
+                       if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
+                               list_del(&iocb->list);
+                               pring->txcmplq_cnt--;
+
+                               if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
+                                       lpfc_sli_issue_abort_iotag32
+                                               (phba, pring, iocb);
+                               }
+                               if (iocb->iocb_cmpl) {
+                                       icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                                       icmd->un.ulpWord[4] =
+                                           IOERR_SLI_ABORTED;
+                                       spin_unlock_irq(phba->host->host_lock);
+                                       (iocb->iocb_cmpl) (phba, iocb, iocb);
+                                       spin_lock_irq(phba->host->host_lock);
+                               } else {
+                                       list_add_tail(&iocb->list,
+                                                     &phba->lpfc_iocb_list);
+                               }
+                       }
+               }
+       }
+       spin_unlock_irq(phba->host->host_lock);
+
+       return 0;
+}
+
+int
+lpfc_initial_flogi(struct lpfc_hba * phba)
+{
+       struct lpfc_nodelist *ndlp;
+
+       /* First look for Fabric ndlp on the unmapped list */
+
+       if ((ndlp =
+            lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+                              Fabric_DID)) == 0) {
+               /* Cannot find existing Fabric ndlp, so allocate a new one */
+               if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+                   == 0) {
+                       return (0);
+               }
+               lpfc_nlp_init(phba, ndlp, Fabric_DID);
+       }
+       else {
+               phba->fc_unmap_cnt--;
+               list_del(&ndlp->nlp_listp);
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag &= ~NLP_LIST_MASK;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
+               mempool_free( ndlp, phba->nlp_mem_pool);
+       }
+       return (1);
+}
+
+static void
+lpfc_more_plogi(struct lpfc_hba * phba)
+{
+       int sentplogi;
+
+       if (phba->num_disc_nodes)
+               phba->num_disc_nodes--;
+
+       /* Continue discovery with <num_disc_nodes> PLOGIs to go */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0232 Continue discovery with %d PLOGIs to go "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
+                       phba->fc_flag, phba->hba_state);
+
+       /* Check to see if there are more PLOGIs to be sent */
+       if (phba->fc_flag & FC_NLP_MORE) {
+               /* go thru NPR list and issue any remaining ELS PLOGIs */
+               sentplogi = lpfc_els_disc_plogi(phba);
+       }
+       return;
+}
+
+static void
+lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                   struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp;
+       struct lpfc_sli *psli;
+       struct lpfc_nodelist *ndlp;
+       int disc, rc, did, type;
+
+       psli = &phba->sli;
+
+       /* we pass cmdiocb to state machine which needs rspiocb as well */
+       cmdiocb->context_un.rsp_iocb = rspiocb;
+
+       irsp = &rspiocb->iocb;
+       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_PLOGI_SND;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Since ndlp can be freed in the disc state machine, note if this node
+        * is being used during discovery.
+        */
+       disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+       rc   = 0;
+
+       /* PLOGI completes to NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0102 PLOGI completes to NPort x%x "
+                       "Data: x%x x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+                       irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
+
+       /* Check to see if link went down during discovery */
+       if (lpfc_els_chk_latt(phba)) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               spin_unlock_irq(phba->host->host_lock);
+               goto out;
+       }
+
+       /* ndlp could be freed in DSM, save these values now */
+       type = ndlp->nlp_type;
+       did = ndlp->nlp_DID;
+
+       if (irsp->ulpStatus) {
+               /* Check for retry */
+               if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+                       /* ELS command is being retried */
+                       if (disc) {
+                               spin_lock_irq(phba->host->host_lock);
+                               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+                               spin_unlock_irq(phba->host->host_lock);
+                       }
+                       goto out;
+               }
+
+               /* PLOGI failed */
+               /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+               if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                  ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+                  (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+                       disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+               }
+               else {
+                       rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+                                       NLP_EVT_CMPL_PLOGI);
+               }
+       } else {
+               /* Good status, call state machine */
+               rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+                                       NLP_EVT_CMPL_PLOGI);
+       }
+
+       if (type & NLP_FABRIC) {
+               /* If we cannot login to Nameserver, kick off discovery now */
+               if ((did == NameServer_DID) && (rc == NLP_STE_FREED_NODE)) {
+                       lpfc_disc_start(phba);
+               }
+               goto out;
+       }
+
+       if (disc && phba->num_disc_nodes) {
+               /* Check to see if there are more PLOGIs to be sent */
+               lpfc_more_plogi(phba);
+       }
+
+       if (rc != NLP_STE_FREED_NODE) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+
+       if (phba->num_disc_nodes == 0) {
+               if(disc) {
+                       spin_lock_irq(phba->host->host_lock);
+                       phba->fc_flag &= ~FC_NDISC_ACTIVE;
+                       spin_unlock_irq(phba->host->host_lock);
+               }
+               lpfc_can_disctmo(phba);
+               if (phba->fc_flag & FC_RSCN_MODE) {
+                       /* Check to see if more RSCNs came in while we were
+                        * processing this one.
+                        */
+                       if ((phba->fc_rscn_id_cnt == 0) &&
+                           (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+                               spin_lock_irq(phba->host->host_lock);
+                               phba->fc_flag &= ~FC_RSCN_MODE;
+                               spin_unlock_irq(phba->host->host_lock);
+                       } else {
+                               lpfc_els_handle_rscn(phba);
+                       }
+               }
+       }
+
+out:
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
+
+int
+lpfc_issue_els_plogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                    uint8_t retry)
+{
+       struct serv_parm *sp;
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+
+       cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+                                         ndlp, ELS_CMD_PLOGI)) == 0) {
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       /* For PLOGI request, remainder of payload is service parameters */
+       *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
+       pcmd += sizeof (uint32_t);
+       memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+       sp = (struct serv_parm *) pcmd;
+
+       if (sp->cmn.fcphLow < FC_PH_4_3)
+               sp->cmn.fcphLow = FC_PH_4_3;
+
+       if (sp->cmn.fcphHigh < FC_PH3)
+               sp->cmn.fcphHigh = FC_PH3;
+
+       phba->fc_stat.elsXmitPLOGI++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_PLOGI_SND;
+       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+               ndlp->nlp_flag &= ~NLP_PLOGI_SND;
+               spin_unlock_irq(phba->host->host_lock);
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       return (0);
+}
+
+static void
+lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                  struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp;
+       struct lpfc_sli *psli;
+       struct lpfc_nodelist *ndlp;
+
+       psli = &phba->sli;
+       /* we pass cmdiocb to state machine which needs rspiocb as well */
+       cmdiocb->context_un.rsp_iocb = rspiocb;
+
+       irsp = &(rspiocb->iocb);
+       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_PRLI_SND;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* PRLI completes to NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0103 PRLI completes to NPort x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+                       irsp->un.ulpWord[4], phba->num_disc_nodes);
+
+       phba->fc_prli_sent--;
+       /* Check to see if link went down during discovery */
+       if (lpfc_els_chk_latt(phba))
+               goto out;
+
+       if (irsp->ulpStatus) {
+               /* Check for retry */
+               if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+                       /* ELS command is being retried */
+                       goto out;
+               }
+               /* PRLI failed */
+               /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+               if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                  ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+                  (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+                       goto out;
+               }
+               else {
+                       lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+                                       NLP_EVT_CMPL_PRLI);
+               }
+       } else {
+               /* Good status, call state machine */
+               lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
+       }
+
+out:
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
+
+int
+lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                   uint8_t retry)
+{
+       PRLI *npr;
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+
+       cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+                                         ndlp, ELS_CMD_PRLI)) == 0) {
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       /* For PRLI request, remainder of payload is service parameters */
+       memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
+       *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
+       pcmd += sizeof (uint32_t);
+
+       /* For PRLI, remainder of payload is PRLI parameter page */
+       npr = (PRLI *) pcmd;
+       /*
+        * If our firmware version is 3.20 or later,
+        * set the following bits for FC-TAPE support.
+        */
+       if (phba->vpd.rev.feaLevelHigh >= 0x02) {
+               npr->ConfmComplAllowed = 1;
+               npr->Retry = 1;
+               npr->TaskRetryIdReq = 1;
+       }
+       npr->estabImagePair = 1;
+       npr->readXferRdyDis = 1;
+
+       /* For FCP support */
+       npr->prliType = PRLI_FCP_TYPE;
+       npr->initiatorFunc = 1;
+
+       phba->fc_stat.elsXmitPRLI++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_PRLI_SND;
+       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+               ndlp->nlp_flag &= ~NLP_PRLI_SND;
+               spin_unlock_irq(phba->host->host_lock);
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       phba->fc_prli_sent++;
+       return (0);
+}
+
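+/*
+ * One outstanding ADISC has completed: decrement num_disc_nodes and, when
+ * FC_NLP_MORE indicates nodes are still waiting, restart the discovery
+ * timer and send the next batch of ADISCs from the NPR list.
+ */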
+static void
+lpfc_more_adisc(struct lpfc_hba * phba)
+{
+       int sentadisc;
+
+       if (phba->num_disc_nodes)
+               phba->num_disc_nodes--;
+
+       /* Continue discovery with <num_disc_nodes> ADISCs to go */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0210 Continue discovery with %d ADISCs to go "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
+                       phba->fc_flag, phba->hba_state);
+
+       /* Check to see if there are more ADISCs to be sent */
+       if (phba->fc_flag & FC_NLP_MORE) {
+               lpfc_set_disctmo(phba);
+
+               /* go thru NPR list and issue any remaining ELS ADISCs */
+               sentadisc = lpfc_els_disc_adisc(phba);
+       }
+       return;
+}
+
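+/*
+ * RSCN-driven discovery: PLOGI any remaining NPR nodes, then either clear
+ * FC_RSCN_MODE or process RSCN payloads that arrived while this one was
+ * being handled.
+ */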
+static void
+lpfc_rscn_disc(struct lpfc_hba * phba)
+{
+       /* RSCN discovery */
+       /* go thru NPR list and issue ELS PLOGIs */
+       if (phba->fc_npr_cnt) {
+               if (lpfc_els_disc_plogi(phba))
+                       return;
+       }
+       if (phba->fc_flag & FC_RSCN_MODE) {
+               /* Check to see if more RSCNs came in while we were
+                * processing this one.
+                */
+               if ((phba->fc_rscn_id_cnt == 0) &&
+                   (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+                       spin_lock_irq(phba->host->host_lock);
+                       phba->fc_flag &= ~FC_RSCN_MODE;
+                       spin_unlock_irq(phba->host->host_lock);
+               } else {
+                       lpfc_els_handle_rscn(phba);
+               }
+       }
+}
+
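+/*
+ * Completion handler for an outgoing ADISC.  Clears NLP_ADISC_SND, passes
+ * the result (or a retry) to the discovery state machine and, when the last
+ * outstanding ADISC finishes, either issues CLEAR_LA to complete link-up
+ * discovery or falls back to RSCN discovery.
+ */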
+static void
+lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                   struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp;
+       struct lpfc_sli *psli;
+       struct lpfc_nodelist *ndlp;
+       LPFC_MBOXQ_t *mbox;
+       int disc, rc;
+
+       psli = &phba->sli;
+
+       /* we pass cmdiocb to state machine which needs rspiocb as well */
+       cmdiocb->context_un.rsp_iocb = rspiocb;
+
+       irsp = &(rspiocb->iocb);
+       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_ADISC_SND;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Since ndlp can be freed in the disc state machine, note if this node
+        * is being used during discovery.
+        */
+       disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+
+       /* ADISC completes to NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0104 ADISC completes to NPort x%x "
+                       "Data: x%x x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+                       irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
+
+       /* Check to see if link went down during discovery */
+       if (lpfc_els_chk_latt(phba)) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               spin_unlock_irq(phba->host->host_lock);
+               goto out;
+       }
+
+       if (irsp->ulpStatus) {
+               /* Check for retry */
+               if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+                       /* ELS command is being retried */
+                       if (disc) {
+                               spin_lock_irq(phba->host->host_lock);
+                               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+                               spin_unlock_irq(phba->host->host_lock);
+                               lpfc_set_disctmo(phba);
+                       }
+                       goto out;
+               }
+               /* ADISC failed */
+               /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+               if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                  ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+                  (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+                       disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+               } else {
+                       lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+                                       NLP_EVT_CMPL_ADISC);
+               }
+       } else {
+               /* Good status, call state machine */
+               lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+                                       NLP_EVT_CMPL_ADISC);
+       }
+
+       if (disc && phba->num_disc_nodes) {
+               /* Check to see if there are more ADISCs to be sent */
+               lpfc_more_adisc(phba);
+
+               /* Check to see if we are done with ADISC authentication */
+               if (phba->num_disc_nodes == 0) {
+                       lpfc_can_disctmo(phba);
+                       /* If we get here, there is nothing left to wait for */
+                       if ((phba->hba_state < LPFC_HBA_READY) &&
+                           (phba->hba_state != LPFC_CLEAR_LA)) {
+                               /* Link up discovery */
+                               if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+                                                         GFP_KERNEL))) {
+                                       phba->hba_state = LPFC_CLEAR_LA;
+                                       lpfc_clear_la(phba, mbox);
+                                       mbox->mbox_cmpl =
+                                           lpfc_mbx_cmpl_clear_la;
+                                       rc = lpfc_sli_issue_mbox
+                                               (phba, mbox,
+                                                (MBX_NOWAIT | MBX_STOP_IOCB));
+                                       if (rc == MBX_NOT_FINISHED) {
+                                               mempool_free(mbox,
+                                                    phba->mbox_mem_pool);
+                                               lpfc_disc_flush_list(phba);
+                                               psli->ring[(psli->ip_ring)].
+                                                   flag &=
+                                                   ~LPFC_STOP_IOCB_EVENT;
+                                               psli->ring[(psli->fcp_ring)].
+                                                   flag &=
+                                                   ~LPFC_STOP_IOCB_EVENT;
+                                               psli->ring[(psli->next_ring)].
+                                                   flag &=
+                                                   ~LPFC_STOP_IOCB_EVENT;
+                                               phba->hba_state =
+                                                   LPFC_HBA_READY;
+                                       }
+                               }
+                       } else {
+                               lpfc_rscn_disc(phba);
+                       }
+               }
+       }
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       spin_unlock_irq(phba->host->host_lock);
+out:
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
+
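+/*
+ * Issue an ELS ADISC (Address Discovery) to revalidate the remote node.
+ * The payload carries our hard AL_PA, port/node names and N_Port ID;
+ * NLP_ADISC_SND is set while the IOCB is outstanding.  Returns 0 on
+ * success, 1 on failure.
+ */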
+int
+lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                    uint8_t retry)
+{
+       ADISC *ap;
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+
+       cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+                                         ndlp, ELS_CMD_ADISC)) == 0) {
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       /* For ADISC request, remainder of payload is service parameters */
+       *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
+       pcmd += sizeof (uint32_t);
+
+       /* Fill in ADISC payload */
+       ap = (ADISC *) pcmd;
+       ap->hardAL_PA = phba->fc_pref_ALPA;
+       memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
+       memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+       ap->DID = be32_to_cpu(phba->fc_myDID);
+
+       phba->fc_stat.elsXmitADISC++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_ADISC_SND;
+       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+               ndlp->nlp_flag &= ~NLP_ADISC_SND;
+               spin_unlock_irq(phba->host->host_lock);
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       return (0);
+}
+
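+/*
+ * Completion handler for an outgoing LOGO.  Clears NLP_LOGO_SND and, unless
+ * the command is retried or was aborted locally, hands the result to the
+ * discovery state machine; a successful LOGO with a delayed retry pending
+ * also unregisters the node's RPI.
+ */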
+static void
+lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                  struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp;
+       struct lpfc_sli *psli;
+       struct lpfc_nodelist *ndlp;
+
+       psli = &phba->sli;
+       /* we pass cmdiocb to state machine which needs rspiocb as well */
+       cmdiocb->context_un.rsp_iocb = rspiocb;
+
+       irsp = &(rspiocb->iocb);
+       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_LOGO_SND;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* LOGO completes to NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0105 LOGO completes to NPort x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+                       irsp->un.ulpWord[4], phba->num_disc_nodes);
+
+       /* Check to see if link went down during discovery */
+       if (lpfc_els_chk_latt(phba))
+               goto out;
+
+       if (irsp->ulpStatus) {
+               /* Check for retry */
+               if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+                       /* ELS command is being retried */
+                       goto out;
+               }
+               /* LOGO failed */
+               /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+               if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                  ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+                  (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+                       goto out;
+               } else {
+                       lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+                                       NLP_EVT_CMPL_LOGO);
+               }
+       } else {
+               /* Good status, call state machine */
+               lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+
+               if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+                       lpfc_unreg_rpi(phba, ndlp);
+               }
+       }
+
+out:
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
+
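+/*
+ * Issue an ELS LOGO (Logout) to the remote node.  The payload is our
+ * N_Port ID followed by our port name; NLP_LOGO_SND is set while the IOCB
+ * is outstanding.  Returns 0 on success, 1 on failure.
+ */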
+int
+lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                   uint8_t retry)
+{
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];
+
+       cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+                                         ndlp, ELS_CMD_LOGO)) == 0) {
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+       pcmd += sizeof (uint32_t);
+
+       /* Fill in LOGO payload */
+       *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
+       pcmd += sizeof (uint32_t);
+       memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
+
+       phba->fc_stat.elsXmitLOGO++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_LOGO_SND;
+       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+               ndlp->nlp_flag &= ~NLP_LOGO_SND;
+               spin_unlock_irq(phba->host->host_lock);
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       return (0);
+}
+
+static void
+lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                 struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp;
+
+       irsp = &rspiocb->iocb;
+
+       /* ELS cmd tag <ulpIoTag> completes */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_ELS,
+                       "%d:0106 ELS cmd tag x%x completes Data: x%x x%x\n",
+                       phba->brd_no,
+                       irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+       /* Check to see if link went down during discovery */
+       lpfc_els_chk_latt(phba);
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
+
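+/*
+ * Issue an ELS SCR (State Change Registration) to <nportid> so this port
+ * will receive RSCNs.  A temporary nodelist entry is allocated for the
+ * exchange and freed once the IOCB has been issued.
+ */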
+int
+lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+{
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       struct lpfc_nodelist *ndlp;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+       cmdsize = (sizeof (uint32_t) + sizeof (SCR));
+       if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL)) == 0) {
+               return (1);
+       }
+
+       lpfc_nlp_init(phba, ndlp, nportid);
+
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+                                         ndlp, ELS_CMD_SCR)) == 0) {
+               mempool_free( ndlp, phba->nlp_mem_pool);
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
+       pcmd += sizeof (uint32_t);
+
+       /* For SCR, remainder of payload is SCR parameter page */
+       memset(pcmd, 0, sizeof (SCR));
+       ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+
+       phba->fc_stat.elsXmitSCR++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+       spin_lock_irq(phba->host->host_lock);
+       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+               spin_unlock_irq(phba->host->host_lock);
+               mempool_free( ndlp, phba->nlp_mem_pool);
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       mempool_free( ndlp, phba->nlp_mem_pool);
+       return (0);
+}
+
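+/*
+ * Issue an ELS FARPR to the port at <nportid>.  The payload requests a
+ * match on both port and node name; the originator name fields are filled
+ * in from the node list when the DID is already known.  A temporary
+ * nodelist entry is used for the exchange and freed after issue.
+ */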
+static int
+lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+{
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       FARP *fp;
+       uint8_t *pcmd;
+       uint32_t *lp;
+       uint16_t cmdsize;
+       struct lpfc_nodelist *ondlp;
+       struct lpfc_nodelist *ndlp;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+       cmdsize = (sizeof (uint32_t) + sizeof (FARP));
+       if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL)) == 0) {
+               return (1);
+       }
+       lpfc_nlp_init(phba, ndlp, nportid);
+
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+                                         ndlp, ELS_CMD_RNID)) == 0) {
+               mempool_free( ndlp, phba->nlp_mem_pool);
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
+       pcmd += sizeof (uint32_t);
+
+       /* Fill in FARPR payload */
+       fp = (FARP *) (pcmd);
+       memset(fp, 0, sizeof (FARP));
+       lp = (uint32_t *) pcmd;
+       *lp++ = be32_to_cpu(nportid);
+       *lp++ = be32_to_cpu(phba->fc_myDID);
+       fp->Rflags = 0;
+       fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
+
+       memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
+       memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+       if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
+               memcpy(&fp->OportName, &ondlp->nlp_portname,
+                      sizeof (struct lpfc_name));
+               memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
+                      sizeof (struct lpfc_name));
+       }
+
+       phba->fc_stat.elsXmitFARPR++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+       spin_lock_irq(phba->host->host_lock);
+       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+               spin_unlock_irq(phba->host->host_lock);
+               mempool_free( ndlp, phba->nlp_mem_pool);
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       mempool_free( ndlp, phba->nlp_mem_pool);
+       return (0);
+}
+
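+/*
+ * Timer callback for a delayed ELS retry: queue an LPFC_EVT_ELS_RETRY work
+ * event for the node and wake the worker thread.  The actual retransmit is
+ * performed later by lpfc_els_retry_delay_handler().
+ */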
+void
+lpfc_els_retry_delay(unsigned long ptr)
+{
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_hba *phba;
+       unsigned long iflag;
+       struct lpfc_work_evt  *evtp;
+
+       ndlp = (struct lpfc_nodelist *)ptr;
+       phba = ndlp->nlp_phba;
+       evtp = &ndlp->els_retry_evt;
+
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       if (!list_empty(&evtp->evt_listp)) {
+               spin_unlock_irqrestore(phba->host->host_lock, iflag);
+               return;
+       }
+
+       evtp->evt_arg1  = ndlp;
+       evtp->evt       = LPFC_EVT_ELS_RETRY;
+       list_add_tail(&evtp->evt_listp, &phba->work_list);
+       if (phba->work_wait)
+               wake_up(phba->work_wait);
+
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+       return;
+}
+
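+/*
+ * Worker-thread side of a delayed ELS retry: clear NLP_DELAY_TMO and
+ * reissue the command recorded in nlp_last_elscmd, first moving the node
+ * to the matching state and list.
+ */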
+void
+lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba *phba;
+       uint32_t cmd;
+       uint32_t did;
+       uint8_t retry;
+
+       phba = ndlp->nlp_phba;
+       spin_lock_irq(phba->host->host_lock);
+       did = (uint32_t) (ndlp->nlp_DID);
+       cmd = (uint32_t) (ndlp->nlp_last_elscmd);
+
+       if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+               spin_unlock_irq(phba->host->host_lock);
+               return;
+       }
+
+       ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+       spin_unlock_irq(phba->host->host_lock);
+       retry = ndlp->nlp_retry;
+
+       switch (cmd) {
+       case ELS_CMD_FLOGI:
+               lpfc_issue_els_flogi(phba, ndlp, retry);
+               break;
+       case ELS_CMD_PLOGI:
+               ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+               lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+               lpfc_issue_els_plogi(phba, ndlp, retry);
+               break;
+       case ELS_CMD_ADISC:
+               ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+               lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+               lpfc_issue_els_adisc(phba, ndlp, retry);
+               break;
+       case ELS_CMD_PRLI:
+               ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+               lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+               lpfc_issue_els_prli(phba, ndlp, retry);
+               break;
+       case ELS_CMD_LOGO:
+               ndlp->nlp_state = NLP_STE_NPR_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               lpfc_issue_els_logo(phba, ndlp, retry);
+               break;
+       }
+       return;
+}
+
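+/*
+ * Decide whether a failed ELS command should be retried.  The IOCB status
+ * and LS_RJT reason/explanation codes select retry and an optional delay;
+ * commands to the management server (FDMI_DID) are always marked
+ * retryable.  Returns 1 if a retry (immediate or delayed) was scheduled,
+ * 0 if retries are exhausted or the failure is not retryable.
+ */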
+static int
+lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+              struct lpfc_iocbq * rspiocb)
+{
+       IOCB_t *irsp;
+       struct lpfc_dmabuf *pcmd;
+       struct lpfc_nodelist *ndlp;
+       uint32_t *elscmd;
+       struct ls_rjt stat;
+       int retry, maxretry;
+       int delay;
+       uint32_t cmd;
+
+       retry = 0;
+       delay = 0;
+       maxretry = lpfc_max_els_tries;
+       irsp = &rspiocb->iocb;
+       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       cmd = 0;
+       /* Note: context2 may be 0 for internal driver abort
+        * of delayed ELS commands.
+        */
+
+       if (pcmd && pcmd->virt) {
+               elscmd = (uint32_t *) (pcmd->virt);
+               cmd = *elscmd++;
+       }
+
+       switch (irsp->ulpStatus) {
+       case IOSTAT_FCP_RSP_ERROR:
+       case IOSTAT_REMOTE_STOP:
+               break;
+
+       case IOSTAT_LOCAL_REJECT:
+               switch ((irsp->un.ulpWord[4] & 0xff)) {
+               case IOERR_LOOP_OPEN_FAILURE:
+                       if (cmd == ELS_CMD_PLOGI) {
+                               if (cmdiocb->retry == 0) {
+                                       delay = 1;
+                               }
+                       }
+                       retry = 1;
+                       break;
+
+               case IOERR_SEQUENCE_TIMEOUT:
+                       retry = 1;
+                       if ((cmd == ELS_CMD_FLOGI)
+                           && (phba->fc_topology != TOPOLOGY_LOOP)) {
+                               delay = 1;
+                               maxretry = 48;
+                       }
+                       break;
+
+               case IOERR_NO_RESOURCES:
+                       if (cmd == ELS_CMD_PLOGI) {
+                               delay = 1;
+                       }
+                       retry = 1;
+                       break;
+
+               case IOERR_INVALID_RPI:
+                       retry = 1;
+                       break;
+               }
+               break;
+
+       case IOSTAT_NPORT_RJT:
+       case IOSTAT_FABRIC_RJT:
+               if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+                       retry = 1;
+                       break;
+               }
+               break;
+
+       case IOSTAT_NPORT_BSY:
+       case IOSTAT_FABRIC_BSY:
+               retry = 1;
+               break;
+
+       case IOSTAT_LS_RJT:
+               stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+               /* Added for Vendor specific support
+                * Just keep retrying for these Rsn / Exp codes
+                */
+               switch (stat.un.b.lsRjtRsnCode) {
+               case LSRJT_UNABLE_TPC:
+                       if (stat.un.b.lsRjtRsnCodeExp ==
+                           LSEXP_CMD_IN_PROGRESS) {
+                               if (cmd == ELS_CMD_PLOGI) {
+                                       delay = 1;
+                                       maxretry = 48;
+                               }
+                               retry = 1;
+                               break;
+                       }
+                       if (cmd == ELS_CMD_PLOGI) {
+                               delay = 1;
+                               maxretry = lpfc_max_els_tries + 1;
+                               retry = 1;
+                               break;
+                       }
+                       break;
+
+               case LSRJT_LOGICAL_BSY:
+                       if (cmd == ELS_CMD_PLOGI) {
+                               delay = 1;
+                               maxretry = 48;
+                       }
+                       retry = 1;
+                       break;
+               }
+               break;
+
+       case IOSTAT_INTERMED_RSP:
+       case IOSTAT_BA_RJT:
+               break;
+
+       default:
+               break;
+       }
+
+       if (ndlp->nlp_DID == FDMI_DID) {
+               retry = 1;
+       }
+
+       if ((++cmdiocb->retry) >= maxretry) {
+               phba->fc_stat.elsRetryExceeded++;
+               retry = 0;
+       }
+
+       if (retry) {
+
+               /* Retry ELS command <elsCmd> to remote NPORT <did> */
+               lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                               "%d:0107 Retry ELS command x%x to remote "
+                               "NPORT x%x Data: x%x x%x\n",
+                               phba->brd_no,
+                               cmd, ndlp->nlp_DID, cmdiocb->retry, delay);
+
+               if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
+                       /* If discovery / RSCN timer is running, reset it */
+                       if (timer_pending(&phba->fc_disctmo) ||
+                             (phba->fc_flag & FC_RSCN_MODE)) {
+                               lpfc_set_disctmo(phba);
+                       }
+               }
+
+               phba->fc_stat.elsXmitRetry++;
+               if (delay) {
+                       phba->fc_stat.elsDelayRetry++;
+                       ndlp->nlp_retry = cmdiocb->retry;
+
+                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       ndlp->nlp_flag |= NLP_DELAY_TMO;
+
+                       ndlp->nlp_state = NLP_STE_NPR_NODE;
+                       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+                       ndlp->nlp_last_elscmd = cmd;
+
+                       return (1);
+               }
+               switch (cmd) {
+               case ELS_CMD_FLOGI:
+                       lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
+                       return (1);
+               case ELS_CMD_PLOGI:
+                       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                       lpfc_issue_els_plogi(phba, ndlp, cmdiocb->retry);
+                       return (1);
+               case ELS_CMD_ADISC:
+                       ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+                       lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
+                       return (1);
+               case ELS_CMD_PRLI:
+                       ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+                       lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
+                       return (1);
+               case ELS_CMD_LOGO:
+                       ndlp->nlp_state = NLP_STE_NPR_NODE;
+                       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+                       lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
+                       return (1);
+               }
+       }
+
+       /* No retry ELS command <elsCmd> to remote NPORT <did> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0108 No retry ELS command x%x to remote NPORT x%x "
+                       "Data: x%x x%x\n",
+                       phba->brd_no,
+                       cmd, ndlp->nlp_DID, cmdiocb->retry, ndlp->nlp_flag);
+
+       return (0);
+}
+
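+/*
+ * Free the DMA buffers (command, response and BPL) attached to an ELS IOCB
+ * and return the IOCB to the driver's free list.
+ */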
+int
+lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
+{
+       struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+
+       /* context2  = cmd,  context2->next = rsp, context3 = bpl */
+       if (elsiocb->context2) {
+               buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+               /* Free the response before processing the command.  */
+               if (!list_empty(&buf_ptr1->list)) {
+                       list_remove_head(&buf_ptr1->list, buf_ptr,
+                                        struct lpfc_dmabuf,
+                                        list);
+                       lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+                       kfree(buf_ptr);
+               }
+               lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+               kfree(buf_ptr1);
+       }
+
+       if (elsiocb->context3) {
+               buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+               lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+               kfree(buf_ptr);
+       }
+       spin_lock_irq(phba->host->host_lock);
+       list_add_tail(&elsiocb->list, &phba->lpfc_iocb_list);
+       spin_unlock_irq(phba->host->host_lock);
+       return 0;
+}
+
+static void
+lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                      struct lpfc_iocbq * rspiocb)
+{
+       struct lpfc_nodelist *ndlp;
+
+       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+
+       /* ACC to LOGO completes to NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0109 ACC to LOGO completes to NPort x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+                       ndlp->nlp_state, ndlp->nlp_rpi);
+
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+       spin_unlock_irq(phba->host->host_lock);
+
+       switch (ndlp->nlp_state) {
+       case NLP_STE_UNUSED_NODE:       /* node is just allocated */
+               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+               break;
+       case NLP_STE_NPR_NODE:          /* NPort Recovery mode */
+               lpfc_unreg_rpi(phba, ndlp);
+               break;
+       default:
+               break;
+       }
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
+
+static void
+lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                 struct lpfc_iocbq * rspiocb)
+{
+       struct lpfc_nodelist *ndlp;
+       LPFC_MBOXQ_t *mbox = NULL;
+
+       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       if (cmdiocb->context_un.mbox)
+               mbox = cmdiocb->context_un.mbox;
+
+       /* Check to see if link went down during discovery */
+       if ((lpfc_els_chk_latt(phba)) || !ndlp) {
+               if (mbox) {
+                       mempool_free( mbox, phba->mbox_mem_pool);
+               }
+               goto out;
+       }
+
+       /* ELS response tag <ulpIoTag> completes */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0110 ELS response tag x%x completes "
+                       "Data: x%x x%x x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
+                       rspiocb->iocb.un.ulpWord[4], ndlp->nlp_DID,
+                       ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+       if (mbox) {
+               if ((rspiocb->iocb.ulpStatus == 0)
+                   && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+                       /* set_slim mailbox command needs to execute first,
+                        * queue this command to be processed later.
+                        */
+                       lpfc_unreg_rpi(phba, ndlp);
+                       mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+                       mbox->context2 = ndlp;
+                       ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
+                       if (lpfc_sli_issue_mbox(phba, mbox,
+                                               (MBX_NOWAIT | MBX_STOP_IOCB))
+                           != MBX_NOT_FINISHED) {
+                               goto out;
+                       }
+                       /* NOTE: we should have messages for unsuccessful
+                          reglogin */
+                       mempool_free( mbox, phba->mbox_mem_pool);
+               } else {
+                       mempool_free( mbox, phba->mbox_mem_pool);
+                       if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+                               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+                       }
+               }
+       }
+out:
+       if (ndlp) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
+
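+/*
+ * Send an ELS ACC on the exchange identified by <oldiocb>.  For a PLOGI
+ * the ACC carries our service parameters and an optional mailbox command
+ * is attached for the completion handler to issue.
+ */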
+int
+lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
+                struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
+                LPFC_MBOXQ_t * mbox, uint8_t newnode)
+{
+       IOCB_t *icmd;
+       IOCB_t *oldcmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int rc;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+       oldcmd = &oldiocb->iocb;
+
+       switch (flag) {
+       case ELS_CMD_ACC:
+               cmdsize = sizeof (uint32_t);
+               if ((elsiocb =
+                    lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+                                       ndlp, ELS_CMD_ACC)) == 0) {
+                       return (1);
+               }
+               icmd = &elsiocb->iocb;
+               icmd->ulpContext = oldcmd->ulpContext;  /* Xri */
+               pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+               *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+               pcmd += sizeof (uint32_t);
+               break;
+       case ELS_CMD_PLOGI:
+               cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
+               if ((elsiocb =
+                    lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+                                       ndlp, ELS_CMD_ACC)) == 0) {
+                       return (1);
+               }
+               icmd = &elsiocb->iocb;
+               icmd->ulpContext = oldcmd->ulpContext;  /* Xri */
+               pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+               if (mbox)
+                       elsiocb->context_un.mbox = mbox;
+
+               *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+               pcmd += sizeof (uint32_t);
+               memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+               break;
+       default:
+               return (1);
+       }
+
+       if (newnode)
+               elsiocb->context1 = NULL;
+
+       /* Xmit ELS ACC response tag <ulpIoTag> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0128 Xmit ELS ACC response tag x%x "
+                       "Data: x%x x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       elsiocb->iocb.ulpIoTag,
+                       elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+                       ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+       if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+               elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+       } else {
+               elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+       }
+
+       phba->fc_stat.elsXmitACC++;
+       spin_lock_irq(phba->host->host_lock);
+       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       spin_unlock_irq(phba->host->host_lock);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       return (0);
+}
+
+int
+lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
+                   struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+       IOCB_t *icmd;
+       IOCB_t *oldcmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int rc;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+
+       cmdsize = 2 * sizeof (uint32_t);
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+                                         ndlp, ELS_CMD_LS_RJT)) == 0) {
+               return (1);
+       }
+
+       icmd = &elsiocb->iocb;
+       oldcmd = &oldiocb->iocb;
+       icmd->ulpContext = oldcmd->ulpContext;  /* Xri */
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+       pcmd += sizeof (uint32_t);
+       *((uint32_t *) (pcmd)) = rejectError;
+
+       /* Xmit ELS RJT <err> response tag <ulpIoTag> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0129 Xmit ELS RJT x%x response tag x%x "
+                       "Data: x%x x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       rejectError, elsiocb->iocb.ulpIoTag,
+                       elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+                       ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+       phba->fc_stat.elsXmitLSRJT++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+       spin_lock_irq(phba->host->host_lock);
+       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       spin_unlock_irq(phba->host->host_lock);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       return (0);
+}
+
+int
+lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
+                      struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+       ADISC *ap;
+       IOCB_t *icmd;
+       IOCB_t *oldcmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int rc;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+
+       cmdsize = sizeof (uint32_t) + sizeof (ADISC);
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+                                         ndlp, ELS_CMD_ACC)) == 0) {
+               return (1);
+       }
+
+       /* Xmit ADISC ACC response tag <ulpIoTag> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0130 Xmit ADISC ACC response tag x%x "
+                       "Data: x%x x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       elsiocb->iocb.ulpIoTag,
+                       elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+                       ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+       icmd = &elsiocb->iocb;
+       oldcmd = &oldiocb->iocb;
+       icmd->ulpContext = oldcmd->ulpContext;  /* Xri */
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof (uint32_t);
+
+       ap = (ADISC *) (pcmd);
+       ap->hardAL_PA = phba->fc_pref_ALPA;
+       memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
+       memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+       ap->DID = be32_to_cpu(phba->fc_myDID);
+
+       phba->fc_stat.elsXmitACC++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+       spin_lock_irq(phba->host->host_lock);
+       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       spin_unlock_irq(phba->host->host_lock);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       return (0);
+}
+
+int
+lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
+                     struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+       PRLI *npr;
+       lpfc_vpd_t *vpd;
+       IOCB_t *icmd;
+       IOCB_t *oldcmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int rc;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
+
+       cmdsize = sizeof (uint32_t) + sizeof (PRLI);
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+                                         ndlp,
+                                         (ELS_CMD_ACC |
+                                          (ELS_CMD_PRLI & ~ELS_RSP_MASK)))) ==
+           0) {
+               return (1);
+       }
+
+       /* Xmit PRLI ACC response tag <ulpIoTag> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0131 Xmit PRLI ACC response tag x%x "
+                       "Data: x%x x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       elsiocb->iocb.ulpIoTag,
+                       elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+                       ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+       icmd = &elsiocb->iocb;
+       oldcmd = &oldiocb->iocb;
+       icmd->ulpContext = oldcmd->ulpContext;  /* Xri */
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
+       pcmd += sizeof (uint32_t);
+
+       /* For PRLI, remainder of payload is PRLI parameter page */
+       memset(pcmd, 0, sizeof (PRLI));
+
+       npr = (PRLI *) pcmd;
+       vpd = &phba->vpd;
+       /*
+        * If our firmware version is 3.20 or later,
+        * set the following bits for FC-TAPE support.
+        */
+       if (vpd->rev.feaLevelHigh >= 0x02) {
+               npr->ConfmComplAllowed = 1;
+               npr->Retry = 1;
+               npr->TaskRetryIdReq = 1;
+       }
+
+       npr->acceptRspCode = PRLI_REQ_EXECUTED;
+       npr->estabImagePair = 1;
+       npr->readXferRdyDis = 1;
+       npr->ConfmComplAllowed = 1;
+
+       npr->prliType = PRLI_FCP_TYPE;
+       npr->initiatorFunc = 1;
+
+       phba->fc_stat.elsXmitACC++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+
+       spin_lock_irq(phba->host->host_lock);
+       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       spin_unlock_irq(phba->host->host_lock);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       return (0);
+}
+
+static int
+lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
+                     uint8_t format,
+                     struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+       RNID *rn;
+       IOCB_t *icmd;
+       IOCB_t *oldcmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int rc;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];
+
+       cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
+               + (2 * sizeof (struct lpfc_name));
+       if (format)
+               cmdsize += sizeof (RNID_TOP_DISC);
+
+       if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+                                         ndlp, ELS_CMD_ACC)) == 0) {
+               return (1);
+       }
+
+       /* Xmit RNID ACC response tag <ulpIoTag> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0132 Xmit RNID ACC response tag x%x "
+                       "Data: x%x\n",
+                       phba->brd_no,
+                       elsiocb->iocb.ulpIoTag,
+                       elsiocb->iocb.ulpContext);
+
+       icmd = &elsiocb->iocb;
+       oldcmd = &oldiocb->iocb;
+       icmd->ulpContext = oldcmd->ulpContext;  /* Xri */
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof (uint32_t);
+
+       memset(pcmd, 0, sizeof (RNID));
+       rn = (RNID *) (pcmd);
+       rn->Format = format;
+       rn->CommonLen = (2 * sizeof (struct lpfc_name));
+       memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
+       memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+       switch (format) {
+       case 0:
+               rn->SpecificLen = 0;
+               break;
+       case RNID_TOPOLOGY_DISC:
+               rn->SpecificLen = sizeof (RNID_TOP_DISC);
+               memcpy(&rn->un.topologyDisc.portName,
+                      &phba->fc_portname, sizeof (struct lpfc_name));
+               rn->un.topologyDisc.unitType = RNID_HBA;
+               rn->un.topologyDisc.physPort = 0;
+               rn->un.topologyDisc.attachedNodes = 0;
+               break;
+       default:
+               rn->CommonLen = 0;
+               rn->SpecificLen = 0;
+               break;
+       }
+
+       phba->fc_stat.elsXmitACC++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+       elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
+                                   * it could be freed */
+
+       spin_lock_irq(phba->host->host_lock);
+       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       spin_unlock_irq(phba->host->host_lock);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return (1);
+       }
+       return (0);
+}
+
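+/*
+ * Walk the NPR list and issue ADISCs to nodes marked NLP_NPR_2B_DISC and
+ * NLP_NPR_ADISC, stopping once cfg_discovery_threads commands are
+ * outstanding (FC_NLP_MORE is set when more remain).  Returns the number
+ * of ADISCs sent.
+ */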
+int
+lpfc_els_disc_adisc(struct lpfc_hba * phba)
+{
+       int sentadisc;
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+
+       sentadisc = 0;
+       /* go thru NPR list and issue any remaining ELS ADISCs */
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+                       nlp_listp) {
+               if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+                       if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+                               ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+                               ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+                               lpfc_nlp_list(phba, ndlp,
+                                       NLP_ADISC_LIST);
+                               lpfc_issue_els_adisc(phba, ndlp, 0);
+                               sentadisc++;
+                               phba->num_disc_nodes++;
+                               if (phba->num_disc_nodes >=
+                                   phba->cfg_discovery_threads) {
+                                       spin_lock_irq(phba->host->host_lock);
+                                       phba->fc_flag |= FC_NLP_MORE;
+                                       spin_unlock_irq(phba->host->host_lock);
+                                       break;
+                               }
+                       }
+               }
+       }
+       if (sentadisc == 0) {
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag &= ~FC_NLP_MORE;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       return(sentadisc);
+}
+
+int
+lpfc_els_disc_plogi(struct lpfc_hba * phba)
+{
+       int sentplogi;
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+
+       sentplogi = 0;
+       /* go thru NPR list and issue any remaining ELS PLOGIs */
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+                               nlp_listp) {
+               if ((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+                  (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
+                       if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+                               ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                               lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                               lpfc_issue_els_plogi(phba, ndlp, 0);
+                               sentplogi++;
+                               phba->num_disc_nodes++;
+                               if (phba->num_disc_nodes >=
+                                   phba->cfg_discovery_threads) {
+                                       spin_lock_irq(phba->host->host_lock);
+                                       phba->fc_flag |= FC_NLP_MORE;
+                                       spin_unlock_irq(phba->host->host_lock);
+                                       break;
+                               }
+                       }
+               }
+       }
+       if (sentplogi == 0) {
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag &= ~FC_NLP_MORE;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       return(sentplogi);
+}
+
+int
+lpfc_els_flush_rscn(struct lpfc_hba * phba)
+{
+       struct lpfc_dmabuf *mp;
+       int i;
+
+       for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
+               mp = phba->fc_rscn_id_list[i];
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               phba->fc_rscn_id_list[i] = NULL;
+       }
+       phba->fc_rscn_id_cnt = 0;
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+       spin_unlock_irq(phba->host->host_lock);
+       lpfc_can_disctmo(phba);
+       return (0);
+}
+
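+/*
+ * Match a DID against the saved RSCN payloads.  Fabric addresses never
+ * match; a full-rediscovery RSCN matches everything; otherwise the RSCN
+ * address format selects single port, area, domain or whole-fabric scope.
+ * Returns the DID on a match, 0 otherwise.
+ */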
+int
+lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
+{
+       D_ID ns_did;
+       D_ID rscn_did;
+       struct lpfc_dmabuf *mp;
+       uint32_t *lp;
+       uint32_t payload_len, cmd, i, match;
+
+       ns_did.un.word = did;
+       match = 0;
+
+       /* Never match fabric nodes for RSCNs */
+       if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+               return(0);
+
+       /* If we are doing a FULL RSCN rediscovery, match everything */
+       if (phba->fc_flag & FC_RSCN_DISCOVERY) {
+               return (did);
+       }
+
+       for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
+               mp = phba->fc_rscn_id_list[i];
+               lp = (uint32_t *) mp->virt;
+               cmd = *lp++;
+               payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
+               payload_len -= sizeof (uint32_t);       /* take off word 0 */
+               while (payload_len) {
+                       rscn_did.un.word = *lp++;
+                       rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
+                       payload_len -= sizeof (uint32_t);
+                       switch (rscn_did.un.b.resv) {
+                       case 0: /* Single N_Port ID affected */
+                               if (ns_did.un.word == rscn_did.un.word) {
+                                       match = did;
+                               }
+                               break;
+                       case 1: /* Whole N_Port Area affected */
+                               if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+                                   && (ns_did.un.b.area == rscn_did.un.b.area)) {
+                                       match = did;
+                               }
+                               break;
+                       case 2: /* Whole N_Port Domain affected */
+                               if (ns_did.un.b.domain == rscn_did.un.b.domain) {
+                                       match = did;
+                               }
+                               break;
+                       case 3: /* Whole Fabric affected */
+                               match = did;
+                               break;
+                       default:
+                               /* Unknown Identifier in RSCN list */
+                               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                                               "%d:0217 Unknown Identifier in "
+                                               "RSCN payload Data: x%x\n",
+                                               phba->brd_no, rscn_did.un.word);
+                               break;
+                       }
+                       if (match) {
+                               break;
+                       }
+               }
+       }
+       return (match);
+}
+
+static int
+lpfc_rscn_recovery_check(struct lpfc_hba * phba)
+{
+       struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
+       struct list_head *listp;
+       struct list_head *node_list[7];
+       int i;
+
+       /* Look at all nodes affected by pending RSCNs and move
+        * them to NPR list.
+        */
+       node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
+       node_list[1] = &phba->fc_nlpmap_list;
+       node_list[2] = &phba->fc_nlpunmap_list;
+       node_list[3] = &phba->fc_prli_list;
+       node_list[4] = &phba->fc_reglogin_list;
+       node_list[5] = &phba->fc_adisc_list;
+       node_list[6] = &phba->fc_plogi_list;
+       for (i = 0; i < 7; i++) {
+               listp = node_list[i];
+               if (list_empty(listp))
+                       continue;
+
+               list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
+                       if (!(lpfc_rscn_payload_check(phba, ndlp->nlp_DID)))
+                               continue;
+
+                       lpfc_disc_state_machine(phba, ndlp, NULL,
+                                       NLP_EVT_DEVICE_RECOVERY);
+                       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+                               ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+                               del_timer_sync(&ndlp->nlp_delayfunc);
+                               if (!list_empty(&ndlp->
+                                               els_retry_evt.evt_listp))
+                                       list_del_init(&ndlp->
+                                               els_retry_evt.evt_listp);
+                       }
+               }
+       }
+       return (0);
+}
+
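+/*
+ * Handle an unsolicited RSCN.  If discovery has not started yet the RSCN
+ * is simply ACC'd; if discovery or earlier RSCN processing is active the
+ * payload is saved (or FC_RSCN_DISCOVERY is set when the queue is full)
+ * and affected nodes are put into recovery; otherwise processing starts
+ * immediately via lpfc_els_handle_rscn().
+ */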
+static int
+lpfc_els_rcv_rscn(struct lpfc_hba * phba,
+                 struct lpfc_iocbq * cmdiocb,
+                 struct lpfc_nodelist * ndlp, uint8_t newnode)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       uint32_t payload_len, cmd;
+
+       icmd = &cmdiocb->iocb;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+
+       cmd = *lp++;
+       payload_len = be32_to_cpu(cmd) & 0xffff;        /* payload length */
+       payload_len -= sizeof (uint32_t);       /* take off word 0 */
+       cmd &= ELS_CMD_MASK;
+
+       /* RSCN received */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_DISCOVERY,
+                       "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
+
+       /* If we are about to begin discovery, just ACC the RSCN.
+        * Discovery processing will satisfy it.
+        */
+       if (phba->hba_state < LPFC_NS_QRY) {
+               lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+                                                               newnode);
+               return (0);
+       }
+
+       /* If we are already processing an RSCN, save the received
+        * RSCN payload buffer, cmdiocb->context2 to process later.
+        */
+       if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+               if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
+                   !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
+                       spin_lock_irq(phba->host->host_lock);
+                       phba->fc_flag |= FC_RSCN_MODE;
+                       spin_unlock_irq(phba->host->host_lock);
+                       phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+
+                       /* If we zero cmdiocb->context2, the calling
+                        * routine will not try to free it.
+                        */
+                       cmdiocb->context2 = NULL;
+
+                       /* Deferred RSCN */
+                       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                                       "%d:0235 Deferred RSCN "
+                                       "Data: x%x x%x x%x\n",
+                                       phba->brd_no, phba->fc_rscn_id_cnt,
+                                       phba->fc_flag, phba->hba_state);
+               } else {
+                       spin_lock_irq(phba->host->host_lock);
+                       phba->fc_flag |= FC_RSCN_DISCOVERY;
+                       spin_unlock_irq(phba->host->host_lock);
+                       /* ReDiscovery RSCN */
+                       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                                       "%d:0234 ReDiscovery RSCN "
+                                       "Data: x%x x%x x%x\n",
+                                       phba->brd_no, phba->fc_rscn_id_cnt,
+                                       phba->fc_flag, phba->hba_state);
+               }
+               /* Send back ACC */
+               lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+                                                               newnode);
+
+               /* send RECOVERY event for ALL nodes that match RSCN payload */
+               lpfc_rscn_recovery_check(phba);
+               return (0);
+       }
+
+       phba->fc_flag |= FC_RSCN_MODE;
+       phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+       /*
+        * If we zero cmdiocb->context2, the calling routine will
+        * not try to free it.
+        */
+       cmdiocb->context2 = NULL;
+
+       lpfc_set_disctmo(phba);
+
+       /* Send back ACC */
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
+
+       /* send RECOVERY event for ALL nodes that match RSCN payload */
+       lpfc_rscn_recovery_check(phba);
+
+       return (lpfc_els_handle_rscn(phba));
+}
+
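+/*
+ * Process pending RSCNs: restart the discovery timer and issue a GID_FT
+ * query to the NameServer.  If no NameServer login exists yet, a PLOGI
+ * to the NameServer is issued first; if that cannot be set up, the
+ * pending RSCN state is flushed.
+ */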
+int
+lpfc_els_handle_rscn(struct lpfc_hba * phba)
+{
+       struct lpfc_nodelist *ndlp;
+
+       /* Start timer for RSCN processing */
+       lpfc_set_disctmo(phba);
+
+       /* RSCN processed */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_DISCOVERY,
+                       "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       phba->fc_flag, 0, phba->fc_rscn_id_cnt,
+                       phba->hba_state);
+
+       /* To process RSCN, first compare RSCN data with NameServer */
+       phba->fc_ns_retry = 0;
+       if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+                                     NameServer_DID))) {
+               /* Good ndlp, issue CT Request to NameServer */
+               if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
+                       /* Wait for NameServer query cmpl before we can
+                          continue */
+                       return (1);
+               }
+       } else {
+               /* No NameServer login exists yet; if a PLOGI to the
+                * NameServer is not already pending, issue one.
+                */
+               if ((ndlp =
+                    lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID))) {
+                       /* Wait for NameServer login cmpl before we can
+                          continue */
+                       return (1);
+               }
+               if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+                   == 0) {
+                       lpfc_els_flush_rscn(phba);
+                       return (0);
+               } else {
+                       lpfc_nlp_init(phba, ndlp, NameServer_DID);
+                       ndlp->nlp_type |= NLP_FABRIC;
+                       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                       lpfc_issue_els_plogi(phba, ndlp, 0);
+                       /* Wait for NameServer login cmpl before we can
+                          continue */
+                       return (1);
+               }
+       }
+
+       lpfc_els_flush_rscn(phba);
+       return (0);
+}
+
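+/*
+ * Handle a received FLOGI.  FLOGIs are ignored in loop topology.
+ * Otherwise the service parameters are checked and the port names
+ * compared to decide which side of the point-to-point link will
+ * originate the NPort login.
+ */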
+static int
+lpfc_els_rcv_flogi(struct lpfc_hba * phba,
+                  struct lpfc_iocbq * cmdiocb,
+                  struct lpfc_nodelist * ndlp, uint8_t newnode)
+{
+       struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       uint32_t *lp = (uint32_t *) pcmd->virt;
+       IOCB_t *icmd = &cmdiocb->iocb;
+       struct serv_parm *sp;
+       LPFC_MBOXQ_t *mbox;
+       struct ls_rjt stat;
+       uint32_t cmd, did;
+       int rc;
+
+       cmd = *lp++;
+       sp = (struct serv_parm *) lp;
+
+       /* FLOGI received */
+
+       lpfc_set_disctmo(phba);
+
+       if (phba->fc_topology == TOPOLOGY_LOOP) {
+               /* We should never receive a FLOGI in loop mode, ignore it */
+               did = icmd->un.elsreq64.remoteID;
+
+               /* An FLOGI ELS command <elsCmd> was received from DID <did> in
+                  Loop Mode */
+               lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+                               "%d:0113 An FLOGI ELS command x%x was received "
+                               "from DID x%x in Loop Mode\n",
+                               phba->brd_no, cmd, did);
+               return (1);
+       }
+
+       did = Fabric_DID;
+
+       if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
+               /* We accept the FLOGI; then, if our portname is greater
+                * than the remote portname, we initiate the Nport login.
+                */
+
+               rc = memcmp(&phba->fc_portname, &sp->portName,
+                           sizeof (struct lpfc_name));
+
+               if (!rc) {
+                       if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+                                                 GFP_KERNEL)) == 0) {
+                               return (1);
+                       }
+                       lpfc_linkdown(phba);
+                       lpfc_init_link(phba, mbox,
+                                      phba->cfg_topology,
+                                      phba->cfg_link_speed);
+                       mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+                       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                       rc = lpfc_sli_issue_mbox
+                               (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+                       if (rc == MBX_NOT_FINISHED) {
+                               mempool_free( mbox, phba->mbox_mem_pool);
+                       }
+                       return (1);
+               }
+               else if (rc > 0) {      /* greater than */
+                       spin_lock_irq(phba->host->host_lock);
+                       phba->fc_flag |= FC_PT2PT_PLOGI;
+                       spin_unlock_irq(phba->host->host_lock);
+               }
+               phba->fc_flag |= FC_PT2PT;
+               phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+       } else {
+               /* Reject this request because of invalid parameters */
+               stat.un.b.lsRjtRsvd0 = 0;
+               stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+               stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+               stat.un.b.vendorUnique = 0;
+               lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+               return (1);
+       }
+
+       /* Send back ACC */
+       lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
+
+       return (0);
+}
+
+static int
+lpfc_els_rcv_rnid(struct lpfc_hba * phba,
+                 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       RNID *rn;
+       struct ls_rjt stat;
+       uint32_t cmd, did;
+
+       icmd = &cmdiocb->iocb;
+       did = icmd->un.elsreq64.remoteID;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+
+       cmd = *lp++;
+       rn = (RNID *) lp;
+
+       /* RNID received */
+
+       switch (rn->Format) {
+       case 0:
+       case RNID_TOPOLOGY_DISC:
+               /* Send back ACC */
+               lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
+               break;
+       default:
+               /* Reject this request because the format is not supported */
+               stat.un.b.lsRjtRsvd0 = 0;
+               stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+               stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+               stat.un.b.vendorUnique = 0;
+               lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+       }
+       return (0);
+}
+
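+/*
+ * Handle a received RRQ: abort the exchange identified by the OX_ID or
+ * RX_ID in the payload on the FCP ring, then ACC the request.
+ */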
+static int
+lpfc_els_rcv_rrq(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       RRQ *rrq;
+       uint32_t cmd, did;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_FCP_RING];
+       icmd = &cmdiocb->iocb;
+       did = icmd->un.elsreq64.remoteID;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+
+       cmd = *lp++;
+       rrq = (RRQ *) lp;
+
+       /* RRQ received */
+       /* Get oxid / rxid from payload and abort it */
+       spin_lock_irq(phba->host->host_lock);
+       if ((rrq->SID == be32_to_cpu(phba->fc_myDID))) {
+               lpfc_sli_abort_iocb(phba, pring, 0, 0, rrq->Oxid,
+                                                       LPFC_CTX_CTX);
+       } else {
+               lpfc_sli_abort_iocb(phba, pring, 0, 0, rrq->Rxid,
+                                                       LPFC_CTX_CTX);
+       }
+
+       spin_unlock_irq(phba->host->host_lock);
+       /* ACCEPT the rrq request */
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+       return 0;
+}
+
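+/*
+ * Handle a received FARP request.  Only WWPN / WWNN matching is
+ * supported; when the request matches this port, optionally PLOGI back
+ * to the requester and/or send a FARP response, as requested by the
+ * Rflags in the payload.
+ */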
+static int
+lpfc_els_rcv_farp(struct lpfc_hba * phba,
+                 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       FARP *fp;
+       uint32_t cmd, cnt, did;
+
+       icmd = &cmdiocb->iocb;
+       did = icmd->un.elsreq64.remoteID;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+
+       cmd = *lp++;
+       fp = (FARP *) lp;
+
+       /* FARP-REQ received from DID <did> */
+       lpfc_printf_log(phba,
+                        KERN_INFO,
+                        LOG_IP,
+                        "%d:0601 FARP-REQ received from DID x%x\n",
+                        phba->brd_no, did);
+
+       /* We will only support match on WWPN or WWNN */
+       if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
+               return (0);
+       }
+
+       cnt = 0;
+       /* If this FARP command is searching for my portname */
+       if (fp->Mflags & FARP_MATCH_PORT) {
+               if (memcmp(&fp->RportName, &phba->fc_portname,
+                          sizeof (struct lpfc_name)) == 0)
+                       cnt = 1;
+       }
+
+       /* If this FARP command is searching for my nodename */
+       if (fp->Mflags & FARP_MATCH_NODE) {
+               if (memcmp(&fp->RnodeName, &phba->fc_nodename,
+                          sizeof (struct lpfc_name)) == 0)
+                       cnt = 1;
+       }
+
+       if (cnt) {
+               if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+                  (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+                       /* Log back into the node before sending the FARP. */
+                       if (fp->Rflags & FARP_REQUEST_PLOGI) {
+                               ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                               lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                               lpfc_issue_els_plogi(phba, ndlp, 0);
+                       }
+
+                       /* Send a FARP response to that node */
+                       if (fp->Rflags & FARP_REQUEST_FARPR) {
+                               lpfc_issue_els_farpr(phba, did, 0);
+                       }
+               }
+       }
+       return (0);
+}
+
+static int
+lpfc_els_rcv_farpr(struct lpfc_hba * phba,
+                  struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       uint32_t cmd, did;
+
+       icmd = &cmdiocb->iocb;
+       did = icmd->un.elsreq64.remoteID;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+
+       cmd = *lp++;
+       /* FARP-RSP received from DID <did> */
+       lpfc_printf_log(phba,
+                        KERN_INFO,
+                        LOG_IP,
+                        "%d:0600 FARP-RSP received from DID x%x\n",
+                        phba->brd_no, did);
+
+       /* ACCEPT the FARP response */
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+       return 0;
+}
+
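+/*
+ * Handle a received FAN.  The FAN is ACC'd; if we are still waiting in
+ * LPFC_LOCAL_CFG_LINK state and the fabric has not changed, discovery
+ * is started.  A fabric change means an FLOGI will be required after
+ * the discovery timeout instead.
+ */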
+static int
+lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       FAN *fp;
+       uint32_t cmd, did;
+
+       icmd = &cmdiocb->iocb;
+       did = icmd->un.elsreq64.remoteID;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+
+       cmd = *lp++;
+       fp = (FAN *) lp;
+
+       /* FAN received */
+
+       /* ACCEPT the FAN request */
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+       if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+               /* The discovery state machine needs to take a different
+                * action if this node has switched fabrics
+                */
+               if ((memcmp(&fp->FportName, &phba->fc_fabparam.portName,
+                           sizeof (struct lpfc_name)) != 0)
+                   ||
+                   (memcmp(&fp->FnodeName, &phba->fc_fabparam.nodeName,
+                           sizeof (struct lpfc_name)) != 0)) {
+                       /* This node has switched fabrics.  An FLOGI is required
+                        * after the timeout
+                        */
+                       return (0);
+               }
+
+               /* Start discovery */
+               lpfc_disc_start(phba);
+       }
+
+       return (0);
+}
+
+void
+lpfc_els_timeout(unsigned long ptr)
+{
+       struct lpfc_hba *phba;
+       unsigned long iflag;
+
+       phba = (struct lpfc_hba *)ptr;
+       if (phba == 0)
+               return;
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
+               phba->work_hba_events |= WORKER_ELS_TMO;
+               if (phba->work_wait)
+                       wake_up(phba->work_wait);
+       }
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+       return;
+}
+
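+/*
+ * Worker-thread handler for the ELS timeout: walk the ELS txcmplq, age
+ * each outstanding command by 2 * RA_TOV, and abort and complete any
+ * command whose driver timeout has expired.  The timer is re-armed
+ * while commands remain outstanding.
+ */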
+void
+lpfc_els_timeout_handler(struct lpfc_hba *phba)
+{
+       struct lpfc_sli_ring *pring;
+       struct lpfc_iocbq *tmp_iocb, *piocb;
+       IOCB_t *cmd = NULL;
+       struct lpfc_dmabuf *pcmd;
+       struct list_head *dlp;
+       uint32_t *elscmd;
+       uint32_t els_command;
+       uint32_t timeout;
+       uint32_t remote_ID;
+
+       if (phba == 0)
+               return;
+       spin_lock_irq(phba->host->host_lock);
+       /* If the timer is already canceled do nothing */
+       if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
+               spin_unlock_irq(phba->host->host_lock);
+               return;
+       }
+       timeout = (uint32_t)(phba->fc_ratov << 1);
+
+       pring = &phba->sli.ring[LPFC_ELS_RING];
+       dlp = &pring->txcmplq;
+
+       list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+               cmd = &piocb->iocb;
+
+               if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+                       continue;
+               }
+               pcmd = (struct lpfc_dmabuf *) piocb->context2;
+               elscmd = (uint32_t *) (pcmd->virt);
+               els_command = *elscmd;
+
+               if ((els_command == ELS_CMD_FARP)
+                   || (els_command == ELS_CMD_FARPR)) {
+                       continue;
+               }
+
+               if (piocb->drvrTimeout > 0) {
+                       if (piocb->drvrTimeout >= timeout) {
+                               piocb->drvrTimeout -= timeout;
+                       } else {
+                               piocb->drvrTimeout = 0;
+                       }
+                       continue;
+               }
+
+               list_del(&piocb->list);
+               pring->txcmplq_cnt--;
+
+               if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+                       struct lpfc_nodelist *ndlp;
+
+                       ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+                       remote_ID = ndlp->nlp_DID;
+                       if (cmd->un.elsreq64.bdl.ulpIoTag32) {
+                               lpfc_sli_issue_abort_iotag32(phba,
+                                       pring, piocb);
+                       }
+               } else {
+                       remote_ID = cmd->un.elsreq64.remoteID;
+               }
+
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_ELS,
+                               "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
+                               phba->brd_no, els_command,
+                               remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+
+               /*
+                * The iocb has timed out; abort it.
+                */
+               if (piocb->iocb_cmpl) {
+                       cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                       cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+                       spin_unlock_irq(phba->host->host_lock);
+                       (piocb->iocb_cmpl) (phba, piocb, piocb);
+                       spin_lock_irq(phba->host->host_lock);
+               } else {
+                       list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
+               }
+       }
+       if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
+               phba->els_tmofunc.expires = jiffies + HZ * timeout;
+               add_timer(&phba->els_tmofunc);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+}
+
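+/*
+ * Flush all pending ELS commands from the txq and txcmplq, completing
+ * each iocb with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
+ */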
+void
+lpfc_els_flush_cmd(struct lpfc_hba * phba)
+{
+       struct lpfc_sli_ring *pring;
+       struct lpfc_iocbq *tmp_iocb, *piocb;
+       IOCB_t *cmd = NULL;
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *elscmd;
+       uint32_t els_command;
+       uint32_t remote_ID;
+
+       pring = &phba->sli.ring[LPFC_ELS_RING];
+       spin_lock_irq(phba->host->host_lock);
+       list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
+               cmd = &piocb->iocb;
+
+               if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+                       continue;
+               }
+
+               /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
+               if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
+                   (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
+                   (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
+                   (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
+                       continue;
+               }
+
+               pcmd = (struct lpfc_dmabuf *) piocb->context2;
+               elscmd = (uint32_t *) (pcmd->virt);
+               els_command = *elscmd;
+
+               if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+                       struct lpfc_nodelist *ndlp;
+
+                       ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+                       remote_ID = ndlp->nlp_DID;
+                       if (phba->hba_state == LPFC_HBA_READY) {
+                               continue;
+                       }
+               } else {
+                       remote_ID = cmd->un.elsreq64.remoteID;
+               }
+
+               list_del(&piocb->list);
+               pring->txcmplq_cnt--;
+
+               cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+               cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+
+               if (piocb->iocb_cmpl) {
+                       spin_unlock_irq(phba->host->host_lock);
+                       (piocb->iocb_cmpl) (phba, piocb, piocb);
+                       spin_lock_irq(phba->host->host_lock);
+               }
+               else
+                       list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
+       }
+
+       list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+               cmd = &piocb->iocb;
+
+               if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+                       continue;
+               }
+               pcmd = (struct lpfc_dmabuf *) piocb->context2;
+               elscmd = (uint32_t *) (pcmd->virt);
+               els_command = *elscmd;
+
+               if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+                       struct lpfc_nodelist *ndlp;
+
+                       ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+                       remote_ID = ndlp->nlp_DID;
+                       if (phba->hba_state == LPFC_HBA_READY) {
+                               continue;
+                       }
+               } else {
+                       remote_ID = cmd->un.elsreq64.remoteID;
+               }
+
+               list_del(&piocb->list);
+               pring->txcmplq_cnt--;
+
+               cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+               cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+
+               if (piocb->iocb_cmpl) {
+                       spin_unlock_irq(phba->host->host_lock);
+                       (piocb->iocb_cmpl) (phba, piocb, piocb);
+                       spin_lock_irq(phba->host->host_lock);
+               }
+               else
+                       list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       return;
+}
+
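+/*
+ * Entry point for unsolicited ELS receive events from the SLI layer:
+ * recover the payload buffer, find or create the node for the remote
+ * DID, and dispatch the ELS command to the appropriate handler,
+ * rejecting or dropping anything that cannot be processed.
+ */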
+void
+lpfc_els_unsol_event(struct lpfc_hba * phba,
+                    struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_dmabuf *mp;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       struct ls_rjt stat;
+       uint32_t cmd;
+       uint32_t did;
+       uint32_t newnode;
+       uint32_t drop_cmd = 0;  /* by default do NOT drop received cmd */
+       uint32_t rjt_err = 0;
+
+       psli = &phba->sli;
+       icmd = &elsiocb->iocb;
+
+       if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+               ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+               /* Not enough posted buffers; try posting more buffers */
+               phba->fc_stat.NoRcvBuf++;
+               lpfc_post_buffer(phba, pring, 0, 1);
+               return;
+       }
+
+       /* If there are no BDEs associated with this IOCB,
+        * there is nothing to do.
+        */
+       if (icmd->ulpBdeCount == 0)
+               return;
+
+       /* type of ELS cmd is first 32bit word in packet */
+       mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
+                                                           cont64[0].
+                                                           addrHigh,
+                                                           icmd->un.
+                                                           cont64[0].addrLow));
+       if (mp == 0) {
+               drop_cmd = 1;
+               goto dropit;
+       }
+
+       newnode = 0;
+       lp = (uint32_t *) mp->virt;
+       cmd = *lp++;
+       lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
+
+       if (icmd->ulpStatus) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               drop_cmd = 1;
+               goto dropit;
+       }
+
+       /* Check to see if link went down during discovery */
+       if (lpfc_els_chk_latt(phba)) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               drop_cmd = 1;
+               goto dropit;
+       }
+
+       did = icmd->un.rcvels.remoteID;
+       if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
+               /* Cannot find existing Fabric ndlp, so allocate a new one */
+               if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+                   == 0) {
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                       kfree(mp);
+                       drop_cmd = 1;
+                       goto dropit;
+               }
+
+               lpfc_nlp_init(phba, ndlp, did);
+               newnode = 1;
+               if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
+                       ndlp->nlp_type |= NLP_FABRIC;
+               }
+       }
+
+       phba->fc_stat.elsRcvFrame++;
+       elsiocb->context1 = ndlp;
+       elsiocb->context2 = mp;
+
+       if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
+               cmd &= ELS_CMD_MASK;
+       }
+       /* ELS command <elsCmd> received from NPORT <did> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "%d:0112 ELS command x%x received from NPORT x%x "
+                       "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
+
+       switch (cmd) {
+       case ELS_CMD_PLOGI:
+               phba->fc_stat.elsRcvPLOGI++;
+               if (phba->hba_state < LPFC_DISC_AUTH) {
+                       rjt_err = LSEXP_NOTHING_MORE;
+                       break;
+               }
+               lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
+               break;
+       case ELS_CMD_FLOGI:
+               phba->fc_stat.elsRcvFLOGI++;
+               lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
+               if (newnode) {
+                       mempool_free( ndlp, phba->nlp_mem_pool);
+               }
+               break;
+       case ELS_CMD_LOGO:
+               phba->fc_stat.elsRcvLOGO++;
+               if (phba->hba_state < LPFC_DISC_AUTH) {
+                       rjt_err = LSEXP_NOTHING_MORE;
+                       break;
+               }
+               lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+               break;
+       case ELS_CMD_PRLO:
+               phba->fc_stat.elsRcvPRLO++;
+               if (phba->hba_state < LPFC_DISC_AUTH) {
+                       rjt_err = LSEXP_NOTHING_MORE;
+                       break;
+               }
+               lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+               break;
+       case ELS_CMD_RSCN:
+               phba->fc_stat.elsRcvRSCN++;
+               lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
+               if (newnode) {
+                       mempool_free( ndlp, phba->nlp_mem_pool);
+               }
+               break;
+       case ELS_CMD_ADISC:
+               phba->fc_stat.elsRcvADISC++;
+               if (phba->hba_state < LPFC_DISC_AUTH) {
+                       rjt_err = LSEXP_NOTHING_MORE;
+                       break;
+               }
+               lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
+               break;
+       case ELS_CMD_PDISC:
+               phba->fc_stat.elsRcvPDISC++;
+               if (phba->hba_state < LPFC_DISC_AUTH) {
+                       rjt_err = LSEXP_NOTHING_MORE;
+                       break;
+               }
+               lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
+               break;
+       case ELS_CMD_FARPR:
+               phba->fc_stat.elsRcvFARPR++;
+               lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
+               break;
+       case ELS_CMD_FARP:
+               phba->fc_stat.elsRcvFARP++;
+               lpfc_els_rcv_farp(phba, elsiocb, ndlp);
+               break;
+       case ELS_CMD_FAN:
+               phba->fc_stat.elsRcvFAN++;
+               lpfc_els_rcv_fan(phba, elsiocb, ndlp);
+               break;
+       case ELS_CMD_RRQ:
+               phba->fc_stat.elsRcvRRQ++;
+               lpfc_els_rcv_rrq(phba, elsiocb, ndlp);
+               break;
+       case ELS_CMD_PRLI:
+               phba->fc_stat.elsRcvPRLI++;
+               if (phba->hba_state < LPFC_DISC_AUTH) {
+                       rjt_err = LSEXP_NOTHING_MORE;
+                       break;
+               }
+               lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+               break;
+       case ELS_CMD_RNID:
+               phba->fc_stat.elsRcvRNID++;
+               lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
+               break;
+       default:
+               /* Unsupported ELS command, reject */
+               rjt_err = LSEXP_NOTHING_MORE;
+
+               /* Unknown ELS command <elsCmd> received from NPORT <did> */
+               lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+                               "%d:0115 Unknown ELS command x%x received from "
+                               "NPORT x%x\n", phba->brd_no, cmd, did);
+               if (newnode) {
+                       mempool_free( ndlp, phba->nlp_mem_pool);
+               }
+               break;
+       }
+
+       /* Check if we need to LS_RJT the received ELS cmd */
+       if (rjt_err) {
+               stat.un.b.lsRjtRsvd0 = 0;
+               stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+               stat.un.b.lsRjtRsnCodeExp = rjt_err;
+               stat.un.b.vendorUnique = 0;
+               lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
+       }
+
+       if (elsiocb->context2) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+       }
+dropit:
+       /* Check if we need to drop the received ELS cmd */
+       if (drop_cmd == 1) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+                               "%d:0111 Dropping received ELS cmd "
+                               "Data: x%x x%x\n", phba->brd_no,
+                               icmd->ulpStatus, icmd->un.ulpWord[4]);
+               phba->fc_stat.elsRcvDrop++;
+       }
+       return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
new file mode 100644 (file)
index 0000000..d546206
--- /dev/null
@@ -0,0 +1,2537 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_hbadisc.c 1.266 2005/04/13 11:59:06EDT sf_support Exp  $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_disc.h"
+#include "lpfc_sli.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+/* AlpaArray for assignment of scsid for scan-down and bind_method */
+static uint8_t lpfcAlpaArray[] = {
+       0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
+       0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
+       0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
+       0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
+       0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
+       0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
+       0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
+       0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
+       0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
+       0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
+       0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
+       0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
+       0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
+};
+
+static void lpfc_disc_timeout_handler(struct lpfc_hba *);
+
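+/*
+ * Nodev timeout handling: flush any outstanding FCP I/O to the target
+ * and run a DEVICE_RM event through the discovery state machine to
+ * remove the node.
+ */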
+static void
+lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+       if (!(ndlp->nlp_type & NLP_FABRIC)) {
+               /* Nodev timeout on NPort <nlp_DID> */
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                       "%d:0203 Nodev timeout on NPort x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+                       ndlp->nlp_state, ndlp->nlp_rpi);
+       }
+
+       spin_lock_irq(phba->host->host_lock);
+       if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
+               spin_unlock_irq(phba->host->host_lock);
+               return;
+       }
+
+       ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+
+       if (ndlp->nlp_sid != NLP_NO_SID) {
+               /* flush the target */
+               lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+                       ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+
+       lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+       return;
+}
+
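+/*
+ * Drain the worker event list, dispatching each queued event (nodev
+ * timeout, ELS retry delay, online/offline request) to its handler.
+ */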
+static void
+lpfc_work_list_done(struct lpfc_hba * phba)
+{
+       struct lpfc_work_evt  *evtp = NULL;
+       struct lpfc_nodelist  *ndlp;
+       int free_evt;
+
+       spin_lock_irq(phba->host->host_lock);
+       while(!list_empty(&phba->work_list)) {
+               list_remove_head((&phba->work_list), evtp, typeof(*evtp),
+                                evt_listp);
+               spin_unlock_irq(phba->host->host_lock);
+               free_evt = 1;
+               switch(evtp->evt) {
+               case LPFC_EVT_NODEV_TMO:
+                       ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+                       lpfc_process_nodev_timeout(phba, ndlp);
+                       free_evt = 0;
+                       break;
+               case LPFC_EVT_ELS_RETRY:
+                       ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+                       lpfc_els_retry_delay_handler(ndlp);
+                       free_evt = 0;
+                       break;
+               case LPFC_EVT_ONLINE:
+                       *(int *)(evtp->evt_arg1)  = lpfc_online(phba);
+                       complete((struct completion *)(evtp->evt_arg2));
+                       break;
+               case LPFC_EVT_OFFLINE:
+                       *(int *)(evtp->evt_arg1)  = lpfc_offline(phba);
+                       complete((struct completion *)(evtp->evt_arg2));
+                       break;
+               }
+               if (free_evt)
+                       kfree(evtp);
+               spin_lock_irq(phba->host->host_lock);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+
+}
+
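+/*
+ * Main worker routine: handle latched host attention conditions (error,
+ * mailbox and link attention), run any flagged timeout handlers,
+ * service the slow rings and re-enable their interrupts, then complete
+ * the deferred work list.
+ */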
+static void
+lpfc_work_done(struct lpfc_hba * phba)
+{
+       struct lpfc_sli_ring *pring;
+       int i;
+       uint32_t ha_copy;
+       uint32_t control;
+       uint32_t work_hba_events;
+
+       spin_lock_irq(phba->host->host_lock);
+       ha_copy = phba->work_ha;
+       phba->work_ha = 0;
+       work_hba_events=phba->work_hba_events;
+       spin_unlock_irq(phba->host->host_lock);
+
+       if(ha_copy & HA_ERATT)
+               lpfc_handle_eratt(phba);
+
+       if(ha_copy & HA_MBATT)
+               lpfc_sli_handle_mb_event(phba);
+
+       if(ha_copy & HA_LATT)
+               lpfc_handle_latt(phba);
+
+       if (work_hba_events & WORKER_DISC_TMO)
+               lpfc_disc_timeout_handler(phba);
+
+       if (work_hba_events & WORKER_ELS_TMO)
+               lpfc_els_timeout_handler(phba);
+
+       if (work_hba_events & WORKER_MBOX_TMO)
+               lpfc_mbox_timeout_handler(phba);
+
+       if (work_hba_events & WORKER_FDMI_TMO)
+               lpfc_fdmi_tmo_handler(phba);
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->work_hba_events &= ~work_hba_events;
+       spin_unlock_irq(phba->host->host_lock);
+
+       for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
+               pring = &phba->sli.ring[i];
+               if ((ha_copy & HA_RXATT)
+                   || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+                       if (pring->flag & LPFC_STOP_IOCB_MASK) {
+                               pring->flag |= LPFC_DEFERRED_RING_EVENT;
+                       } else {
+                               lpfc_sli_handle_slow_ring_event(phba, pring,
+                                                               (ha_copy &
+                                                                HA_RXMASK));
+                               pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+                       }
+                       /*
+                        * Turn on Ring interrupts
+                        */
+                       spin_lock_irq(phba->host->host_lock);
+                       control = readl(phba->HCregaddr);
+                       control |= (HC_R0INT_ENA << i);
+                       writel(control, phba->HCregaddr);
+                       readl(phba->HCregaddr); /* flush */
+                       spin_unlock_irq(phba->host->host_lock);
+               }
+       }
+
+       lpfc_work_list_done (phba);
+
+}
+
+static int
+check_work_wait_done(struct lpfc_hba *phba) {
+
+       spin_lock_irq(phba->host->host_lock);
+       if (phba->work_ha ||
+           phba->work_hba_events ||
+           (!list_empty(&phba->work_list)) ||
+           kthread_should_stop()) {
+               spin_unlock_irq(phba->host->host_lock);
+               return 1;
+       } else {
+               spin_unlock_irq(phba->host->host_lock);
+               return 0;
+       }
+}
+
+int
+lpfc_do_work(void *p)
+{
+       struct lpfc_hba *phba = p;
+       int rc;
+       DECLARE_WAIT_QUEUE_HEAD(work_waitq);
+
+       set_user_nice(current, -20);
+       phba->work_wait = &work_waitq;
+
+       while (1) {
+
+               rc = wait_event_interruptible(work_waitq,
+                                               check_work_wait_done(phba));
+               BUG_ON(rc);
+
+               if (kthread_should_stop())
+                       break;
+
+               lpfc_work_done(phba);
+
+       }
+       phba->work_wait = NULL;
+       return 0;
+}
+
+/*
+ * This is only called to handle FC worker events. Since this is a rare
+ * occurrence, we allocate a struct lpfc_work_evt structure here instead of
+ * embedding it in the IOCB.
+ */
+int
+lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
+                     uint32_t evt)
+{
+       struct lpfc_work_evt  *evtp;
+
+       /*
+        * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
+        * be queued to the worker thread for processing
+        */
+       evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
+       if (!evtp)
+               return 0;
+
+       evtp->evt_arg1  = arg1;
+       evtp->evt_arg2  = arg2;
+       evtp->evt       = evt;
+
+       list_add_tail(&evtp->evt_listp, &phba->work_list);
+       spin_lock_irq(phba->host->host_lock);
+       if (phba->work_wait)
+               wake_up(phba->work_wait);
+       spin_unlock_irq(phba->host->host_lock);
+
+       return 1;
+}
+
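+/*
+ * Link-down handling: unregister the firmware default RPIs, flush RSCN
+ * and ELS activity, issue a DEVICE_RECOVERY event to every node
+ * (removing fabric nodes other than Fabric_DID), and reset the
+ * point-to-point state.
+ */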
+int
+lpfc_linkdown(struct lpfc_hba * phba)
+{
+       struct lpfc_sli       *psli;
+       struct lpfc_nodelist  *ndlp, *next_ndlp;
+       struct list_head *listp;
+       struct list_head *node_list[7];
+       LPFC_MBOXQ_t     *mb;
+       int               rc, i;
+
+       psli = &phba->sli;
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->hba_state = LPFC_LINK_DOWN;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Clean up any firmware default rpi's */
+       if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+               lpfc_unreg_did(phba, 0xffffffff, mb);
+               mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
+               if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+                   == MBX_NOT_FINISHED) {
+                       mempool_free( mb, phba->mbox_mem_pool);
+               }
+       }
+
+       /* Cleanup any outstanding RSCN activity */
+       lpfc_els_flush_rscn(phba);
+
+       /* Cleanup any outstanding ELS commands */
+       lpfc_els_flush_cmd(phba);
+
+       /* Issue a LINK DOWN event to all nodes */
+       node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
+       node_list[1] = &phba->fc_nlpmap_list;
+       node_list[2] = &phba->fc_nlpunmap_list;
+       node_list[3] = &phba->fc_prli_list;
+       node_list[4] = &phba->fc_reglogin_list;
+       node_list[5] = &phba->fc_adisc_list;
+       node_list[6] = &phba->fc_plogi_list;
+       for (i = 0; i < 7; i++) {
+               listp = node_list[i];
+               if (list_empty(listp))
+                       continue;
+
+               list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
+                       /* Fabric nodes are not handled through the state
+                          machine for link down */
+                       if (ndlp->nlp_type & NLP_FABRIC) {
+                               /* Remove ALL Fabric nodes except Fabric_DID */
+                               if (ndlp->nlp_DID != Fabric_DID) {
+                                       /* Take it off current list and free */
+                                       lpfc_nlp_list(phba, ndlp,
+                                               NLP_NO_LIST);
+                               }
+                       }
+                       else {
+
+                               rc = lpfc_disc_state_machine(phba, ndlp, NULL,
+                                                    NLP_EVT_DEVICE_RECOVERY);
+
+                               /* Check config parameter use-adisc or FCP-2 */
+                               if ((rc != NLP_STE_FREED_NODE) &&
+                                       (phba->cfg_use_adisc == 0) &&
+                                       !(ndlp->nlp_fcp_info &
+                                               NLP_FCP_2_DEVICE)) {
+                                       /* We know we will have to relogin, so
+                                        * unreglogin the rpi right now to fail
+                                        * any outstanding I/Os quickly.
+                                        */
+                                       lpfc_unreg_rpi(phba, ndlp);
+                               }
+                       }
+               }
+       }
+
+       /* free any ndlp's on unused list */
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+                               nlp_listp) {
+               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       }
+
+       /* Setup myDID for link up if we are in pt2pt mode */
+       if (phba->fc_flag & FC_PT2PT) {
+               phba->fc_myDID = 0;
+               if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+                       lpfc_config_link(phba, mb);
+                       mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
+                       if (lpfc_sli_issue_mbox
+                           (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+                           == MBX_NOT_FINISHED) {
+                               mempool_free( mb, phba->mbox_mem_pool);
+                       }
+               }
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag &= ~FC_LBIT;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Turn off discovery timer if its running */
+       lpfc_can_disctmo(phba);
+
+       /* Must process IOCBs on all rings to handle ABORTed I/Os */
+       return (0);
+}
+
+static int
+lpfc_linkup(struct lpfc_hba * phba)
+{
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->hba_state = LPFC_LINK_UP;
+       phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+                          FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+       phba->fc_flag |= FC_NDISC_ACTIVE;
+       phba->fc_ns_retry = 0;
+       spin_unlock_irq(phba->host->host_lock);
+
+
+       /*
+        * Clean up old Fabric NLP_FABRIC logins.
+        */
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
+                               nlp_listp) {
+               if (ndlp->nlp_DID == Fabric_DID) {
+                       /* Take it off current list and free */
+                       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+               }
+       }
+
+       /* free any ndlp's on unused list */
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+                               nlp_listp) {
+               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       }
+
+       return 0;
+}
+
+/*
+ * This routine handles processing a CLEAR_LA mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_sli *psli;
+       MAILBOX_t *mb;
+       uint32_t control;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+       /* Since we don't do discovery right now, turn these off here */
+       psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+       psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+       psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+
+       /* Check for error */
+       if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
+               /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "%d:0320 CLEAR_LA mbxStatus error x%x hba "
+                               "state x%x\n",
+                               phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+               phba->hba_state = LPFC_HBA_ERROR;
+               goto out;
+       }
+
+       if (phba->fc_flag & FC_ABORT_DISCOVERY)
+               goto out;
+
+       phba->num_disc_nodes = 0;
+       /* go thru NPR list and issue ELS PLOGIs */
+       if (phba->fc_npr_cnt) {
+               lpfc_els_disc_plogi(phba);
+       }
+
+       if(!phba->num_disc_nodes) {
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag &= ~FC_NDISC_ACTIVE;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+
+       phba->hba_state = LPFC_HBA_READY;
+
+out:
+       /* Device Discovery completes */
+       lpfc_printf_log(phba,
+                        KERN_INFO,
+                        LOG_DISCOVERY,
+                        "%d:0225 Device Discovery completes\n",
+                        phba->brd_no);
+
+       mempool_free( pmb, phba->mbox_mem_pool);
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag &= ~FC_ABORT_DISCOVERY;
+       if (phba->fc_flag & FC_ESTABLISH_LINK) {
+               phba->fc_flag &= ~FC_ESTABLISH_LINK;
+       }
+       spin_unlock_irq(phba->host->host_lock);
+
+       del_timer_sync(&phba->fc_estabtmo);
+
+       lpfc_can_disctmo(phba);
+
+       /* turn on Link Attention interrupts */
+       spin_lock_irq(phba->host->host_lock);
+       psli->sli_flag |= LPFC_PROCESS_LA;
+       control = readl(phba->HCregaddr);
+       control |= HC_LAINT_ENA;
+       writel(control, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+       spin_unlock_irq(phba->host->host_lock);
+
+       return;
+}
+
+static void
+lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_sli *psli;
+       MAILBOX_t *mb;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+       /* Check for error */
+       if (mb->mbxStatus) {
+               /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "%d:0306 CONFIG_LINK mbxStatus error x%x "
+                               "HBA state x%x\n",
+                               phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+               lpfc_linkdown(phba);
+               phba->hba_state = LPFC_HBA_ERROR;
+               goto out;
+       }
+
+       if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       /* If we are public loop and L bit was set */
+                       if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
+                           !(phba->fc_flag & FC_LBIT)) {
+                               /* Need to wait for FAN - use discovery timer
+                                * for timeout.  hba_state is identically
+                                * LPFC_LOCAL_CFG_LINK while waiting for FAN
+                                */
+                               lpfc_set_disctmo(phba);
+                               mempool_free( pmb, phba->mbox_mem_pool);
+                               return;
+                       }
+               }
+
+               /* Start discovery by sending a FLOGI. hba_state is identically
+                * LPFC_FLOGI while waiting for FLOGI cmpl
+                */
+               phba->hba_state = LPFC_FLOGI;
+               lpfc_set_disctmo(phba);
+               lpfc_initial_flogi(phba);
+               mempool_free( pmb, phba->mbox_mem_pool);
+               return;
+       }
+       if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
+               mempool_free( pmb, phba->mbox_mem_pool);
+               return;
+       }
+
+out:
+       /* CONFIG_LINK bad hba state <hba_state> */
+       lpfc_printf_log(phba,
+                       KERN_ERR,
+                       LOG_DISCOVERY,
+                       "%d:0200 CONFIG_LINK bad hba state x%x\n",
+                       phba->brd_no, phba->hba_state);
+
+       if (phba->hba_state != LPFC_CLEAR_LA) {
+               lpfc_clear_la(phba, pmb);
+               pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+               if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
+                   == MBX_NOT_FINISHED) {
+                       mempool_free( pmb, phba->mbox_mem_pool);
+                       lpfc_disc_flush_list(phba);
+                       psli->ring[(psli->ip_ring)].flag &=
+                               ~LPFC_STOP_IOCB_EVENT;
+                       psli->ring[(psli->fcp_ring)].flag &=
+                               ~LPFC_STOP_IOCB_EVENT;
+                       psli->ring[(psli->next_ring)].flag &=
+                               ~LPFC_STOP_IOCB_EVENT;
+                       phba->hba_state = LPFC_HBA_READY;
+               }
+       } else {
+               mempool_free( pmb, phba->mbox_mem_pool);
+       }
+       return;
+}
+
+static void
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       MAILBOX_t *mb = &pmb->mb;
+       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+
+
+       /* Check for error */
+       if (mb->mbxStatus) {
+               /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "%d:0319 READ_SPARAM mbxStatus error x%x "
+                               "hba state x%x>\n",
+                               phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+               lpfc_linkdown(phba);
+               phba->hba_state = LPFC_HBA_ERROR;
+               goto out;
+       }
+
+       memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
+              sizeof (struct serv_parm));
+       memcpy((uint8_t *) & phba->fc_nodename,
+              (uint8_t *) & phba->fc_sparam.nodeName,
+              sizeof (struct lpfc_name));
+       memcpy((uint8_t *) & phba->fc_portname,
+              (uint8_t *) & phba->fc_sparam.portName,
+              sizeof (struct lpfc_name));
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mempool_free( pmb, phba->mbox_mem_pool);
+       return;
+
+out:
+       pmb->context1 = NULL;
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       if (phba->hba_state != LPFC_CLEAR_LA) {
+               lpfc_clear_la(phba, pmb);
+               pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+               if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
+                   == MBX_NOT_FINISHED) {
+                       mempool_free( pmb, phba->mbox_mem_pool);
+                       lpfc_disc_flush_list(phba);
+                       psli->ring[(psli->ip_ring)].flag &=
+                           ~LPFC_STOP_IOCB_EVENT;
+                       psli->ring[(psli->fcp_ring)].flag &=
+                           ~LPFC_STOP_IOCB_EVENT;
+                       psli->ring[(psli->next_ring)].flag &=
+                           ~LPFC_STOP_IOCB_EVENT;
+                       phba->hba_state = LPFC_HBA_READY;
+               }
+       } else {
+               mempool_free( pmb, phba->mbox_mem_pool);
+       }
+       return;
+}
+
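+/*
+ * Process a link-up attention: record the link speed, topology and (on
+ * loop) the granted AL_PA, then issue READ_SPARAM and CONFIG_LINK
+ * mailbox commands to continue bringing the link up.
+ */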
+static void
+lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
+{
+       int i;
+       LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+       sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+       spin_lock_irq(phba->host->host_lock);
+       switch(la->UlnkSpeed) {
+               case LA_1GHZ_LINK:
+                       phba->fc_linkspeed = LA_1GHZ_LINK;
+                       break;
+               case LA_2GHZ_LINK:
+                       phba->fc_linkspeed = LA_2GHZ_LINK;
+                       break;
+               case LA_4GHZ_LINK:
+                       phba->fc_linkspeed = LA_4GHZ_LINK;
+                       break;
+               default:
+                       phba->fc_linkspeed = LA_UNKNW_LINK;
+                       break;
+       }
+
+       phba->fc_topology = la->topology;
+
+       if (phba->fc_topology == TOPOLOGY_LOOP) {
+       /* Get Loop Map information */
+
+               if (la->il)
+                       phba->fc_flag |= FC_LBIT;
+
+               phba->fc_myDID = la->granted_AL_PA;
+               i = la->un.lilpBde64.tus.f.bdeSize;
+
+               if (i == 0) {
+                       phba->alpa_map[0] = 0;
+               } else {
+                       if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
+                               int numalpa, j, k;
+                               union {
+                                       uint8_t pamap[16];
+                                       struct {
+                                               uint32_t wd1;
+                                               uint32_t wd2;
+                                               uint32_t wd3;
+                                               uint32_t wd4;
+                                       } pa;
+                               } un;
+                               numalpa = phba->alpa_map[0];
+                               j = 0;
+                               while (j < numalpa) {
+                                       memset(un.pamap, 0, 16);
+                                       for (k = 1; j < numalpa; k++) {
+                                               un.pamap[k - 1] =
+                                                       phba->alpa_map[j + 1];
+                                               j++;
+                                               if (k == 16)
+                                                       break;
+                                       }
+                                       /* Link Up Event ALPA map */
+                                       lpfc_printf_log(phba,
+                                               KERN_WARNING,
+                                               LOG_LINK_EVENT,
+                                               "%d:1304 Link Up Event "
+                                               "ALPA map Data: x%x "
+                                               "x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               un.pa.wd1, un.pa.wd2,
+                                               un.pa.wd3, un.pa.wd4);
+                               }
+                       }
+               }
+       } else {
+               phba->fc_myDID = phba->fc_pref_DID;
+               phba->fc_flag |= FC_LBIT;
+       }
+       spin_unlock_irq(phba->host->host_lock);
+
+       lpfc_linkup(phba);
+       if (sparam_mbox) {
+               lpfc_read_sparam(phba, sparam_mbox);
+               sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+               lpfc_sli_issue_mbox(phba, sparam_mbox,
+                                               (MBX_NOWAIT | MBX_STOP_IOCB));
+       }
+
+       if (cfglink_mbox) {
+               phba->hba_state = LPFC_LOCAL_CFG_LINK;
+               lpfc_config_link(phba, cfglink_mbox);
+               cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
+               lpfc_sli_issue_mbox(phba, cfglink_mbox,
+                                               (MBX_NOWAIT | MBX_STOP_IOCB));
+       }
+}
+
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
+       uint32_t control;
+       struct lpfc_sli *psli = &phba->sli;
+
+       lpfc_linkdown(phba);
+
+       /* turn on Link Attention interrupts - no CLEAR_LA needed */
+       spin_lock_irq(phba->host->host_lock);
+       psli->sli_flag |= LPFC_PROCESS_LA;
+       control = readl(phba->HCregaddr);
+       control |= HC_LAINT_ENA;
+       writel(control, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+       spin_unlock_irq(phba->host->host_lock);
+}
+
+/*
+ * This routine handles processing a READ_LA mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
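+/*
+ * Flow of the handler below: a non-zero mbxStatus is treated as a
+ * link-down condition and leaves the HBA in LPFC_HBA_ERROR; otherwise
+ * the ALPA map is copied out of the mailbox DMA buffer, a skipped or
+ * repeated eventTag is counted as a LinkMultiEvent, and the attention
+ * type dispatches to lpfc_mbx_process_link_up() or
+ * lpfc_mbx_issue_link_down().  The mailbox buffer is freed on all paths.
+ */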
+void
+lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       READ_LA_VAR *la;
+       MAILBOX_t *mb = &pmb->mb;
+       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+       /* Check for error */
+       if (mb->mbxStatus) {
+               lpfc_printf_log(phba,
+                               KERN_INFO,
+                               LOG_LINK_EVENT,
+                               "%d:1307 READ_LA mbox error x%x state x%x\n",
+                               phba->brd_no,
+                               mb->mbxStatus, phba->hba_state);
+               lpfc_mbx_issue_link_down(phba);
+               phba->hba_state = LPFC_HBA_ERROR;
+               goto lpfc_mbx_cmpl_read_la_free_mbuf;
+       }
+
+       la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
+
+       memcpy(&phba->alpa_map[0], mp->virt, 128);
+
+       if (((phba->fc_eventTag + 1) < la->eventTag) ||
+            (phba->fc_eventTag == la->eventTag)) {
+               phba->fc_stat.LinkMultiEvent++;
+               if (la->attType == AT_LINK_UP) {
+                       if (phba->fc_eventTag != 0)
+                               lpfc_linkdown(phba);
+               }
+       }
+
+       phba->fc_eventTag = la->eventTag;
+
+       if (la->attType == AT_LINK_UP) {
+               phba->fc_stat.LinkUp++;
+               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "%d:1303 Link Up Event x%x received "
+                               "Data: x%x x%x x%x x%x\n",
+                               phba->brd_no, la->eventTag, phba->fc_eventTag,
+                               la->granted_AL_PA, la->UlnkSpeed,
+                               phba->alpa_map[0]);
+               lpfc_mbx_process_link_up(phba, la);
+       } else {
+               phba->fc_stat.LinkDown++;
+               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "%d:1305 Link Down Event x%x received "
+                               "Data: x%x x%x x%x\n",
+                               phba->brd_no, la->eventTag, phba->fc_eventTag,
+                               phba->hba_state, phba->fc_flag);
+               lpfc_mbx_issue_link_down(phba);
+       }
+
+lpfc_mbx_cmpl_read_la_free_mbuf:
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return;
+}
+
+/*
+ * This routine handles processing a REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_sli *psli;
+       MAILBOX_t *mb;
+       struct lpfc_dmabuf *mp;
+       struct lpfc_nodelist *ndlp;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+
+       ndlp = (struct lpfc_nodelist *) pmb->context2;
+       mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+       pmb->context1 = NULL;
+
+       /* Good status, call state machine */
+       lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mempool_free( pmb, phba->mbox_mem_pool);
+
+       return;
+}
+
+/*
+ * This routine handles processing a Fabric REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
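+/*
+ * On a failed fabric REG_LOGIN the node is released and discovery falls
+ * back to the loop map (lpfc_disc_list_loopmap() + lpfc_disc_start()).
+ * On success the fabric RPI is recorded and the node moves to the
+ * unmapped list; while the HBA is still in LPFC_FABRIC_CFG_LINK an SCR
+ * is issued to the fabric controller and a PLOGI is sent to the
+ * NameServer (plus the FDMI management server when cfg_fdmi_on is set).
+ */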
+void
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_sli *psli;
+       MAILBOX_t *mb;
+       struct lpfc_dmabuf *mp;
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_nodelist *ndlp_fdmi;
+
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+
+       ndlp = (struct lpfc_nodelist *) pmb->context2;
+       mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+       if (mb->mbxStatus) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               mempool_free( pmb, phba->mbox_mem_pool);
+               mempool_free( ndlp, phba->nlp_mem_pool);
+
+               /* FLOGI failed, so just use loop map to make discovery list */
+               lpfc_disc_list_loopmap(phba);
+
+               /* Start discovery */
+               lpfc_disc_start(phba);
+               return;
+       }
+
+       pmb->context1 = NULL;
+
+       if (ndlp->nlp_rpi != 0)
+               lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+       ndlp->nlp_rpi = mb->un.varWords[0];
+       lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+       ndlp->nlp_type |= NLP_FABRIC;
+       ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+       if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
+               /* This NPort has been assigned an NPort_ID by the fabric as a
+                * result of the completed fabric login.  Issue a State Change
+                * Registration (SCR) ELS request to the fabric controller
+                * (SCR_DID) so that this NPort gets RSCN events from the
+                * fabric.
+                */
+               lpfc_issue_els_scr(phba, SCR_DID, 0);
+
+               /* Allocate a new node instance.  If the pool is empty, just
+                * start the discovery process and skip the Nameserver login
+                * process.  This is attempted again later on.  Otherwise, issue
+                * a Port Login (PLOGI) to the NameServer
+                */
+               if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+                   == 0) {
+                       lpfc_disc_start(phba);
+               } else {
+                       lpfc_nlp_init(phba, ndlp, NameServer_DID);
+                       ndlp->nlp_type |= NLP_FABRIC;
+                       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                       lpfc_issue_els_plogi(phba, ndlp, 0);
+                       if (phba->cfg_fdmi_on) {
+                               if ((ndlp_fdmi = mempool_alloc(
+                                                      phba->nlp_mem_pool,
+                                                      GFP_KERNEL))) {
+                                       lpfc_nlp_init(phba, ndlp_fdmi,
+                                               FDMI_DID);
+                                       ndlp_fdmi->nlp_type |= NLP_FABRIC;
+                                       ndlp_fdmi->nlp_state =
+                                           NLP_STE_PLOGI_ISSUE;
+                                       lpfc_issue_els_plogi(phba, ndlp_fdmi,
+                                                            0);
+                               }
+                       }
+               }
+       }
+
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mempool_free( pmb, phba->mbox_mem_pool);
+
+       return;
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
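+/*
+ * On a failed NameServer REG_LOGIN the node is dropped and discovery
+ * falls back to the loop map.  On success the RPI is recorded, the node
+ * moves to the unmapped list, the RNN_ID/RSNN_NN/RFT_ID registrations
+ * are sent while the link is still coming up, and a GID_FT query is
+ * issued; if that query cannot be sent, discovery is finished off with
+ * lpfc_disc_start().
+ */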
+void
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_sli *psli;
+       MAILBOX_t *mb;
+       struct lpfc_dmabuf *mp;
+       struct lpfc_nodelist *ndlp;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+
+       ndlp = (struct lpfc_nodelist *) pmb->context2;
+       mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+       if (mb->mbxStatus) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               mempool_free( pmb, phba->mbox_mem_pool);
+               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+
+               /* RegLogin failed, so just use loop map to make discovery
+                  list */
+               lpfc_disc_list_loopmap(phba);
+
+               /* Start discovery */
+               lpfc_disc_start(phba);
+               return;
+       }
+
+       pmb->context1 = NULL;
+
+       if (ndlp->nlp_rpi != 0)
+               lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+       ndlp->nlp_rpi = mb->un.varWords[0];
+       lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+       ndlp->nlp_type |= NLP_FABRIC;
+       ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+       if (phba->hba_state < LPFC_HBA_READY) {
+               /* Link up discovery requires Fabric registration. */
+               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
+               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
+               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
+       }
+
+       phba->fc_ns_retry = 0;
+       /* Good status, issue CT Request to NameServer */
+       if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
+               /* Cannot issue NameServer Query, so finish up discovery */
+               lpfc_disc_start(phba);
+       }
+
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mempool_free( pmb, phba->mbox_mem_pool);
+
+       return;
+}
+
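+/*
+ * Register (or re-register) a remote port with the FC transport: the
+ * node's WWNN/WWPN and DID are packed into fc_rport_identifiers, the
+ * FCP target/initiator roles are derived from nlp_type, and the
+ * scsi_target_id handed back by fc_remote_port_add() is cached in
+ * nlp_sid when it falls inside the driver's addressable range.
+ */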
+static void
+lpfc_register_remote_port(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp)
+{
+       struct fc_rport *rport;
+       struct lpfc_rport_data *rdata;
+       struct fc_rport_identifiers rport_ids;
+       uint64_t wwn;
+
+       /* Remote port has reappeared. Re-register w/ FC transport */
+       memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t));
+       rport_ids.node_name = be64_to_cpu(wwn);
+       memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
+       rport_ids.port_name = be64_to_cpu(wwn);
+       rport_ids.port_id = ndlp->nlp_DID;
+       rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+       if (ndlp->nlp_type & NLP_FCP_TARGET)
+               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+       if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+               rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
+       ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
+       if (!rport) {
+               dev_printk(KERN_WARNING, &phba->pcidev->dev,
+                          "Warning: fc_remote_port_add failed\n");
+               return;
+       }
+
+       /* initialize static port data */
+       rport->maxframe_size = ndlp->nlp_maxframe;
+       rport->supported_classes = ndlp->nlp_class_sup;
+       if ((rport->scsi_target_id != -1) &&
+               (rport->scsi_target_id < MAX_FCP_TARGET)) {
+               ndlp->nlp_sid = rport->scsi_target_id;
+       }
+       rdata = rport->dd_data;
+       rdata->pnode = ndlp;
+
+       return;
+}
+
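+/*
+ * lpfc_nlp_list() moves a node between the per-state lists.  The first
+ * switch removes the node from whatever list it currently sits on and
+ * adjusts that list's counter; the second switch adds it to the
+ * requested list (NLP_JUST_DQ only dequeues, NLP_NO_LIST hands the node
+ * to lpfc_nlp_remove()).  Any fc transport block/unblock/add calls are
+ * deferred until after the move so the host lock is never dropped
+ * mid-transition.
+ */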
+int
+lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
+{
+       enum { none, unmapped, mapped } rport_add = none, rport_del = none;
+       struct lpfc_sli      *psli;
+
+       psli = &phba->sli;
+       /* Sanity check to ensure we are not moving to / from the same list */
+       if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
+               if (list != NLP_NO_LIST)
+                       return(0);
+       }
+
+       switch(nlp->nlp_flag & NLP_LIST_MASK) {
+       case NLP_NO_LIST: /* Not on any list */
+               break;
+       case NLP_UNUSED_LIST:
+               phba->fc_unused_cnt--;
+               list_del(&nlp->nlp_listp);
+               break;
+       case NLP_PLOGI_LIST:
+               phba->fc_plogi_cnt--;
+               list_del(&nlp->nlp_listp);
+               break;
+       case NLP_ADISC_LIST:
+               phba->fc_adisc_cnt--;
+               list_del(&nlp->nlp_listp);
+               break;
+       case NLP_REGLOGIN_LIST:
+               phba->fc_reglogin_cnt--;
+               list_del(&nlp->nlp_listp);
+               break;
+       case NLP_PRLI_LIST:
+               phba->fc_prli_cnt--;
+               list_del(&nlp->nlp_listp);
+               break;
+       case NLP_UNMAPPED_LIST:
+               phba->fc_unmap_cnt--;
+               list_del(&nlp->nlp_listp);
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+               nlp->nlp_type &= ~NLP_FC_NODE;
+               spin_unlock_irq(phba->host->host_lock);
+               phba->nport_event_cnt++;
+               if (nlp->rport)
+                       rport_del = unmapped;
+               break;
+       case NLP_MAPPED_LIST:
+               phba->fc_map_cnt--;
+               list_del(&nlp->nlp_listp);
+               phba->nport_event_cnt++;
+               if (nlp->rport)
+                       rport_del = mapped;
+               break;
+       case NLP_NPR_LIST:
+               phba->fc_npr_cnt--;
+               list_del(&nlp->nlp_listp);
+               /* Stop delay tmo if taking node off NPR list */
+               if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
+                  (list != NLP_NPR_LIST)) {
+                       spin_lock_irq(phba->host->host_lock);
+                       nlp->nlp_flag &= ~NLP_DELAY_TMO;
+                       spin_unlock_irq(phba->host->host_lock);
+                       del_timer_sync(&nlp->nlp_delayfunc);
+                       if (!list_empty(&nlp->els_retry_evt.evt_listp))
+                               list_del_init(&nlp->els_retry_evt.evt_listp);
+               }
+               break;
+       }
+
+       spin_lock_irq(phba->host->host_lock);
+       nlp->nlp_flag &= ~NLP_LIST_MASK;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Add NPort <did> to <num> list */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_NODE,
+                       "%d:0904 Add NPort x%x to %d list Data: x%x\n",
+                       phba->brd_no,
+                       nlp->nlp_DID, list, nlp->nlp_flag);
+
+       switch(list) {
+       case NLP_NO_LIST: /* No list, just remove it */
+               lpfc_nlp_remove(phba, nlp);
+               break;
+       case NLP_UNUSED_LIST:
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the unused list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
+               phba->fc_unused_cnt++;
+               break;
+       case NLP_PLOGI_LIST:
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the plogi list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
+               phba->fc_plogi_cnt++;
+               break;
+       case NLP_ADISC_LIST:
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the adisc list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
+               phba->fc_adisc_cnt++;
+               break;
+       case NLP_REGLOGIN_LIST:
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the reglogin list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
+               phba->fc_reglogin_cnt++;
+               break;
+       case NLP_PRLI_LIST:
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the prli list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
+               phba->fc_prli_cnt++;
+               break;
+       case NLP_UNMAPPED_LIST:
+               rport_add = unmapped;
+               /* ensure all vestiges of "mapped" significance are gone */
+               nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the unmap list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
+               phba->fc_unmap_cnt++;
+               phba->nport_event_cnt++;
+               /* stop nodev tmo if running */
+               if (nlp->nlp_flag & NLP_NODEV_TMO) {
+                       spin_lock_irq(phba->host->host_lock);
+                       nlp->nlp_flag &= ~NLP_NODEV_TMO;
+                       spin_unlock_irq(phba->host->host_lock);
+                       del_timer_sync(&nlp->nlp_tmofunc);
+                       if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
+                               list_del_init(&nlp->nodev_timeout_evt.
+                                               evt_listp);
+
+               }
+               nlp->nlp_type |= NLP_FC_NODE;
+               break;
+       case NLP_MAPPED_LIST:
+               rport_add = mapped;
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the map list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
+               phba->fc_map_cnt++;
+               phba->nport_event_cnt++;
+               /* stop nodev tmo if running */
+               if (nlp->nlp_flag & NLP_NODEV_TMO) {
+                       nlp->nlp_flag &= ~NLP_NODEV_TMO;
+                       del_timer_sync(&nlp->nlp_tmofunc);
+                       if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
+                               list_del_init(&nlp->nodev_timeout_evt.
+                                               evt_listp);
+
+               }
+               break;
+       case NLP_NPR_LIST:
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= list;
+               spin_unlock_irq(phba->host->host_lock);
+               /* Put it at the end of the npr list */
+               list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
+               phba->fc_npr_cnt++;
+
+               /*
+                * Sanity check for Fabric entity.
+                * Set nodev_tmo for NPR state, for Fabric use 1 sec.
+                */
+               if (nlp->nlp_type & NLP_FABRIC) {
+                       mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
+               }
+               else {
+                       mod_timer(&nlp->nlp_tmofunc,
+                           jiffies + HZ * phba->cfg_nodev_tmo);
+               }
+               spin_lock_irq(phba->host->host_lock);
+               nlp->nlp_flag |= NLP_NODEV_TMO;
+               nlp->nlp_flag &= ~NLP_RCV_PLOGI;
+               spin_unlock_irq(phba->host->host_lock);
+               break;
+       case NLP_JUST_DQ:
+               break;
+       }
+
+       /*
+        * We make all the calls into the transport after we have
+        * moved the node between lists. This is so that we don't
+        * release the lock while the node is between lists.
+        */
+
+       /* Don't upcall midlayer if we're unloading */
+       if (!(phba->fc_flag & FC_UNLOADING)) {
+               /*
+                * We revalidate the rport pointer as the "add" function
+                * may have removed the remote port.
+                */
+               if ((rport_del != none) && nlp->rport)
+                       fc_remote_port_block(nlp->rport);
+
+               if (rport_add != none) {
+                       /*
+                        * Tell the fc transport about the port, if we haven't
+                        * already. If we have, and it's a scsi entity, be
+                        * sure to unblock any attached scsi devices
+                        */
+                       if (!nlp->rport)
+                               lpfc_register_remote_port(phba, nlp);
+                       else
+                               fc_remote_port_unblock(nlp->rport);
+
+                       /*
+                        * if we added to Mapped list, but the remote port
+                        * registration failed or assigned a target id outside
+                        * our presentable range - move the node to the
+                        * Unmapped List
+                        */
+                       if ((rport_add == mapped) &&
+                           ((!nlp->rport) ||
+                            (nlp->rport->scsi_target_id == -1) ||
+                            (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
+                               nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+                               spin_lock_irq(phba->host->host_lock);
+                               nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+                               spin_unlock_irq(phba->host->host_lock);
+                               lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
+                       }
+               }
+       }
+       return (0);
+}
+
+/*
+ * Start / ReStart rescue timer for Discovery / RSCN handling
+ */
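+/*
+ * The discovery timeout armed below is (2 * fc_ratov + 1) seconds, i.e.
+ * twice the resource allocation timeout plus a second of slack, and
+ * FC_DISC_TMO is flagged so later paths know the timer is running.
+ */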
+void
+lpfc_set_disctmo(struct lpfc_hba * phba)
+{
+       uint32_t tmo;
+
+       tmo = ((phba->fc_ratov * 2) + 1);
+
+       mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag |= FC_DISC_TMO;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Start Discovery Timer state <hba_state> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0247 Start Discovery Timer state x%x "
+                       "Data: x%x x%lx x%x x%x\n",
+                       phba->brd_no,
+                       phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
+                       phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+       return;
+}
+
+/*
+ * Cancel rescue timer for Discovery / RSCN handling
+ */
+int
+lpfc_can_disctmo(struct lpfc_hba * phba)
+{
+       /* Turn off discovery timer if its running */
+       if (phba->fc_flag & FC_DISC_TMO) {
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag &= ~FC_DISC_TMO;
+               spin_unlock_irq(phba->host->host_lock);
+               del_timer_sync(&phba->fc_disctmo);
+               phba->work_hba_events &= ~WORKER_DISC_TMO;
+       }
+
+       /* Cancel Discovery Timer state <hba_state> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0248 Cancel Discovery Timer state x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, phba->hba_state, phba->fc_flag,
+                       phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+       return (0);
+}
+
+/*
+ * Check specified ring for outstanding IOCB on the SLI queue
+ * Return true if iocb matches the specified nport
+ */
+int
+lpfc_check_sli_ndlp(struct lpfc_hba * phba,
+                   struct lpfc_sli_ring * pring,
+                   struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_sli *psli;
+       IOCB_t *icmd;
+
+       psli = &phba->sli;
+       icmd = &iocb->iocb;
+       if (pring->ringno == LPFC_ELS_RING) {
+               switch (icmd->ulpCommand) {
+               case CMD_GEN_REQUEST64_CR:
+                       if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
+                               return (1);
+               case CMD_ELS_REQUEST64_CR:
+               case CMD_XMIT_ELS_RSP64_CX:
+                       if (iocb->context1 == (uint8_t *) ndlp)
+                               return (1);
+               }
+       } else if (pring->ringno == psli->ip_ring) {
+
+       } else if (pring->ringno == psli->fcp_ring) {
+               /* Skip match check if waiting to relogin to FCP target */
+               if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+                 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+                       return (0);
+               }
+               if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+                       return (1);
+               }
+       } else if (pring->ringno == psli->next_ring) {
+
+       }
+       return (0);
+}
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with nlp_rpi in the LPFC_NODELIST entry.
+ */
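+/*
+ * For each SLI ring the txq is walked under the host lock; every pending
+ * IOCB that lpfc_check_sli_ndlp() matches to this node is dequeued and
+ * completed with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED (or returned to
+ * the free IOCB list if it has no completion handler).  IOCBs already on
+ * the txcmplq are left for the firmware to fail, as noted below.
+ */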
+static int
+lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_iocbq *iocb, *next_iocb;
+       IOCB_t *icmd;
+       uint32_t rpi, i;
+
+       /*
+        * Everything that matches on txcmplq will be returned
+        * by firmware with a no rpi error.
+        */
+       psli = &phba->sli;
+       rpi = ndlp->nlp_rpi;
+       if (rpi) {
+               /* Now process each ring */
+               for (i = 0; i < psli->num_rings; i++) {
+                       pring = &psli->ring[i];
+
+                       spin_lock_irq(phba->host->host_lock);
+                       list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
+                                               list) {
+                               /*
+                                * Check to see if iocb matches the nport we are
+                                * looking for
+                                */
+                               if ((lpfc_check_sli_ndlp
+                                    (phba, pring, iocb, ndlp))) {
+                                       /* It matches, so dequeue it and
+                                          call the completion with an error */
+                                       list_del(&iocb->list);
+                                       pring->txq_cnt--;
+                                       if (iocb->iocb_cmpl) {
+                                               icmd = &iocb->iocb;
+                                               icmd->ulpStatus =
+                                                   IOSTAT_LOCAL_REJECT;
+                                               icmd->un.ulpWord[4] =
+                                                   IOERR_SLI_ABORTED;
+                                               spin_unlock_irq(phba->host->
+                                                               host_lock);
+                                               (iocb->iocb_cmpl) (phba,
+                                                                  iocb, iocb);
+                                               spin_lock_irq(phba->host->
+                                                             host_lock);
+                                       } else {
+                                               list_add_tail(&iocb->list,
+                                                       &phba->lpfc_iocb_list);
+                                       }
+                               }
+                       }
+                       spin_unlock_irq(phba->host->host_lock);
+
+               }
+       }
+       return (0);
+}
+
+/*
+ * Free rpi associated with LPFC_NODELIST entry.
+ * This routine is called from lpfc_freenode(), when we are removing
+ * a LPFC_NODELIST entry. It is also called if the driver initiates a
+ * LOGO that completes successfully, and we are waiting to PLOGI back
+ * to the remote NPort. In addition, it is called after we receive
+ * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
+ * we are waiting to PLOGI back to the remote NPort.
+ */
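+/*
+ * If the node holds an RPI, an UNREG_LOGIN mailbox is issued (when one
+ * can be allocated) with the default completion handler, any I/O still
+ * queued for that RPI is flushed via lpfc_no_rpi(), and nlp_rpi is
+ * cleared.  Returns 1 when an RPI was actually unregistered, 0 otherwise.
+ */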
+int
+lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc;
+
+       if (ndlp->nlp_rpi) {
+               if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+                       lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
+                       mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
+                       rc = lpfc_sli_issue_mbox
+                                   (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+                       if (rc == MBX_NOT_FINISHED)
+                               mempool_free( mbox, phba->mbox_mem_pool);
+               }
+               lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+               lpfc_no_rpi(phba, ndlp);
+               ndlp->nlp_rpi = 0;
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Free resources associated with LPFC_NODELIST entry
+ * so it can be freed.
+ */
+static int
+lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+       LPFC_MBOXQ_t       *mb;
+       LPFC_MBOXQ_t       *nextmb;
+       struct lpfc_dmabuf *mp;
+       struct fc_rport *rport;
+
+       /* Cleanup node for NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                       "%d:0900 Cleanup node for NPort x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+                       ndlp->nlp_state, ndlp->nlp_rpi);
+
+       lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
+
+       /*
+        * if unloading the driver - just leave the remote port in place.
+        * The driver unload will force the attached devices to detach
+        * and flush caches w/o generating flush errors.
+        */
+       if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
+               rport = ndlp->rport;
+               ndlp->rport = NULL;
+               fc_remote_port_unblock(rport);
+               fc_remote_port_delete(rport);
+               ndlp->nlp_sid = NLP_NO_SID;
+       }
+
+       /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+       if ((mb = phba->sli.mbox_active)) {
+               if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+                  (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+                       mb->context2 = NULL;
+                       mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               }
+       }
+       list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+               if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+                  (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+                       mp = (struct lpfc_dmabuf *) (mb->context1);
+                       if (mp) {
+                               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                               kfree(mp);
+                       }
+                       list_del(&mb->list);
+                       mempool_free(mb, phba->mbox_mem_pool);
+               }
+       }
+
+       lpfc_els_abort(phba,ndlp,0);
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
+       spin_unlock_irq(phba->host->host_lock);
+       del_timer_sync(&ndlp->nlp_tmofunc);
+
+       del_timer_sync(&ndlp->nlp_delayfunc);
+
+       if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
+               list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
+       if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+               list_del_init(&ndlp->els_retry_evt.evt_listp);
+
+       lpfc_unreg_rpi(phba, ndlp);
+
+       return (0);
+}
+
+/*
+ * Check to see if we can free the nlp back to the freelist.
+ * If we are in the middle of using the nlp in the discovery state
+ * machine, defer the free till we reach the end of the state machine.
+ */
+int
+lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+       if (ndlp->nlp_flag & NLP_NODEV_TMO) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+               spin_unlock_irq(phba->host->host_lock);
+               del_timer_sync(&ndlp->nlp_tmofunc);
+               if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
+                       list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
+
+       }
+
+
+       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+               spin_unlock_irq(phba->host->host_lock);
+               del_timer_sync(&ndlp->nlp_delayfunc);
+               if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+                       list_del_init(&ndlp->els_retry_evt.evt_listp);
+       }
+
+       if (ndlp->nlp_disc_refcnt) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag |= NLP_DELAY_REMOVE;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       else {
+               lpfc_freenode(phba, ndlp);
+               mempool_free( ndlp, phba->nlp_mem_pool);
+       }
+       return(0);
+}
+
+static int
+lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
+{
+       D_ID mydid;
+       D_ID ndlpdid;
+       D_ID matchdid;
+
+       if (did == Bcast_DID)
+               return (0);
+
+       if (ndlp->nlp_DID == 0) {
+               return (0);
+       }
+
+       /* First check for Direct match */
+       if (ndlp->nlp_DID == did)
+               return (1);
+
+       /* Next check for area/domain identically equals 0 match */
+       mydid.un.word = phba->fc_myDID;
+       if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
+               return (0);
+       }
+
+       matchdid.un.word = did;
+       ndlpdid.un.word = ndlp->nlp_DID;
+       if (matchdid.un.b.id == ndlpdid.un.b.id) {
+               if ((mydid.un.b.domain == matchdid.un.b.domain) &&
+                   (mydid.un.b.area == matchdid.un.b.area)) {
+                       if ((ndlpdid.un.b.domain == 0) &&
+                           (ndlpdid.un.b.area == 0)) {
+                               if (ndlpdid.un.b.id)
+                                       return (1);
+                       }
+                       return (0);
+               }
+
+               matchdid.un.word = ndlp->nlp_DID;
+               if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
+                   (mydid.un.b.area == ndlpdid.un.b.area)) {
+                       if ((matchdid.un.b.domain == 0) &&
+                           (matchdid.un.b.area == 0)) {
+                               if (matchdid.un.b.id)
+                                       return (1);
+                       }
+               }
+       }
+       return (0);
+}
+
+/* Search for a nodelist entry on a specific list */
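+/*
+ * The 'order' argument is a bitmask of NLP_SEARCH_* flags selecting which
+ * of the per-state lists (unmapped, mapped, plogi, adisc, reglogin, prli,
+ * npr, unused) are scanned for a DID match; the first hit is returned and
+ * logged with a data word packing nlp_state, nlp_xri, nlp_type and the
+ * low byte of nlp_rpi.
+ */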
+struct lpfc_nodelist *
+lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
+{
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+       uint32_t data1;
+
+       if (order & NLP_SEARCH_UNMAPPED) {
+               list_for_each_entry_safe(ndlp, next_ndlp,
+                                        &phba->fc_nlpunmap_list, nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* FIND node DID unmapped */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0929 FIND node DID unmapped"
+                                               " Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       if (order & NLP_SEARCH_MAPPED) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
+                                       nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* FIND node DID mapped */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0930 FIND node DID mapped "
+                                               "Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       if (order & NLP_SEARCH_PLOGI) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+                                       nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* LOG change to PLOGI */
+                               /* FIND node DID plogi */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0908 FIND node DID plogi "
+                                               "Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       if (order & NLP_SEARCH_ADISC) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+                                       nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* LOG change to ADISC */
+                               /* FIND node DID adisc */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0931 FIND node DID adisc "
+                                               "Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       if (order & NLP_SEARCH_REGLOGIN) {
+               list_for_each_entry_safe(ndlp, next_ndlp,
+                                        &phba->fc_reglogin_list, nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* LOG change to REGLOGIN */
+                               /* FIND node DID reglogin */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0931 FIND node DID reglogin"
+                                               " Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       if (order & NLP_SEARCH_PRLI) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
+                                       nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* LOG change to PRLI */
+                               /* FIND node DID prli */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0931 FIND node DID prli "
+                                               "Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       if (order & NLP_SEARCH_NPR) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+                                       nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* LOG change to NPR */
+                               /* FIND node DID npr */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0931 FIND node DID npr "
+                                               "Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       if (order & NLP_SEARCH_UNUSED) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+                                       nlp_listp) {
+                       if (lpfc_matchdid(phba, ndlp, did)) {
+
+                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                        ((uint32_t) ndlp->nlp_xri << 16) |
+                                        ((uint32_t) ndlp->nlp_type << 8) |
+                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
+                               /* LOG change to UNUSED */
+                               /* FIND node DID unused */
+                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+                                               "%d:0931 FIND node DID unused "
+                                               "Data: x%p x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               ndlp, ndlp->nlp_DID,
+                                               ndlp->nlp_flag, data1);
+                               return (ndlp);
+                       }
+               }
+       }
+
+       /* FIND node did <did> NOT FOUND */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_NODE,
+                       "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
+                       phba->brd_no, did, order);
+
+       /* no match found */
+       return NULL;
+}
+
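+/*
+ * Look up, or allocate, the node for a DID that discovery should visit.
+ * Once the HBA is ready, RSCN payload filtering decides whether the DID
+ * is of interest; nodes already on the ADISC or PLOGI lists are left
+ * alone, everything else is parked on the NPR list with NLP_NPR_2B_DISC
+ * set so the discovery pass will PLOGI/ADISC it.
+ */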
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
+{
+       struct lpfc_nodelist *ndlp;
+       uint32_t flg;
+
+       if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
+               if ((phba->hba_state == LPFC_HBA_READY) &&
+                  ((lpfc_rscn_payload_check(phba, did) == 0)))
+                       return NULL;
+               ndlp = (struct lpfc_nodelist *)
+                    mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+               if (!ndlp)
+                       return NULL;
+               lpfc_nlp_init(phba, ndlp, did);
+               ndlp->nlp_state = NLP_STE_NPR_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               return ndlp;
+       }
+       if ((phba->hba_state == LPFC_HBA_READY) &&
+           (phba->fc_flag & FC_RSCN_MODE)) {
+               if (lpfc_rscn_payload_check(phba, did)) {
+                       ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               }
+               else {
+                       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+                       ndlp = NULL;
+               }
+       }
+       else {
+               flg = ndlp->nlp_flag & NLP_LIST_MASK;
+               if ((flg == NLP_ADISC_LIST) ||
+               (flg == NLP_PLOGI_LIST)) {
+                       return NULL;
+               }
+               ndlp->nlp_state = NLP_STE_NPR_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+       }
+       return ndlp;
+}
+
+/* Build a list of nodes to discover based on the loopmap */
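+/*
+ * When a loop map was reported it drives the list directly; otherwise
+ * every ALPA in lpfcAlpaArray is tried, scanning from high ALPA to low
+ * when cfg_scan_down is set.  The local ALPA (and ALPA 0) is skipped.
+ */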
+void
+lpfc_disc_list_loopmap(struct lpfc_hba * phba)
+{
+       int j;
+       uint32_t alpa, index;
+
+       if (phba->hba_state <= LPFC_LINK_DOWN) {
+               return;
+       }
+       if (phba->fc_topology != TOPOLOGY_LOOP) {
+               return;
+       }
+
+       /* Check for loop map present or not */
+       if (phba->alpa_map[0]) {
+               for (j = 1; j <= phba->alpa_map[0]; j++) {
+                       alpa = phba->alpa_map[j];
+
+                       if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
+                               continue;
+                       }
+                       lpfc_setup_disc_node(phba, alpa);
+               }
+       } else {
+               /* No alpamap, so try all alpa's */
+               for (j = 0; j < FC_MAXLOOP; j++) {
+                       /* If cfg_scan_down is set, start from highest
+                        * ALPA (0xef) to lowest (0x1).
+                        */
+                       if (phba->cfg_scan_down)
+                               index = j;
+                       else
+                               index = FC_MAXLOOP - j - 1;
+                       alpa = lpfcAlpaArray[index];
+                       if ((phba->fc_myDID & 0xff) == alpa) {
+                               continue;
+                       }
+
+                       lpfc_setup_disc_node(phba, alpa);
+               }
+       }
+       return;
+}
+
+/* Start Link up / RSCN discovery on NPR list */
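+/*
+ * Discovery pass over the NPR list: arm the discovery timer, force PLOGI
+ * (clear NLP_NPR_ADISC) if our own DID changed, then issue ADISCs first.
+ * Only when nothing is left to ADISC does the link-up path send CLEAR_LA;
+ * otherwise PLOGIs go out next and any pending RSCN processing is wrapped
+ * up via lpfc_els_handle_rscn().
+ */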
+void
+lpfc_disc_start(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli;
+       LPFC_MBOXQ_t *mbox;
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+       uint32_t did_changed, num_sent;
+       uint32_t clear_la_pending;
+       int rc;
+
+       psli = &phba->sli;
+
+       if (phba->hba_state <= LPFC_LINK_DOWN) {
+               return;
+       }
+       if (phba->hba_state == LPFC_CLEAR_LA)
+               clear_la_pending = 1;
+       else
+               clear_la_pending = 0;
+
+       if (phba->hba_state < LPFC_HBA_READY) {
+               phba->hba_state = LPFC_DISC_AUTH;
+       }
+       lpfc_set_disctmo(phba);
+
+       if (phba->fc_prevDID == phba->fc_myDID) {
+               did_changed = 0;
+       } else {
+               did_changed = 1;
+       }
+       phba->fc_prevDID = phba->fc_myDID;
+       phba->num_disc_nodes = 0;
+
+       /* Start Discovery state <hba_state> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0202 Start Discovery hba state x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, phba->hba_state, phba->fc_flag,
+                       phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+       /* If our did changed, we MUST do PLOGI */
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+                               nlp_listp) {
+               if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+                       if (did_changed) {
+                               spin_lock_irq(phba->host->host_lock);
+                               ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+                               spin_unlock_irq(phba->host->host_lock);
+                       }
+               }
+       }
+
+       /* First do ADISCs - if any */
+       num_sent = lpfc_els_disc_adisc(phba);
+
+       if (num_sent)
+               return;
+
+       if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
+               /* If we get here, there is nothing to ADISC */
+               if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+                       phba->hba_state = LPFC_CLEAR_LA;
+                       lpfc_clear_la(phba, mbox);
+                       mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+                       rc = lpfc_sli_issue_mbox(phba, mbox,
+                                                (MBX_NOWAIT | MBX_STOP_IOCB));
+                       if (rc == MBX_NOT_FINISHED) {
+                               mempool_free( mbox, phba->mbox_mem_pool);
+                               lpfc_disc_flush_list(phba);
+                               psli->ring[(psli->ip_ring)].flag &=
+                                       ~LPFC_STOP_IOCB_EVENT;
+                               psli->ring[(psli->fcp_ring)].flag &=
+                                       ~LPFC_STOP_IOCB_EVENT;
+                               psli->ring[(psli->next_ring)].flag &=
+                                       ~LPFC_STOP_IOCB_EVENT;
+                               phba->hba_state = LPFC_HBA_READY;
+                       }
+               }
+       } else {
+               /* Next do PLOGIs - if any */
+               num_sent = lpfc_els_disc_plogi(phba);
+
+               if (num_sent)
+                       return;
+
+               if (phba->fc_flag & FC_RSCN_MODE) {
+                       /* Check to see if more RSCNs came in while we
+                        * were processing this one.
+                        */
+                       if ((phba->fc_rscn_id_cnt == 0) &&
+                           (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+                               spin_lock_irq(phba->host->host_lock);
+                               phba->fc_flag &= ~FC_RSCN_MODE;
+                               spin_unlock_irq(phba->host->host_lock);
+                       }
+                       else
+                               lpfc_els_handle_rscn(phba);
+               }
+       }
+       return;
+}
+
+/*
+ *  Ignore completion for all IOCBs on the tx and txcmpl queues for the
+ *  ELS ring that match the specified nodelist.
+ */
+static void
+lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+       struct lpfc_sli *psli;
+       IOCB_t     *icmd;
+       struct lpfc_iocbq    *iocb, *next_iocb;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_dmabuf   *mp;
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];
+
+       /* Error matching iocb on txq or txcmplq
+        * First check the txq.
+        */
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+               if (iocb->context1 != ndlp) {
+                       continue;
+               }
+               icmd = &iocb->iocb;
+               if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
+                   (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+                       list_del(&iocb->list);
+                       pring->txq_cnt--;
+                       lpfc_els_free_iocb(phba, iocb);
+               }
+       }
+
+       /* Next check the txcmplq */
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+               if (iocb->context1 != ndlp) {
+                       continue;
+               }
+               icmd = &iocb->iocb;
+               if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
+                   (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+                       iocb->iocb_cmpl = NULL;
+                       /* context2 = cmd, context2->next = rsp, context3 =
+                          bpl */
+                       if (iocb->context2) {
+                               /* Free the response IOCB before handling the
+                                  command. */
+
+                               mp = (struct lpfc_dmabuf *) (iocb->context2);
+                               mp = list_get_first(&mp->list,
+                                                   struct lpfc_dmabuf,
+                                                   list);
+                               if (mp) {
+                                       /* Delay before releasing rsp buffer to
+                                        * give UNREG mbox a chance to take
+                                        * effect.
+                                        */
+                                       list_add(&mp->list,
+                                               &phba->freebufList);
+                               }
+                               lpfc_mbuf_free(phba,
+                                              ((struct lpfc_dmabuf *)
+                                               iocb->context2)->virt,
+                                              ((struct lpfc_dmabuf *)
+                                               iocb->context2)->phys);
+                               kfree(iocb->context2);
+                       }
+
+                       if (iocb->context3) {
+                               lpfc_mbuf_free(phba,
+                                              ((struct lpfc_dmabuf *)
+                                               iocb->context3)->virt,
+                                              ((struct lpfc_dmabuf *)
+                                               iocb->context3)->phys);
+                               kfree(iocb->context3);
+                       }
+               }
+       }
+
+       return;
+}
+
+void
+lpfc_disc_flush_list(struct lpfc_hba * phba)
+{
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+
+       if (phba->fc_plogi_cnt) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+                                       nlp_listp) {
+                       lpfc_free_tx(phba, ndlp);
+                       lpfc_nlp_remove(phba, ndlp);
+               }
+       }
+       if (phba->fc_adisc_cnt) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+                                       nlp_listp) {
+                       lpfc_free_tx(phba, ndlp);
+                       lpfc_nlp_remove(phba, ndlp);
+               }
+       }
+       return;
+}
+
+/*****************************************************************************/
+/*
+ * NAME:     lpfc_disc_timeout
+ *
+ * FUNCTION: Fibre Channel driver discovery timeout routine.
+ *
+ * EXECUTION ENVIRONMENT: interrupt only
+ *
+ * CALLED FROM:
+ *      Timer function
+ *
+ * RETURNS:
+ *      none
+ */
+/*****************************************************************************/
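+/*
+ * Runs in timer (softirq) context, so it only marks WORKER_DISC_TMO in
+ * work_hba_events and wakes the worker thread; the actual recovery work
+ * is done from process context in lpfc_disc_timeout_handler().
+ */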
+void
+lpfc_disc_timeout(unsigned long ptr)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+       unsigned long flags = 0;
+
+       if (unlikely(!phba))
+               return;
+
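+       /* Runs in timer context: just flag the discovery timeout event and
+        * wake the worker thread, which calls lpfc_disc_timeout_handler().
+        */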
+       spin_lock_irqsave(phba->host->host_lock, flags);
+       if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
+               phba->work_hba_events |= WORKER_DISC_TMO;
+               if (phba->work_wait)
+                       wake_up(phba->work_wait);
+       }
+       spin_unlock_irqrestore(phba->host->host_lock, flags);
+       return;
+}
+
+static void
+lpfc_disc_timeout_handler(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_nodelist *ndlp;
+       LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
+       int rc, clrlaerr = 0;
+
+       if (unlikely(!phba))
+               return;
+
+       if (!(phba->fc_flag & FC_DISC_TMO))
+               return;
+
+       psli = &phba->sli;
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag &= ~FC_DISC_TMO;
+       spin_unlock_irq(phba->host->host_lock);
+
+       switch (phba->hba_state) {
+
+       case LPFC_LOCAL_CFG_LINK:
+       /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
+               /* FAN timeout */
+               lpfc_printf_log(phba,
+                                KERN_WARNING,
+                                LOG_DISCOVERY,
+                                "%d:0221 FAN timeout\n",
+                                phba->brd_no);
+
+               /* Forget about FAN; start discovery by sending a FLOGI.
+                * hba_state is identically LPFC_FLOGI while waiting for
+                * FLOGI cmpl.
+                */
+               phba->hba_state = LPFC_FLOGI;
+               lpfc_set_disctmo(phba);
+               lpfc_initial_flogi(phba);
+               break;
+
+       case LPFC_FLOGI:
+       /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+               /* Initial FLOGI timeout */
+               lpfc_printf_log(phba,
+                                KERN_ERR,
+                                LOG_DISCOVERY,
+                                "%d:0222 Initial FLOGI timeout\n",
+                                phba->brd_no);
+
+               /* Assume no Fabric and go on with discovery.
+                * Check for outstanding ELS FLOGI to abort.
+                */
+
+               /* FLOGI failed, so just use loop map to make discovery list */
+               lpfc_disc_list_loopmap(phba);
+
+               /* Start discovery */
+               lpfc_disc_start(phba);
+               break;
+
+       case LPFC_FABRIC_CFG_LINK:
+       /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
+          NameServer login */
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                               "%d:0223 Timeout while waiting for NameServer "
+                               "login\n", phba->brd_no);
+
+               /* Next look for NameServer ndlp */
+               ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
+               if (ndlp)
+                       lpfc_nlp_remove(phba, ndlp);
+               /* Start discovery */
+               lpfc_disc_start(phba);
+               break;
+
+       case LPFC_NS_QRY:
+       /* Timeout while waiting for NameServer response */
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                               "%d:0224 NameServer Query timeout "
+                               "Data: x%x x%x\n",
+                               phba->brd_no,
+                               phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+               ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+                                                               NameServer_DID);
+               if (ndlp) {
+                       if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+                               /* Try it one more time */
+                               rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
+                               if (rc == 0)
+                                       break;
+                       }
+                       phba->fc_ns_retry = 0;
+               }
+
+               /* Nothing to authenticate, so CLEAR_LA right now */
+               clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!clearlambox) {
+                       clrlaerr = 1;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                                       "%d:0226 Device Discovery "
+                                       "completion error\n",
+                                       phba->brd_no);
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       break;
+               }
+
+               phba->hba_state = LPFC_CLEAR_LA;
+               lpfc_clear_la(phba, clearlambox);
+               clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+               rc = lpfc_sli_issue_mbox(phba, clearlambox,
+                                        (MBX_NOWAIT | MBX_STOP_IOCB));
+               if (rc == MBX_NOT_FINISHED) {
+                       mempool_free(clearlambox, phba->mbox_mem_pool);
+                       clrlaerr = 1;
+                       break;
+               }
+
+               /* Setup and issue mailbox INITIALIZE LINK command */
+               initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!initlinkmbox) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                                       "%d:0226 Device Discovery "
+                                       "completion error\n",
+                                       phba->brd_no);
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       break;
+               }
+
+               lpfc_linkdown(phba);
+               lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
+                              phba->cfg_link_speed);
+               initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+               rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
+                                        (MBX_NOWAIT | MBX_STOP_IOCB));
+               if (rc == MBX_NOT_FINISHED)
+                       mempool_free(initlinkmbox, phba->mbox_mem_pool);
+
+               break;
+
+       case LPFC_DISC_AUTH:
+       /* Node Authentication timeout */
+               lpfc_printf_log(phba,
+                                KERN_ERR,
+                                LOG_DISCOVERY,
+                                "%d:0227 Node Authentication timeout\n",
+                                phba->brd_no);
+               lpfc_disc_flush_list(phba);
+               clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!clearlambox) {
+                       clrlaerr = 1;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                                       "%d:0226 Device Discovery "
+                                       "completion error\n",
+                                       phba->brd_no);
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       break;
+               }
+               phba->hba_state = LPFC_CLEAR_LA;
+               lpfc_clear_la(phba, clearlambox);
+               clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+               rc = lpfc_sli_issue_mbox(phba, clearlambox,
+                                        (MBX_NOWAIT | MBX_STOP_IOCB));
+               if (rc == MBX_NOT_FINISHED) {
+                       mempool_free(clearlambox, phba->mbox_mem_pool);
+                       clrlaerr = 1;
+               }
+               break;
+
+       case LPFC_CLEAR_LA:
+       /* CLEAR LA timeout */
+               lpfc_printf_log(phba,
+                                KERN_ERR,
+                                LOG_DISCOVERY,
+                                "%d:0228 CLEAR LA timeout\n",
+                                phba->brd_no);
+               clrlaerr = 1;
+               break;
+
+       case LPFC_HBA_READY:
+               if (phba->fc_flag & FC_RSCN_MODE) {
+                       lpfc_printf_log(phba,
+                                       KERN_ERR,
+                                       LOG_DISCOVERY,
+                                       "%d:0231 RSCN timeout Data: x%x x%x\n",
+                                       phba->brd_no,
+                                       phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+                       /* Cleanup any outstanding ELS commands */
+                       lpfc_els_flush_cmd(phba);
+
+                       lpfc_els_flush_rscn(phba);
+                       lpfc_disc_flush_list(phba);
+               }
+               break;
+       }
+
+       if (clrlaerr) {
+               lpfc_disc_flush_list(phba);
+               psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+               psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+               psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+               phba->hba_state = LPFC_HBA_READY;
+       }
+
+       return;
+}
+
+static void
+lpfc_nodev_timeout(unsigned long ptr)
+{
+       struct lpfc_hba *phba;
+       struct lpfc_nodelist *ndlp;
+       unsigned long iflag;
+       struct lpfc_work_evt  *evtp;
+
+       ndlp = (struct lpfc_nodelist *)ptr;
+       phba = ndlp->nlp_phba;
+       evtp = &ndlp->nodev_timeout_evt;
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+
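+       /* Defer nodev timeout processing to the worker thread; if an event
+        * for this node is already queued there is nothing more to do.
+        */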
+       if (!list_empty(&evtp->evt_listp)) {
+               spin_unlock_irqrestore(phba->host->host_lock, iflag);
+               return;
+       }
+       evtp->evt_arg1  = ndlp;
+       evtp->evt       = LPFC_EVT_NODEV_TMO;
+       list_add_tail(&evtp->evt_listp, &phba->work_list);
+       if (phba->work_wait)
+               wake_up(phba->work_wait);
+
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+       return;
+}
+
+
+/*
+ * This routine handles processing an FDMI REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_sli *psli;
+       MAILBOX_t *mb;
+       struct lpfc_dmabuf *mp;
+       struct lpfc_nodelist *ndlp;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+
+       ndlp = (struct lpfc_nodelist *) pmb->context2;
+       mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+       pmb->context1 = NULL;
+
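+       /* REG_LOGIN returns the new RPI in mailbox word 0; drop any stale
+        * hash entry for this node before recording the new RPI.
+        */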
+       if (ndlp->nlp_rpi != 0)
+               lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+       ndlp->nlp_rpi = mb->un.varWords[0];
+       lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+       ndlp->nlp_type |= NLP_FABRIC;
+       ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+       /* Start issuing Fabric-Device Management Interface (FDMI)
+        * command to 0xfffffa (FDMI well known port)
+        */
+       if (phba->cfg_fdmi_on == 1) {
+               lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
+       } else {
+               /*
+                * Delay issuing FDMI command if fdmi-on=2
+                * (supporting RPA/hostname)
+                */
+               mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
+       }
+
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mempool_free( pmb, phba->mbox_mem_pool);
+
+       return;
+}
+
+/*
+ * This routine looks up the ndlp hash table for the
+ * given RPI. If the RPI is found it returns the node
+ * list pointer, otherwise it returns NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
+{
+       struct lpfc_nodelist *ret;
+
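+       /* Walk this hash bucket's chain until the matching RPI is found
+        * or the chain ends.
+        */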
+       ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
+       while ((ret != 0) && (ret->nlp_rpi != rpi)) {
+               ret = ret->nlp_rpi_hash_next;
+       }
+       return ret;
+}
+
+/*
+ * This routine looks up the ndlp hash table for the
+ * given RPI. If the RPI is found, the entry is removed
+ * from the hash table and the node list pointer is
+ * returned; otherwise NULL is returned.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
+{
+       struct lpfc_nodelist *ret, *temp;
+
+       ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
+       if (ret == 0)
+               return NULL;
+
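+       /* Matching RPI at the head of the bucket: unlink it and return it. */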
+       if (ret->nlp_rpi == rpi) {
+               phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
+                   ret->nlp_rpi_hash_next;
+               ret->nlp_rpi_hash_next = NULL;
+               return ret;
+       }
+
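+       /* Otherwise walk the chain looking for the entry just before the
+        * one with the matching RPI.
+        */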
+       while ((ret->nlp_rpi_hash_next != 0) &&
+              (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
+               ret = ret->nlp_rpi_hash_next;
+       }
+
+       if (ret->nlp_rpi_hash_next != 0) {
+               temp = ret->nlp_rpi_hash_next;
+               ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
+               temp->nlp_rpi_hash_next = NULL;
+               return temp;
+       } else {
+               return NULL;
+       }
+}
+
+/*
+ * This routine adds the node list entry to the
+ * ndlp hash table.
+ */
+void
+lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                uint16_t rpi)
+{
+
+       uint32_t index;
+
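+       /* Insert the node at the head of its hash bucket's chain. */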
+       index = LPFC_RPI_HASH_FUNC(rpi);
+       ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
+       phba->fc_nlplookup[index] = ndlp;
+       return;
+}
+
+void
+lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                uint32_t did)
+{
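+       /* Zero the node, then set up (but do not start) the nodev timeout
+        * and ELS retry delay timers.
+        */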
+       memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+       INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
+       INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+       init_timer(&ndlp->nlp_tmofunc);
+       ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
+       ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
+       init_timer(&ndlp->nlp_delayfunc);
+       ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+       ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+       ndlp->nlp_DID = did;
+       ndlp->nlp_phba = phba;
+       ndlp->nlp_sid = NLP_NO_SID;
+       return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
new file mode 100644 (file)
index 0000000..fc958a9
--- /dev/null
@@ -0,0 +1,2687 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_hw.h 1.37 2005/03/29 19:51:45EST sf_support Exp  $
+ */
+
+#define FDMI_DID        0xfffffaU
+#define NameServer_DID  0xfffffcU
+#define SCR_DID         0xfffffdU
+#define Fabric_DID      0xfffffeU
+#define Bcast_DID       0xffffffU
+#define Mask_DID        0xffffffU
+#define CT_DID_MASK     0xffff00U
+#define Fabric_DID_MASK 0xfff000U
+#define WELL_KNOWN_DID_MASK 0xfffff0U
+
+#define PT2PT_LocalID  1
+#define PT2PT_RemoteID 2
+
+#define FF_DEF_EDTOV          2000     /* Default E_D_TOV (2000ms) */
+#define FF_DEF_ALTOV            15     /* Default AL_TIME (15ms) */
+#define FF_DEF_RATOV             2     /* Default RA_TOV (2s) */
+#define FF_DEF_ARBTOV         1900     /* Default ARB_TOV (1900ms) */
+
+#define LPFC_BUF_RING0        64       /* Number of buffers to post to RING
+                                          0 */
+
+#define FCELSSIZE             1024     /* maximum ELS transfer size */
+
+#define LPFC_FCP_RING            0     /* ring 0 for FCP initiator commands */
+#define LPFC_IP_RING             1     /* ring 1 for IP commands */
+#define LPFC_ELS_RING            2     /* ring 2 for ELS commands */
+#define LPFC_FCP_NEXT_RING       3
+
+#define SLI2_IOCB_CMD_R0_ENTRIES    172        /* SLI-2 FCP command ring entries */
+#define SLI2_IOCB_RSP_R0_ENTRIES    134        /* SLI-2 FCP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES      4        /* SLI-2 IP command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES      4        /* SLI-2 IP response ring entries */
+#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36        /* SLI-2 extra FCP cmd ring entries */
+#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52        /* SLI-2 extra FCP rsp ring entries */
+#define SLI2_IOCB_CMD_R2_ENTRIES     20        /* SLI-2 ELS command ring entries */
+#define SLI2_IOCB_RSP_R2_ENTRIES     20        /* SLI-2 ELS response ring entries */
+#define SLI2_IOCB_CMD_R3_ENTRIES      0
+#define SLI2_IOCB_RSP_R3_ENTRIES      0
+#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
+#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+
+/* Common Transport structures and definitions */
+
+union CtRevisionId {
+       /* Structure is in Big Endian format */
+       struct {
+               uint32_t Revision:8;
+               uint32_t InId:24;
+       } bits;
+       uint32_t word;
+};
+
+union CtCommandResponse {
+       /* Structure is in Big Endian format */
+       struct {
+               uint32_t CmdRsp:16;
+               uint32_t Size:16;
+       } bits;
+       uint32_t word;
+};
+
+struct lpfc_sli_ct_request {
+       /* Structure is in Big Endian format */
+       union CtRevisionId RevisionId;
+       uint8_t FsType;
+       uint8_t FsSubType;
+       uint8_t Options;
+       uint8_t Rsrvd1;
+       union CtCommandResponse CommandResponse;
+       uint8_t Rsrvd2;
+       uint8_t ReasonCode;
+       uint8_t Explanation;
+       uint8_t VendorUnique;
+
+       union {
+               uint32_t PortID;
+               struct gid {
+                       uint8_t PortType;       /* for GID_PT requests */
+                       uint8_t DomainScope;
+                       uint8_t AreaScope;
+                       uint8_t Fc4Type;        /* for GID_FT requests */
+               } gid;
+               struct rft {
+                       uint32_t PortId;        /* For RFT_ID requests */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint32_t rsvd0:16;
+                       uint32_t rsvd1:7;
+                       uint32_t fcpReg:1;      /* Type 8 */
+                       uint32_t rsvd2:2;
+                       uint32_t ipReg:1;       /* Type 5 */
+                       uint32_t rsvd3:5;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint32_t rsvd0:16;
+                       uint32_t fcpReg:1;      /* Type 8 */
+                       uint32_t rsvd1:7;
+                       uint32_t rsvd3:5;
+                       uint32_t ipReg:1;       /* Type 5 */
+                       uint32_t rsvd2:2;
+#endif
+
+                       uint32_t rsvd[7];
+               } rft;
+               struct rnn {
+                       uint32_t PortId;        /* For RNN_ID requests */
+                       uint8_t wwnn[8];
+               } rnn;
+               struct rsnn {   /* For RSNN_ID requests */
+                       uint8_t wwnn[8];
+                       uint8_t len;
+                       uint8_t symbname[255];
+               } rsnn;
+       } un;
+};
+
+#define  SLI_CT_REVISION        1
+#define  GID_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 260)
+#define  RFT_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 228)
+#define  RNN_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 252)
+#define  RSNN_REQUEST_SZ        (sizeof(struct lpfc_sli_ct_request))
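+
+/* Each request size above is the CT header plus only the union member used
+ * by that request, i.e. the full structure less the unused tail of the
+ * payload union.
+ */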
+
+/*
+ * FsType Definitions
+ */
+
+#define  SLI_CT_MANAGEMENT_SERVICE        0xFA
+#define  SLI_CT_TIME_SERVICE              0xFB
+#define  SLI_CT_DIRECTORY_SERVICE         0xFC
+#define  SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
+
+/*
+ * Directory Service Subtypes
+ */
+
+#define  SLI_CT_DIRECTORY_NAME_SERVER     0x02
+
+/*
+ * Response Codes
+ */
+
+#define  SLI_CT_RESPONSE_FS_RJT           0x8001
+#define  SLI_CT_RESPONSE_FS_ACC           0x8002
+
+/*
+ * Reason Codes
+ */
+
+#define  SLI_CT_NO_ADDITIONAL_EXPL       0x0
+#define  SLI_CT_INVALID_COMMAND           0x01
+#define  SLI_CT_INVALID_VERSION           0x02
+#define  SLI_CT_LOGICAL_ERROR             0x03
+#define  SLI_CT_INVALID_IU_SIZE           0x04
+#define  SLI_CT_LOGICAL_BUSY              0x05
+#define  SLI_CT_PROTOCOL_ERROR            0x07
+#define  SLI_CT_UNABLE_TO_PERFORM_REQ     0x09
+#define  SLI_CT_REQ_NOT_SUPPORTED         0x0b
+#define  SLI_CT_HBA_INFO_NOT_REGISTERED          0x10
+#define  SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE  0x11
+#define  SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN      0x12
+#define  SLI_CT_HBA_ATTR_NOT_PRESENT     0x13
+#define  SLI_CT_PORT_INFO_NOT_REGISTERED  0x20
+#define  SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
+#define  SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN     0x22
+#define  SLI_CT_VENDOR_UNIQUE             0xff
+
+/*
+ * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
+ */
+
+#define  SLI_CT_NO_PORT_ID                0x01
+#define  SLI_CT_NO_PORT_NAME              0x02
+#define  SLI_CT_NO_NODE_NAME              0x03
+#define  SLI_CT_NO_CLASS_OF_SERVICE       0x04
+#define  SLI_CT_NO_IP_ADDRESS             0x05
+#define  SLI_CT_NO_IPA                    0x06
+#define  SLI_CT_NO_FC4_TYPES              0x07
+#define  SLI_CT_NO_SYMBOLIC_PORT_NAME     0x08
+#define  SLI_CT_NO_SYMBOLIC_NODE_NAME     0x09
+#define  SLI_CT_NO_PORT_TYPE              0x0A
+#define  SLI_CT_ACCESS_DENIED             0x10
+#define  SLI_CT_INVALID_PORT_ID           0x11
+#define  SLI_CT_DATABASE_EMPTY            0x12
+
+/*
+ * Name Server Command Codes
+ */
+
+#define  SLI_CTNS_GA_NXT      0x0100
+#define  SLI_CTNS_GPN_ID      0x0112
+#define  SLI_CTNS_GNN_ID      0x0113
+#define  SLI_CTNS_GCS_ID      0x0114
+#define  SLI_CTNS_GFT_ID      0x0117
+#define  SLI_CTNS_GSPN_ID     0x0118
+#define  SLI_CTNS_GPT_ID      0x011A
+#define  SLI_CTNS_GID_PN      0x0121
+#define  SLI_CTNS_GID_NN      0x0131
+#define  SLI_CTNS_GIP_NN      0x0135
+#define  SLI_CTNS_GIPA_NN     0x0136
+#define  SLI_CTNS_GSNN_NN     0x0139
+#define  SLI_CTNS_GNN_IP      0x0153
+#define  SLI_CTNS_GIPA_IP     0x0156
+#define  SLI_CTNS_GID_FT      0x0171
+#define  SLI_CTNS_GID_PT      0x01A1
+#define  SLI_CTNS_RPN_ID      0x0212
+#define  SLI_CTNS_RNN_ID      0x0213
+#define  SLI_CTNS_RCS_ID      0x0214
+#define  SLI_CTNS_RFT_ID      0x0217
+#define  SLI_CTNS_RSPN_ID     0x0218
+#define  SLI_CTNS_RPT_ID      0x021A
+#define  SLI_CTNS_RIP_NN      0x0235
+#define  SLI_CTNS_RIPA_NN     0x0236
+#define  SLI_CTNS_RSNN_NN     0x0239
+#define  SLI_CTNS_DA_ID       0x0300
+
+/*
+ * Port Types
+ */
+
+#define  SLI_CTPT_N_PORT      0x01
+#define  SLI_CTPT_NL_PORT     0x02
+#define  SLI_CTPT_FNL_PORT    0x03
+#define  SLI_CTPT_IP          0x04
+#define  SLI_CTPT_FCP         0x08
+#define  SLI_CTPT_NX_PORT     0x7F
+#define  SLI_CTPT_F_PORT      0x81
+#define  SLI_CTPT_FL_PORT     0x82
+#define  SLI_CTPT_E_PORT      0x84
+
+#define SLI_CT_LAST_ENTRY     0x80000000
+
+/* Fibre Channel Service Parameter definitions */
+
+#define FC_PH_4_0   6          /* FC-PH version 4.0 */
+#define FC_PH_4_1   7          /* FC-PH version 4.1 */
+#define FC_PH_4_2   8          /* FC-PH version 4.2 */
+#define FC_PH_4_3   9          /* FC-PH version 4.3 */
+
+#define FC_PH_LOW   8          /* Lowest supported FC-PH version */
+#define FC_PH_HIGH  9          /* Highest supported FC-PH version */
+#define FC_PH3   0x20          /* FC-PH-3 version */
+
+#define FF_FRAME_SIZE     2048
+
+struct lpfc_name {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t nameType:4;     /* FC Word 0, bit 28:31 */
+       uint8_t IEEEextMsn:4;   /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t IEEEextMsn:4;   /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
+       uint8_t nameType:4;     /* FC Word 0, bit 28:31 */
+#endif
+
+#define NAME_IEEE           0x1        /* IEEE name - nameType */
+#define NAME_IEEE_EXT       0x2        /* IEEE extended name */
+#define NAME_FC_TYPE        0x3        /* FC native name type */
+#define NAME_IP_TYPE        0x4        /* IP address */
+#define NAME_CCITT_TYPE     0xC
+#define NAME_CCITT_GR_TYPE  0xE
+       uint8_t IEEEextLsb;     /* FC Word 0, bit 16:23, IEEE extended Lsb */
+       uint8_t IEEE[6];        /* FC IEEE address */
+};
+
+struct csp {
+       uint8_t fcphHigh;       /* FC Word 0, byte 0 */
+       uint8_t fcphLow;
+       uint8_t bbCreditMsb;
+       uint8_t bbCreditlsb;    /* FC Word 0, byte 3 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t increasingOffset:1;    /* FC Word 1, bit 31 */
+       uint16_t randomOffset:1;        /* FC Word 1, bit 30 */
+       uint16_t word1Reserved2:1;      /* FC Word 1, bit 29 */
+       uint16_t fPort:1;       /* FC Word 1, bit 28 */
+       uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+       uint16_t edtovResolution:1;     /* FC Word 1, bit 26 */
+       uint16_t multicast:1;   /* FC Word 1, bit 25 */
+       uint16_t broadcast:1;   /* FC Word 1, bit 24 */
+
+       uint16_t huntgroup:1;   /* FC Word 1, bit 23 */
+       uint16_t simplex:1;     /* FC Word 1, bit 22 */
+       uint16_t word1Reserved1:3;      /* FC Word 1, bit 21:19 */
+       uint16_t dhd:1;         /* FC Word 1, bit 18 */
+       uint16_t contIncSeqCnt:1;       /* FC Word 1, bit 17 */
+       uint16_t payloadlength:1;       /* FC Word 1, bit 16 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t broadcast:1;   /* FC Word 1, bit 24 */
+       uint16_t multicast:1;   /* FC Word 1, bit 25 */
+       uint16_t edtovResolution:1;     /* FC Word 1, bit 26 */
+       uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+       uint16_t fPort:1;       /* FC Word 1, bit 28 */
+       uint16_t word1Reserved2:1;      /* FC Word 1, bit 29 */
+       uint16_t randomOffset:1;        /* FC Word 1, bit 30 */
+       uint16_t increasingOffset:1;    /* FC Word 1, bit 31 */
+
+       uint16_t payloadlength:1;       /* FC Word 1, bit 16 */
+       uint16_t contIncSeqCnt:1;       /* FC Word 1, bit 17 */
+       uint16_t dhd:1;         /* FC Word 1, bit 18 */
+       uint16_t word1Reserved1:3;      /* FC Word 1, bit 21:19 */
+       uint16_t simplex:1;     /* FC Word 1, bit 22 */
+       uint16_t huntgroup:1;   /* FC Word 1, bit 23 */
+#endif
+
+       uint8_t bbRcvSizeMsb;   /* Upper nibble is reserved */
+       uint8_t bbRcvSizeLsb;   /* FC Word 1, byte 3 */
+       union {
+               struct {
+                       uint8_t word2Reserved1; /* FC Word 2 byte 0 */
+
+                       uint8_t totalConcurrSeq;        /* FC Word 2 byte 1 */
+                       uint8_t roByCategoryMsb;        /* FC Word 2 byte 2 */
+
+                       uint8_t roByCategoryLsb;        /* FC Word 2 byte 3 */
+               } nPort;
+               uint32_t r_a_tov;       /* R_A_TOV must be in B.E. format */
+       } w2;
+
+       uint32_t e_d_tov;       /* E_D_TOV must be in B.E. format */
+};
+
+struct class_parms {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t classValid:1;   /* FC Word 0, bit 31 */
+       uint8_t intermix:1;     /* FC Word 0, bit 30 */
+       uint8_t stackedXparent:1;       /* FC Word 0, bit 29 */
+       uint8_t stackedLockDown:1;      /* FC Word 0, bit 28 */
+       uint8_t seqDelivery:1;  /* FC Word 0, bit 27 */
+       uint8_t word0Reserved1:3;       /* FC Word 0, bit 24:26 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t word0Reserved1:3;       /* FC Word 0, bit 24:26 */
+       uint8_t seqDelivery:1;  /* FC Word 0, bit 27 */
+       uint8_t stackedLockDown:1;      /* FC Word 0, bit 28 */
+       uint8_t stackedXparent:1;       /* FC Word 0, bit 29 */
+       uint8_t intermix:1;     /* FC Word 0, bit 30 */
+       uint8_t classValid:1;   /* FC Word 0, bit 31 */
+
+#endif
+
+       uint8_t word0Reserved2; /* FC Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t iCtlXidReAssgn:2;       /* FC Word 0, Bit 14:15 */
+       uint8_t iCtlInitialPa:2;        /* FC Word 0, bit 12:13 */
+       uint8_t iCtlAck0capable:1;      /* FC Word 0, bit 11 */
+       uint8_t iCtlAckNcapable:1;      /* FC Word 0, bit 10 */
+       uint8_t word0Reserved3:2;       /* FC Word 0, bit  8: 9 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t word0Reserved3:2;       /* FC Word 0, bit  8: 9 */
+       uint8_t iCtlAckNcapable:1;      /* FC Word 0, bit 10 */
+       uint8_t iCtlAck0capable:1;      /* FC Word 0, bit 11 */
+       uint8_t iCtlInitialPa:2;        /* FC Word 0, bit 12:13 */
+       uint8_t iCtlXidReAssgn:2;       /* FC Word 0, Bit 14:15 */
+#endif
+
+       uint8_t word0Reserved4; /* FC Word 0, bit  0: 7 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t rCtlAck0capable:1;      /* FC Word 1, bit 31 */
+       uint8_t rCtlAckNcapable:1;      /* FC Word 1, bit 30 */
+       uint8_t rCtlXidInterlck:1;      /* FC Word 1, bit 29 */
+       uint8_t rCtlErrorPolicy:2;      /* FC Word 1, bit 27:28 */
+       uint8_t word1Reserved1:1;       /* FC Word 1, bit 26 */
+       uint8_t rCtlCatPerSeq:2;        /* FC Word 1, bit 24:25 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t rCtlCatPerSeq:2;        /* FC Word 1, bit 24:25 */
+       uint8_t word1Reserved1:1;       /* FC Word 1, bit 26 */
+       uint8_t rCtlErrorPolicy:2;      /* FC Word 1, bit 27:28 */
+       uint8_t rCtlXidInterlck:1;      /* FC Word 1, bit 29 */
+       uint8_t rCtlAckNcapable:1;      /* FC Word 1, bit 30 */
+       uint8_t rCtlAck0capable:1;      /* FC Word 1, bit 31 */
+#endif
+
+       uint8_t word1Reserved2; /* FC Word 1, bit 16:23 */
+       uint8_t rcvDataSizeMsb; /* FC Word 1, bit  8:15 */
+       uint8_t rcvDataSizeLsb; /* FC Word 1, bit  0: 7 */
+
+       uint8_t concurrentSeqMsb;       /* FC Word 2, bit 24:31 */
+       uint8_t concurrentSeqLsb;       /* FC Word 2, bit 16:23 */
+       uint8_t EeCreditSeqMsb; /* FC Word 2, bit  8:15 */
+       uint8_t EeCreditSeqLsb; /* FC Word 2, bit  0: 7 */
+
+       uint8_t openSeqPerXchgMsb;      /* FC Word 3, bit 24:31 */
+       uint8_t openSeqPerXchgLsb;      /* FC Word 3, bit 16:23 */
+       uint8_t word3Reserved1; /* Fc Word 3, bit  8:15 */
+       uint8_t word3Reserved2; /* Fc Word 3, bit  0: 7 */
+};
+
+struct serv_parm {     /* Structure is in Big Endian format */
+       struct csp cmn;
+       struct lpfc_name portName;
+       struct lpfc_name nodeName;
+       struct class_parms cls1;
+       struct class_parms cls2;
+       struct class_parms cls3;
+       struct class_parms cls4;
+       uint8_t vendorVersion[16];
+};
+
+/*
+ *  Extended Link Service LS_COMMAND codes (Payload Word 0)
+ */
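+/*
+ * The LS_COMMAND word is carried on the wire in big-endian order; the
+ * little-endian variants below are the same codes byte-swapped so they can
+ * be compared directly against the raw payload word.
+ */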
+#ifdef __BIG_ENDIAN_BITFIELD
+#define ELS_CMD_MASK      0xffff0000
+#define ELS_RSP_MASK      0xff000000
+#define ELS_CMD_LS_RJT    0x01000000
+#define ELS_CMD_ACC       0x02000000
+#define ELS_CMD_PLOGI     0x03000000
+#define ELS_CMD_FLOGI     0x04000000
+#define ELS_CMD_LOGO      0x05000000
+#define ELS_CMD_ABTX      0x06000000
+#define ELS_CMD_RCS       0x07000000
+#define ELS_CMD_RES       0x08000000
+#define ELS_CMD_RSS       0x09000000
+#define ELS_CMD_RSI       0x0A000000
+#define ELS_CMD_ESTS      0x0B000000
+#define ELS_CMD_ESTC      0x0C000000
+#define ELS_CMD_ADVC      0x0D000000
+#define ELS_CMD_RTV       0x0E000000
+#define ELS_CMD_RLS       0x0F000000
+#define ELS_CMD_ECHO      0x10000000
+#define ELS_CMD_TEST      0x11000000
+#define ELS_CMD_RRQ       0x12000000
+#define ELS_CMD_PRLI      0x20100014
+#define ELS_CMD_PRLO      0x21100014
+#define ELS_CMD_PDISC     0x50000000
+#define ELS_CMD_FDISC     0x51000000
+#define ELS_CMD_ADISC     0x52000000
+#define ELS_CMD_FARP      0x54000000
+#define ELS_CMD_FARPR     0x55000000
+#define ELS_CMD_FAN       0x60000000
+#define ELS_CMD_RSCN      0x61040000
+#define ELS_CMD_SCR       0x62000000
+#define ELS_CMD_RNID      0x78000000
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+#define ELS_CMD_MASK      0xffff
+#define ELS_RSP_MASK      0xff
+#define ELS_CMD_LS_RJT    0x01
+#define ELS_CMD_ACC       0x02
+#define ELS_CMD_PLOGI     0x03
+#define ELS_CMD_FLOGI     0x04
+#define ELS_CMD_LOGO      0x05
+#define ELS_CMD_ABTX      0x06
+#define ELS_CMD_RCS       0x07
+#define ELS_CMD_RES       0x08
+#define ELS_CMD_RSS       0x09
+#define ELS_CMD_RSI       0x0A
+#define ELS_CMD_ESTS      0x0B
+#define ELS_CMD_ESTC      0x0C
+#define ELS_CMD_ADVC      0x0D
+#define ELS_CMD_RTV       0x0E
+#define ELS_CMD_RLS       0x0F
+#define ELS_CMD_ECHO      0x10
+#define ELS_CMD_TEST      0x11
+#define ELS_CMD_RRQ       0x12
+#define ELS_CMD_PRLI      0x14001020
+#define ELS_CMD_PRLO      0x14001021
+#define ELS_CMD_PDISC     0x50
+#define ELS_CMD_FDISC     0x51
+#define ELS_CMD_ADISC     0x52
+#define ELS_CMD_FARP      0x54
+#define ELS_CMD_FARPR     0x55
+#define ELS_CMD_FAN       0x60
+#define ELS_CMD_RSCN      0x0461
+#define ELS_CMD_SCR       0x62
+#define ELS_CMD_RNID      0x78
+#endif
+
+/*
+ *  LS_RJT Payload Definition
+ */
+
+struct ls_rjt {        /* Structure is in Big Endian format */
+       union {
+               uint32_t lsRjtError;
+               struct {
+                       uint8_t lsRjtRsvd0;     /* FC Word 0, bit 24:31 */
+
+                       uint8_t lsRjtRsnCode;   /* FC Word 0, bit 16:23 */
+                       /* LS_RJT reason codes */
+#define LSRJT_INVALID_CMD     0x01
+#define LSRJT_LOGICAL_ERR     0x03
+#define LSRJT_LOGICAL_BSY     0x05
+#define LSRJT_PROTOCOL_ERR    0x07
+#define LSRJT_UNABLE_TPC      0x09     /* Unable to perform command */
+#define LSRJT_CMD_UNSUPPORTED 0x0B
+#define LSRJT_VENDOR_UNIQUE   0xFF     /* See Byte 3 */
+
+                       uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
+                       /* LS_RJT reason explanation */
+#define LSEXP_NOTHING_MORE      0x00
+#define LSEXP_SPARM_OPTIONS     0x01
+#define LSEXP_SPARM_ICTL        0x03
+#define LSEXP_SPARM_RCTL        0x05
+#define LSEXP_SPARM_RCV_SIZE    0x07
+#define LSEXP_SPARM_CONCUR_SEQ  0x09
+#define LSEXP_SPARM_CREDIT      0x0B
+#define LSEXP_INVALID_PNAME     0x0D
+#define LSEXP_INVALID_NNAME     0x0E
+#define LSEXP_INVALID_CSP       0x0F
+#define LSEXP_INVALID_ASSOC_HDR 0x11
+#define LSEXP_ASSOC_HDR_REQ     0x13
+#define LSEXP_INVALID_O_SID     0x15
+#define LSEXP_INVALID_OX_RX     0x17
+#define LSEXP_CMD_IN_PROGRESS   0x19
+#define LSEXP_INVALID_NPORT_ID  0x1F
+#define LSEXP_INVALID_SEQ_ID    0x21
+#define LSEXP_INVALID_XCHG      0x23
+#define LSEXP_INACTIVE_XCHG     0x25
+#define LSEXP_RQ_REQUIRED       0x27
+#define LSEXP_OUT_OF_RESOURCE   0x29
+#define LSEXP_CANT_GIVE_DATA    0x2A
+#define LSEXP_REQ_UNSUPPORTED   0x2C
+                       uint8_t vendorUnique;   /* FC Word 0, bit  0: 7 */
+               } b;
+       } un;
+};
+
+/*
+ *  N_Port Logout (FLOGO/PLOGO Request) Payload Definition
+ */
+
+typedef struct _LOGO {         /* Structure is in Big Endian format */
+       union {
+               uint32_t nPortId32;     /* Access nPortId as a word */
+               struct {
+                       uint8_t word1Reserved1; /* FC Word 1, bit 31:24 */
+                       uint8_t nPortIdByte0;   /* N_port  ID bit 16:23 */
+                       uint8_t nPortIdByte1;   /* N_port  ID bit  8:15 */
+                       uint8_t nPortIdByte2;   /* N_port  ID bit  0: 7 */
+               } b;
+       } un;
+       struct lpfc_name portName;      /* N_port name field */
+} LOGO;
+
+/*
+ *  FCP Login (PRLI Request / ACC) Payload Definition
+ */
+
+#define PRLX_PAGE_LEN   0x10
+#define TPRLO_PAGE_LEN  0x14
+
+typedef struct _PRLI {         /* Structure is in Big Endian format */
+       uint8_t prliType;       /* FC Parm Word 0, bit 24:31 */
+
+#define PRLI_FCP_TYPE 0x08
+       uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t origProcAssocV:1;       /* FC Parm Word 0, bit 15 */
+       uint8_t respProcAssocV:1;       /* FC Parm Word 0, bit 14 */
+       uint8_t estabImagePair:1;       /* FC Parm Word 0, bit 13 */
+
+       /*    ACC = imagePairEstablished */
+       uint8_t word0Reserved2:1;       /* FC Parm Word 0, bit 12 */
+       uint8_t acceptRspCode:4;        /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t acceptRspCode:4;        /* FC Parm Word 0, bit 8:11, ACC ONLY */
+       uint8_t word0Reserved2:1;       /* FC Parm Word 0, bit 12 */
+       uint8_t estabImagePair:1;       /* FC Parm Word 0, bit 13 */
+       uint8_t respProcAssocV:1;       /* FC Parm Word 0, bit 14 */
+       uint8_t origProcAssocV:1;       /* FC Parm Word 0, bit 15 */
+       /*    ACC = imagePairEstablished */
+#endif
+
+#define PRLI_REQ_EXECUTED     0x1      /* acceptRspCode */
+#define PRLI_NO_RESOURCES     0x2
+#define PRLI_INIT_INCOMPLETE  0x3
+#define PRLI_NO_SUCH_PA       0x4
+#define PRLI_PREDEF_CONFIG    0x5
+#define PRLI_PARTIAL_SUCCESS  0x6
+#define PRLI_INVALID_PAGE_CNT 0x7
+       uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+       uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+       uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+       uint8_t word3Reserved1; /* FC Parm Word 3, bit 24:31 */
+       uint8_t word3Reserved2; /* FC Parm Word 3, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t Word3bit15Resved:1;    /* FC Parm Word 3, bit 15 */
+       uint16_t Word3bit14Resved:1;    /* FC Parm Word 3, bit 14 */
+       uint16_t Word3bit13Resved:1;    /* FC Parm Word 3, bit 13 */
+       uint16_t Word3bit12Resved:1;    /* FC Parm Word 3, bit 12 */
+       uint16_t Word3bit11Resved:1;    /* FC Parm Word 3, bit 11 */
+       uint16_t Word3bit10Resved:1;    /* FC Parm Word 3, bit 10 */
+       uint16_t TaskRetryIdReq:1;      /* FC Parm Word 3, bit  9 */
+       uint16_t Retry:1;       /* FC Parm Word 3, bit  8 */
+       uint16_t ConfmComplAllowed:1;   /* FC Parm Word 3, bit  7 */
+       uint16_t dataOverLay:1; /* FC Parm Word 3, bit  6 */
+       uint16_t initiatorFunc:1;       /* FC Parm Word 3, bit  5 */
+       uint16_t targetFunc:1;  /* FC Parm Word 3, bit  4 */
+       uint16_t cmdDataMixEna:1;       /* FC Parm Word 3, bit  3 */
+       uint16_t dataRspMixEna:1;       /* FC Parm Word 3, bit  2 */
+       uint16_t readXferRdyDis:1;      /* FC Parm Word 3, bit  1 */
+       uint16_t writeXferRdyDis:1;     /* FC Parm Word 3, bit  0 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t Retry:1;       /* FC Parm Word 3, bit  8 */
+       uint16_t TaskRetryIdReq:1;      /* FC Parm Word 3, bit  9 */
+       uint16_t Word3bit10Resved:1;    /* FC Parm Word 3, bit 10 */
+       uint16_t Word3bit11Resved:1;    /* FC Parm Word 3, bit 11 */
+       uint16_t Word3bit12Resved:1;    /* FC Parm Word 3, bit 12 */
+       uint16_t Word3bit13Resved:1;    /* FC Parm Word 3, bit 13 */
+       uint16_t Word3bit14Resved:1;    /* FC Parm Word 3, bit 14 */
+       uint16_t Word3bit15Resved:1;    /* FC Parm Word 3, bit 15 */
+       uint16_t writeXferRdyDis:1;     /* FC Parm Word 3, bit  0 */
+       uint16_t readXferRdyDis:1;      /* FC Parm Word 3, bit  1 */
+       uint16_t dataRspMixEna:1;       /* FC Parm Word 3, bit  2 */
+       uint16_t cmdDataMixEna:1;       /* FC Parm Word 3, bit  3 */
+       uint16_t targetFunc:1;  /* FC Parm Word 3, bit  4 */
+       uint16_t initiatorFunc:1;       /* FC Parm Word 3, bit  5 */
+       uint16_t dataOverLay:1; /* FC Parm Word 3, bit  6 */
+       uint16_t ConfmComplAllowed:1;   /* FC Parm Word 3, bit  7 */
+#endif
+} PRLI;
+
+/*
+ *  FCP Logout (PRLO Request / ACC) Payload Definition
+ */
+
+typedef struct _PRLO {         /* Structure is in Big Endian format */
+       uint8_t prloType;       /* FC Parm Word 0, bit 24:31 */
+
+#define PRLO_FCP_TYPE  0x08
+       uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t origProcAssocV:1;       /* FC Parm Word 0, bit 15 */
+       uint8_t respProcAssocV:1;       /* FC Parm Word 0, bit 14 */
+       uint8_t word0Reserved2:2;       /* FC Parm Word 0, bit 12:13 */
+       uint8_t acceptRspCode:4;        /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t acceptRspCode:4;        /* FC Parm Word 0, bit 8:11, ACC ONLY */
+       uint8_t word0Reserved2:2;       /* FC Parm Word 0, bit 12:13 */
+       uint8_t respProcAssocV:1;       /* FC Parm Word 0, bit 14 */
+       uint8_t origProcAssocV:1;       /* FC Parm Word 0, bit 15 */
+#endif
+
+#define PRLO_REQ_EXECUTED     0x1      /* acceptRspCode */
+#define PRLO_NO_SUCH_IMAGE    0x4
+#define PRLO_INVALID_PAGE_CNT 0x7
+
+       uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+       uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+       uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+       uint32_t word3Reserved1;        /* FC Parm Word 3, bit 0:31 */
+} PRLO;
+
+typedef struct _ADISC {                /* Structure is in Big Endian format */
+       uint32_t hardAL_PA;
+       struct lpfc_name portName;
+       struct lpfc_name nodeName;
+       uint32_t DID;
+} ADISC;
+
+typedef struct _FARP {         /* Structure is in Big Endian format */
+       uint32_t Mflags:8;
+       uint32_t Odid:24;
+#define FARP_NO_ACTION          0      /* FARP information enclosed, no
+                                          action */
+#define FARP_MATCH_PORT         0x1    /* Match on Responder Port Name */
+#define FARP_MATCH_NODE         0x2    /* Match on Responder Node Name */
+#define FARP_MATCH_IP           0x4    /* Match on IP address, not supported */
+#define FARP_MATCH_IPV4         0x5    /* Match on IPV4 address, not
+                                          supported */
+#define FARP_MATCH_IPV6         0x6    /* Match on IPV6 address, not
+                                          supported */
+       uint32_t Rflags:8;
+       uint32_t Rdid:24;
+#define FARP_REQUEST_PLOGI      0x1    /* Request for PLOGI */
+#define FARP_REQUEST_FARPR      0x2    /* Request for FARP Response */
+       struct lpfc_name OportName;
+       struct lpfc_name OnodeName;
+       struct lpfc_name RportName;
+       struct lpfc_name RnodeName;
+       uint8_t Oipaddr[16];
+       uint8_t Ripaddr[16];
+} FARP;
+
+typedef struct _FAN {          /* Structure is in Big Endian format */
+       uint32_t Fdid;
+       struct lpfc_name FportName;
+       struct lpfc_name FnodeName;
+} FAN;
+
+typedef struct _SCR {          /* Structure is in Big Endian format */
+       uint8_t resvd1;
+       uint8_t resvd2;
+       uint8_t resvd3;
+       uint8_t Function;
+#define  SCR_FUNC_FABRIC     0x01
+#define  SCR_FUNC_NPORT      0x02
+#define  SCR_FUNC_FULL       0x03
+#define  SCR_CLEAR           0xff
+} SCR;
+
+typedef struct _RNID_TOP_DISC {
+       struct lpfc_name portName;
+       uint8_t resvd[8];
+       uint32_t unitType;
+#define RNID_HBA            0x7
+#define RNID_HOST           0xa
+#define RNID_DRIVER         0xd
+       uint32_t physPort;
+       uint32_t attachedNodes;
+       uint16_t ipVersion;
+#define RNID_IPV4           0x1
+#define RNID_IPV6           0x2
+       uint16_t UDPport;
+       uint8_t ipAddr[16];
+       uint16_t resvd1;
+       uint16_t flags;
+#define RNID_TD_SUPPORT     0x1
+#define RNID_LP_VALID       0x2
+} RNID_TOP_DISC;
+
+typedef struct _RNID {         /* Structure is in Big Endian format */
+       uint8_t Format;
+#define RNID_TOPOLOGY_DISC  0xdf
+       uint8_t CommonLen;
+       uint8_t resvd1;
+       uint8_t SpecificLen;
+       struct lpfc_name portName;
+       struct lpfc_name nodeName;
+       union {
+               RNID_TOP_DISC topologyDisc;     /* topology disc (0xdf) */
+       } un;
+} RNID;
+
+typedef struct _RRQ {          /* Structure is in Big Endian format */
+       uint32_t SID;
+       uint16_t Oxid;
+       uint16_t Rxid;
+       uint8_t resv[32];       /* optional association hdr */
+} RRQ;
+
+/* This is used for RSCN command */
+typedef struct _D_ID {         /* Structure is in Big Endian format */
+       union {
+               uint32_t word;
+               struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint8_t resv;
+                       uint8_t domain;
+                       uint8_t area;
+                       uint8_t id;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint8_t id;
+                       uint8_t area;
+                       uint8_t domain;
+                       uint8_t resv;
+#endif
+               } b;
+       } un;
+} D_ID;
+
+/*
+ *  Structure to define all ELS Payload types
+ */
+
+typedef struct _ELS_PKT {      /* Structure is in Big Endian format */
+       uint8_t elsCode;        /* FC Word 0, bit 24:31 */
+       uint8_t elsByte1;
+       uint8_t elsByte2;
+       uint8_t elsByte3;
+       union {
+               struct ls_rjt lsRjt;    /* Payload for LS_RJT ELS response */
+               struct serv_parm logi;  /* Payload for PLOGI/FLOGI/PDISC/ACC */
+               LOGO logo;      /* Payload for PLOGO/FLOGO/ACC */
+               PRLI prli;      /* Payload for PRLI/ACC */
+               PRLO prlo;      /* Payload for PRLO/ACC */
+               ADISC adisc;    /* Payload for ADISC/ACC */
+               FARP farp;      /* Payload for FARP/ACC */
+               FAN fan;        /* Payload for FAN */
+               SCR scr;        /* Payload for SCR/ACC */
+               RRQ rrq;        /* Payload for RRQ */
+               RNID rnid;      /* Payload for RNID */
+               uint8_t pad[128 - 4];   /* Pad out to payload of 128 bytes */
+       } un;
+} ELS_PKT;
+
+/*
+ * FDMI
+ * HBA Management Operations Command Codes
+ */
+#define  SLI_MGMT_GRHL     0x100       /* Get registered HBA list */
+#define  SLI_MGMT_GHAT     0x101       /* Get HBA attributes */
+#define  SLI_MGMT_GRPL     0x102       /* Get registered Port list */
+#define  SLI_MGMT_GPAT     0x110       /* Get Port attributes */
+#define  SLI_MGMT_RHBA     0x200       /* Register HBA */
+#define  SLI_MGMT_RHAT     0x201       /* Register HBA attributes */
+#define  SLI_MGMT_RPRT     0x210       /* Register Port */
+#define  SLI_MGMT_RPA      0x211       /* Register Port attributes */
+#define  SLI_MGMT_DHBA     0x300       /* De-register HBA */
+#define  SLI_MGMT_DPRT     0x310       /* De-register Port */
+
+/*
+ * Management Service Subtypes
+ */
+#define  SLI_CT_FDMI_Subtypes     0x10
+
+/*
+ * HBA Management Service Reject Code
+ */
+#define  REJECT_CODE             0x9   /* Unable to perform command request */
+
+/*
+ * HBA Management Service Reject Reason Code
+ * Please refer to the Reason Codes above
+ */
+
+/*
+ * HBA Attribute Types
+ */
+#define  NODE_NAME               0x1
+#define  MANUFACTURER            0x2
+#define  SERIAL_NUMBER           0x3
+#define  MODEL                   0x4
+#define  MODEL_DESCRIPTION       0x5
+#define  HARDWARE_VERSION        0x6
+#define  DRIVER_VERSION          0x7
+#define  OPTION_ROM_VERSION      0x8
+#define  FIRMWARE_VERSION        0x9
+#define  OS_NAME_VERSION        0xa
+#define  MAX_CT_PAYLOAD_LEN     0xb
+
+/*
+ * Port Attribute Types
+ */
+#define  SUPPORTED_FC4_TYPES     0x1
+#define  SUPPORTED_SPEED         0x2
+#define  PORT_SPEED              0x3
+#define  MAX_FRAME_SIZE          0x4
+#define  OS_DEVICE_NAME          0x5
+#define  HOST_NAME               0x6
+
+union AttributesDef {
+       /* Structure is in Big Endian format */
+       struct {
+               uint32_t AttrType:16;
+               uint32_t AttrLen:16;
+       } bits;
+       uint32_t word;
+};
+
+
+/*
+ * HBA Attribute Entry (8 - 260 bytes)
+ */
+typedef struct {
+       union AttributesDef ad;
+       union {
+               uint32_t VendorSpecific;
+               uint8_t Manufacturer[64];
+               uint8_t SerialNumber[64];
+               uint8_t Model[256];
+               uint8_t ModelDescription[256];
+               uint8_t HardwareVersion[256];
+               uint8_t DriverVersion[256];
+               uint8_t OptionROMVersion[256];
+               uint8_t FirmwareVersion[256];
+               struct lpfc_name NodeName;
+               uint8_t SupportFC4Types[32];
+               uint32_t SupportSpeed;
+               uint32_t PortSpeed;
+               uint32_t MaxFrameSize;
+               uint8_t OsDeviceName[256];
+               uint8_t OsNameVersion[256];
+               uint32_t MaxCTPayloadLen;
+               uint8_t HostName[256];
+       } un;
+} ATTRIBUTE_ENTRY;
+
+/*
+ * HBA Attribute Block
+ */
+typedef struct {
+       uint32_t EntryCnt;      /* Number of HBA attribute entries */
+       ATTRIBUTE_ENTRY Entry;  /* Variable-length array */
+} ATTRIBUTE_BLOCK;
+
+/*
+ * Port Entry
+ */
+typedef struct {
+       struct lpfc_name PortName;
+} PORT_ENTRY;
+
+/*
+ * HBA Identifier
+ */
+typedef struct {
+       struct lpfc_name PortName;
+} HBA_IDENTIFIER;
+
+/*
+ * Registered Port List Format
+ */
+typedef struct {
+       uint32_t EntryCnt;
+       PORT_ENTRY pe;          /* Variable-length array */
+} REG_PORT_LIST;
+
+/*
+ * Register HBA(RHBA)
+ */
+typedef struct {
+       HBA_IDENTIFIER hi;
+       REG_PORT_LIST rpl;      /* variable-length array */
+/* ATTRIBUTE_BLOCK   ab; */
+} REG_HBA;
+
+/*
+ * Register HBA Attributes (RHAT)
+ */
+typedef struct {
+       struct lpfc_name HBA_PortName;
+       ATTRIBUTE_BLOCK ab;
+} REG_HBA_ATTRIBUTE;
+
+/*
+ * Register Port Attributes (RPA)
+ */
+typedef struct {
+       struct lpfc_name PortName;
+       ATTRIBUTE_BLOCK ab;
+} REG_PORT_ATTRIBUTE;
+
+/*
+ * Get Registered HBA List (GRHL) Accept Payload Format
+ */
+typedef struct {
+       uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
+       struct lpfc_name HBA_PortName;  /* Variable-length array */
+} GRHL_ACC_PAYLOAD;
+
+/*
+ * Get Registered Port List (GRPL) Accept Payload Format
+ */
+typedef struct {
+       uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
+       PORT_ENTRY Reg_Port_Entry[1];   /* Variable-length array */
+} GRPL_ACC_PAYLOAD;
+
+/*
+ * Get Port Attributes (GPAT) Accept Payload Format
+ */
+
+typedef struct {
+       ATTRIBUTE_BLOCK pab;
+} GPAT_ACC_PAYLOAD;
+
+
+/*
+ *  Begin HBA configuration parameters.
+ *  The PCI configuration register BAR assignments are:
+ *  BAR0, offset 0x10 - SLIM base memory address
+ *  BAR1, offset 0x14 - SLIM base memory high address
+ *  BAR2, offset 0x18 - REGISTER base memory address
+ *  BAR3, offset 0x1c - REGISTER base memory high address
+ *  BAR4, offset 0x20 - BIU I/O registers
+ *  BAR5, offset 0x24 - REGISTER base io high address
+ */
+
+/* Number of rings currently used and available. */
+#define MAX_CONFIGURED_RINGS     3
+#define MAX_RINGS                4
+
+/* IOCB / Mailbox is owned by FireFly */
+#define OWN_CHIP        1
+
+/* IOCB / Mailbox is owned by Host */
+#define OWN_HOST        0
+
+/* Number of 4-byte words in an IOCB. */
+#define IOCB_WORD_SZ    8
+
+/* defines for type field in fc header */
+#define FC_ELS_DATA     0x1
+#define FC_LLC_SNAP     0x5
+#define FC_FCP_DATA     0x8
+#define FC_COMMON_TRANSPORT_ULP 0x20
+
+/* defines for rctl field in fc header */
+#define FC_DEV_DATA     0x0
+#define FC_UNSOL_CTL    0x2
+#define FC_SOL_CTL      0x3
+#define FC_UNSOL_DATA   0x4
+#define FC_FCP_CMND     0x6
+#define FC_ELS_REQ      0x22
+#define FC_ELS_RSP      0x23
+
+/* network headers for Dfctl field */
+#define FC_NET_HDR      0x20
+
+/* Start FireFly Register definitions */
+#define PCI_VENDOR_ID_EMULEX        0x10df
+#define PCI_DEVICE_ID_FIREFLY       0x1ae5
+#define PCI_DEVICE_ID_SUPERFLY      0xf700
+#define PCI_DEVICE_ID_DRAGONFLY     0xf800
+#define PCI_DEVICE_ID_RFLY          0xf095
+#define PCI_DEVICE_ID_PFLY          0xf098
+#define PCI_DEVICE_ID_TFLY          0xf0a5
+#define PCI_DEVICE_ID_CENTAUR       0xf900
+#define PCI_DEVICE_ID_PEGASUS       0xf980
+#define PCI_DEVICE_ID_THOR          0xfa00
+#define PCI_DEVICE_ID_VIPER         0xfb00
+#define PCI_DEVICE_ID_HELIOS        0xfd00
+#define PCI_DEVICE_ID_BMID          0xf0d5
+#define PCI_DEVICE_ID_BSMB          0xf0d1
+#define PCI_DEVICE_ID_ZEPHYR        0xfe00
+#define PCI_DEVICE_ID_ZMID          0xf0e5
+#define PCI_DEVICE_ID_ZSMB          0xf0e1
+#define PCI_DEVICE_ID_LP101        0xf0a1
+#define PCI_DEVICE_ID_LP10000S     0xfc00
+
+#define JEDEC_ID_ADDRESS            0x0080001c
+#define FIREFLY_JEDEC_ID            0x1ACC
+#define SUPERFLY_JEDEC_ID           0x0020
+#define DRAGONFLY_JEDEC_ID          0x0021
+#define DRAGONFLY_V2_JEDEC_ID       0x0025
+#define CENTAUR_2G_JEDEC_ID         0x0026
+#define CENTAUR_1G_JEDEC_ID         0x0028
+#define PEGASUS_ORION_JEDEC_ID      0x0036
+#define PEGASUS_JEDEC_ID            0x0038
+#define THOR_JEDEC_ID               0x0012
+#define HELIOS_JEDEC_ID             0x0364
+#define ZEPHYR_JEDEC_ID             0x0577
+#define VIPER_JEDEC_ID              0x4838
+
+#define JEDEC_ID_MASK               0x0FFFF000
+#define JEDEC_ID_SHIFT              12
+#define FC_JEDEC_ID(id)             ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
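+
+/* FC_JEDEC_ID() extracts the 16-bit JEDEC id from bits 27:12 of the raw
+ * register value, for comparison with the *_JEDEC_ID values above.
+ */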
+
+typedef struct {               /* FireFly BIU registers */
+       uint32_t hostAtt;       /* See definitions for Host Attention
+                                  register */
+       uint32_t chipAtt;       /* See definitions for Chip Attention
+                                  register */
+       uint32_t hostStatus;    /* See definitions for Host Status register */
+       uint32_t hostControl;   /* See definitions for Host Control register */
+       uint32_t buiConfig;     /* See definitions for BIU configuration
+                                  register */
+} FF_REGS;
+
+/* IO Register size in bytes */
+#define FF_REG_AREA_SIZE       256
+
+/* Host Attention Register */
+
+#define HA_REG_OFFSET  0       /* Byte offset from register base address */
+
+#define HA_R0RE_REQ    0x00000001      /* Bit  0 */
+#define HA_R0CE_RSP    0x00000002      /* Bit  1 */
+#define HA_R0ATT       0x00000008      /* Bit  3 */
+#define HA_R1RE_REQ    0x00000010      /* Bit  4 */
+#define HA_R1CE_RSP    0x00000020      /* Bit  5 */
+#define HA_R1ATT       0x00000080      /* Bit  7 */
+#define HA_R2RE_REQ    0x00000100      /* Bit  8 */
+#define HA_R2CE_RSP    0x00000200      /* Bit  9 */
+#define HA_R2ATT       0x00000800      /* Bit 11 */
+#define HA_R3RE_REQ    0x00001000      /* Bit 12 */
+#define HA_R3CE_RSP    0x00002000      /* Bit 13 */
+#define HA_R3ATT       0x00008000      /* Bit 15 */
+#define HA_LATT        0x20000000      /* Bit 29 */
+#define HA_MBATT       0x40000000      /* Bit 30 */
+#define HA_ERATT       0x80000000      /* Bit 31 */
+
+#define HA_RXRE_REQ    0x00000001      /* Bit  0 */
+#define HA_RXCE_RSP    0x00000002      /* Bit  1 */
+#define HA_RXATT       0x00000008      /* Bit  3 */
+#define HA_RXMASK      0x0000000f
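+
+/* The HA_RX* values are ring-relative forms of the per-ring attention bits
+ * above; shifting them left by four bits per ring number yields that ring's
+ * bits.
+ */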
+
+/* Chip Attention Register */
+
+#define CA_REG_OFFSET  4       /* Byte offset from register base address */
+
+#define CA_R0CE_REQ    0x00000001      /* Bit  0 */
+#define CA_R0RE_RSP    0x00000002      /* Bit  1 */
+#define CA_R0ATT       0x00000008      /* Bit  3 */
+#define CA_R1CE_REQ    0x00000010      /* Bit  4 */
+#define CA_R1RE_RSP    0x00000020      /* Bit  5 */
+#define CA_R1ATT       0x00000080      /* Bit  7 */
+#define CA_R2CE_REQ    0x00000100      /* Bit  8 */
+#define CA_R2RE_RSP    0x00000200      /* Bit  9 */
+#define CA_R2ATT       0x00000800      /* Bit 11 */
+#define CA_R3CE_REQ    0x00001000      /* Bit 12 */
+#define CA_R3RE_RSP    0x00002000      /* Bit 13 */
+#define CA_R3ATT       0x00008000      /* Bit 15 */
+#define CA_MBATT       0x40000000      /* Bit 30 */
+
+/* Host Status Register */
+
+#define HS_REG_OFFSET  8       /* Byte offset from register base address */
+
+#define HS_MBRDY       0x00400000      /* Bit 22 */
+#define HS_FFRDY       0x00800000      /* Bit 23 */
+#define HS_FFER8       0x01000000      /* Bit 24 */
+#define HS_FFER7       0x02000000      /* Bit 25 */
+#define HS_FFER6       0x04000000      /* Bit 26 */
+#define HS_FFER5       0x08000000      /* Bit 27 */
+#define HS_FFER4       0x10000000      /* Bit 28 */
+#define HS_FFER3       0x20000000      /* Bit 29 */
+#define HS_FFER2       0x40000000      /* Bit 30 */
+#define HS_FFER1       0x80000000      /* Bit 31 */
+#define HS_FFERM       0xFF000000      /* Mask for error bits 31:24 */
+
+/* Host Control Register */
+
+#define HC_REG_OFFSET  12      /* Byte offset from register base address */
+
+#define HC_MBINT_ENA   0x00000001      /* Bit  0 */
+#define HC_R0INT_ENA   0x00000002      /* Bit  1 */
+#define HC_R1INT_ENA   0x00000004      /* Bit  2 */
+#define HC_R2INT_ENA   0x00000008      /* Bit  3 */
+#define HC_R3INT_ENA   0x00000010      /* Bit  4 */
+#define HC_INITHBI     0x02000000      /* Bit 25 */
+#define HC_INITMB      0x04000000      /* Bit 26 */
+#define HC_INITFF      0x08000000      /* Bit 27 */
+#define HC_LAINT_ENA   0x20000000      /* Bit 29 */
+#define HC_ERINT_ENA   0x80000000      /* Bit 31 */
+
+/* Mailbox Commands */
+#define MBX_SHUTDOWN        0x00       /* terminate testing */
+#define MBX_LOAD_SM         0x01
+#define MBX_READ_NV         0x02
+#define MBX_WRITE_NV        0x03
+#define MBX_RUN_BIU_DIAG    0x04
+#define MBX_INIT_LINK       0x05
+#define MBX_DOWN_LINK       0x06
+#define MBX_CONFIG_LINK     0x07
+#define MBX_CONFIG_RING     0x09
+#define MBX_RESET_RING      0x0A
+#define MBX_READ_CONFIG     0x0B
+#define MBX_READ_RCONFIG    0x0C
+#define MBX_READ_SPARM      0x0D
+#define MBX_READ_STATUS     0x0E
+#define MBX_READ_RPI        0x0F
+#define MBX_READ_XRI        0x10
+#define MBX_READ_REV        0x11
+#define MBX_READ_LNK_STAT   0x12
+#define MBX_REG_LOGIN       0x13
+#define MBX_UNREG_LOGIN     0x14
+#define MBX_READ_LA         0x15
+#define MBX_CLEAR_LA        0x16
+#define MBX_DUMP_MEMORY     0x17
+#define MBX_DUMP_CONTEXT    0x18
+#define MBX_RUN_DIAGS       0x19
+#define MBX_RESTART         0x1A
+#define MBX_UPDATE_CFG      0x1B
+#define MBX_DOWN_LOAD       0x1C
+#define MBX_DEL_LD_ENTRY    0x1D
+#define MBX_RUN_PROGRAM     0x1E
+#define MBX_SET_MASK        0x20
+#define MBX_SET_SLIM        0x21
+#define MBX_UNREG_D_ID      0x23
+#define MBX_CONFIG_FARP     0x25
+
+#define MBX_LOAD_AREA       0x81
+#define MBX_RUN_BIU_DIAG64  0x84
+#define MBX_CONFIG_PORT     0x88
+#define MBX_READ_SPARM64    0x8D
+#define MBX_READ_RPI64      0x8F
+#define MBX_REG_LOGIN64     0x93
+#define MBX_READ_LA64       0x95
+
+#define MBX_FLASH_WR_ULA    0x98
+#define MBX_SET_DEBUG       0x99
+#define MBX_LOAD_EXP_ROM    0x9C
+
+#define MBX_MAX_CMDS        0x9D
+#define MBX_SLI2_CMD_MASK   0x80
+
+/* IOCB Commands */
+
+#define CMD_RCV_SEQUENCE_CX     0x01
+#define CMD_XMIT_SEQUENCE_CR    0x02
+#define CMD_XMIT_SEQUENCE_CX    0x03
+#define CMD_XMIT_BCAST_CN       0x04
+#define CMD_XMIT_BCAST_CX       0x05
+#define CMD_QUE_RING_BUF_CN     0x06
+#define CMD_QUE_XRI_BUF_CX      0x07
+#define CMD_IOCB_CONTINUE_CN    0x08
+#define CMD_RET_XRI_BUF_CX      0x09
+#define CMD_ELS_REQUEST_CR      0x0A
+#define CMD_ELS_REQUEST_CX      0x0B
+#define CMD_RCV_ELS_REQ_CX      0x0D
+#define CMD_ABORT_XRI_CN        0x0E
+#define CMD_ABORT_XRI_CX        0x0F
+#define CMD_CLOSE_XRI_CN        0x10
+#define CMD_CLOSE_XRI_CX        0x11
+#define CMD_CREATE_XRI_CR       0x12
+#define CMD_CREATE_XRI_CX       0x13
+#define CMD_GET_RPI_CN          0x14
+#define CMD_XMIT_ELS_RSP_CX     0x15
+#define CMD_GET_RPI_CR          0x16
+#define CMD_XRI_ABORTED_CX      0x17
+#define CMD_FCP_IWRITE_CR       0x18
+#define CMD_FCP_IWRITE_CX       0x19
+#define CMD_FCP_IREAD_CR        0x1A
+#define CMD_FCP_IREAD_CX        0x1B
+#define CMD_FCP_ICMND_CR        0x1C
+#define CMD_FCP_ICMND_CX        0x1D
+
+#define CMD_ADAPTER_MSG         0x20
+#define CMD_ADAPTER_DUMP        0x22
+
+/*  SLI_2 IOCB Command Set */
+
+#define CMD_RCV_SEQUENCE64_CX   0x81
+#define CMD_XMIT_SEQUENCE64_CR  0x82
+#define CMD_XMIT_SEQUENCE64_CX  0x83
+#define CMD_XMIT_BCAST64_CN     0x84
+#define CMD_XMIT_BCAST64_CX     0x85
+#define CMD_QUE_RING_BUF64_CN   0x86
+#define CMD_QUE_XRI_BUF64_CX    0x87
+#define CMD_IOCB_CONTINUE64_CN  0x88
+#define CMD_RET_XRI_BUF64_CX    0x89
+#define CMD_ELS_REQUEST64_CR    0x8A
+#define CMD_ELS_REQUEST64_CX    0x8B
+#define CMD_ABORT_MXRI64_CN     0x8C
+#define CMD_RCV_ELS_REQ64_CX    0x8D
+#define CMD_XMIT_ELS_RSP64_CX   0x95
+#define CMD_FCP_IWRITE64_CR     0x98
+#define CMD_FCP_IWRITE64_CX     0x99
+#define CMD_FCP_IREAD64_CR      0x9A
+#define CMD_FCP_IREAD64_CX      0x9B
+#define CMD_FCP_ICMND64_CR      0x9C
+#define CMD_FCP_ICMND64_CX      0x9D
+
+#define CMD_GEN_REQUEST64_CR    0xC2
+#define CMD_GEN_REQUEST64_CX    0xC3
+
+#define CMD_MAX_IOCB_CMD        0xE6
+#define CMD_IOCB_MASK           0xff
+
+#define MAX_MSG_DATA            28     /* max msg data in CMD_ADAPTER_MSG
+                                          iocb */
+#define LPFC_MAX_ADPTMSG         32    /* max msg data */
+/*
+ *  Define Status
+ */
+#define MBX_SUCCESS                 0
+#define MBXERR_NUM_RINGS            1
+#define MBXERR_NUM_IOCBS            2
+#define MBXERR_IOCBS_EXCEEDED       3
+#define MBXERR_BAD_RING_NUMBER      4
+#define MBXERR_MASK_ENTRIES_RANGE   5
+#define MBXERR_MASKS_EXCEEDED       6
+#define MBXERR_BAD_PROFILE          7
+#define MBXERR_BAD_DEF_CLASS        8
+#define MBXERR_BAD_MAX_RESPONDER    9
+#define MBXERR_BAD_MAX_ORIGINATOR   10
+#define MBXERR_RPI_REGISTERED       11
+#define MBXERR_RPI_FULL             12
+#define MBXERR_NO_RESOURCES         13
+#define MBXERR_BAD_RCV_LENGTH       14
+#define MBXERR_DMA_ERROR            15
+#define MBXERR_ERROR                16
+#define MBX_NOT_FINISHED           255
+
+#define MBX_BUSY                   0xffffff /* Attempted cmd to busy Mailbox */
+#define MBX_TIMEOUT                0xfffffe /* time-out expired waiting for mailbox command completion */
+
+/*
+ *    Begin Structure Definitions for Mailbox Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t tval;
+       uint8_t tmask;
+       uint8_t rval;
+       uint8_t rmask;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t rmask;
+       uint8_t rval;
+       uint8_t tmask;
+       uint8_t tval;
+#endif
+} RR_REG;
+
+struct ulp_bde {
+       uint32_t bdeAddress;
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t bdeReserved:4;
+       uint32_t bdeAddrHigh:4;
+       uint32_t bdeSize:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t bdeSize:24;
+       uint32_t bdeAddrHigh:4;
+       uint32_t bdeReserved:4;
+#endif
+};
+
+struct ulp_bde64 {     /* SLI-2 */
+       union ULP_BDE_TUS {
+               uint32_t w;
+               struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint32_t bdeFlags:8;    /* BDE Flags 0 IS A SUPPORTED
+                                                  VALUE !! */
+                       uint32_t bdeSize:24;    /* Size of buffer (in bytes) */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint32_t bdeSize:24;    /* Size of buffer (in bytes) */
+                       uint32_t bdeFlags:8;    /* BDE Flags 0 IS A SUPPORTED
+                                                  VALUE !! */
+#endif
+
+#define BUFF_USE_RSVD       0x01       /* bdeFlags */
+#define BUFF_USE_INTRPT     0x02       /* Not Implemented with LP6000 */
+#define BUFF_USE_CMND       0x04       /* Optional, 1=cmd/rsp 0=data buffer */
+#define BUFF_USE_RCV        0x08       /*  "" "", 1=rcv buffer, 0=xmit
+                                           buffer */
+#define BUFF_TYPE_32BIT     0x10       /*  "" "", 1=32 bit addr 0=64 bit
+                                           addr */
+#define BUFF_TYPE_SPECIAL   0x20       /* Not Implemented with LP6000  */
+#define BUFF_TYPE_BDL       0x40       /* Optional,  may be set in BDL */
+#define BUFF_TYPE_INVALID   0x80       /*  ""  "" */
+               } f;
+       } tus;
+       uint32_t addrLow;
+       uint32_t addrHigh;
+};
+#define BDE64_SIZE_WORD 0
+#define BPL64_SIZE_WORD 0x40
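A 64-bit BDE thus carries a split DMA address plus a 24-bit byte count in the tus union. A hedged sketch of filling one in (pa and len are hypothetical locals; plain shifts are used instead of driver helpers, and endianness conversion is omitted):

    /* Illustrative only: describe a buffer at DMA address 'pa', 'len' bytes long. */
    struct ulp_bde64 bde;

    bde.addrLow        = (uint32_t)(pa & 0xffffffffULL);
    bde.addrHigh       = (uint32_t)((uint64_t)pa >> 32);
    bde.tus.f.bdeSize  = len;      /* 24-bit length, in bytes */
    bde.tus.f.bdeFlags = 0;        /* 0 is the supported value, per the comment above */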
+
+typedef struct ULP_BDL {       /* SLI-2 */
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t bdeFlags:8;    /* BDL Flags */
+       uint32_t bdeSize:24;    /* Size of BDL array in host memory (bytes) */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t bdeSize:24;    /* Size of BDL array in host memory (bytes) */
+       uint32_t bdeFlags:8;    /* BDL Flags */
+#endif
+
+       uint32_t addrLow;       /* Address 0:31 */
+       uint32_t addrHigh;      /* Address 32:63 */
+       uint32_t ulpIoTag32;    /* Can be used for 32 bit I/O Tag */
+} ULP_BDL;
+
+/* Structure for MB Command LOAD_SM and DOWN_LOAD */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rsvd2:25;
+       uint32_t acknowledgment:1;
+       uint32_t version:1;
+       uint32_t erase_or_prog:1;
+       uint32_t update_flash:1;
+       uint32_t update_ram:1;
+       uint32_t method:1;
+       uint32_t load_cmplt:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t load_cmplt:1;
+       uint32_t method:1;
+       uint32_t update_ram:1;
+       uint32_t update_flash:1;
+       uint32_t erase_or_prog:1;
+       uint32_t version:1;
+       uint32_t acknowledgment:1;
+       uint32_t rsvd2:25;
+#endif
+
+       uint32_t dl_to_adr_low;
+       uint32_t dl_to_adr_high;
+       uint32_t dl_len;
+       union {
+               uint32_t dl_from_mbx_offset;
+               struct ulp_bde dl_from_bde;
+               struct ulp_bde64 dl_from_bde64;
+       } un;
+
+} LOAD_SM_VAR;
+
+/* Structure for MB Command READ_NVPARM (02) */
+
+typedef struct {
+       uint32_t rsvd1[3];      /* Read as all one's */
+       uint32_t rsvd2;         /* Read as all zero's */
+       uint32_t portname[2];   /* N_PORT name */
+       uint32_t nodename[2];   /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t pref_DID:24;
+       uint32_t hardAL_PA:8;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t hardAL_PA:8;
+       uint32_t pref_DID:24;
+#endif
+
+       uint32_t rsvd3[21];     /* Read as all one's */
+} READ_NV_VAR;
+
+/* Structure for MB Command WRITE_NVPARMS (03) */
+
+typedef struct {
+       uint32_t rsvd1[3];      /* Must be all one's */
+       uint32_t rsvd2;         /* Must be all zero's */
+       uint32_t portname[2];   /* N_PORT name */
+       uint32_t nodename[2];   /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t pref_DID:24;
+       uint32_t hardAL_PA:8;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t hardAL_PA:8;
+       uint32_t pref_DID:24;
+#endif
+
+       uint32_t rsvd3[21];     /* Must be all one's */
+} WRITE_NV_VAR;
+
+/* Structure for MB Command RUN_BIU_DIAG (04) */
+/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
+
+typedef struct {
+       uint32_t rsvd1;
+       union {
+               struct {
+                       struct ulp_bde xmit_bde;
+                       struct ulp_bde rcv_bde;
+               } s1;
+               struct {
+                       struct ulp_bde64 xmit_bde64;
+                       struct ulp_bde64 rcv_bde64;
+               } s2;
+       } un;
+} BIU_DIAG_VAR;
+
+/* Structure for MB Command INIT_LINK (05) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rsvd1:24;
+       uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+       uint32_t rsvd1:24;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t fabric_AL_PA;   /* If using a Fabric Assigned AL_PA */
+       uint8_t rsvd2;
+       uint16_t link_flags;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t link_flags;
+       uint8_t rsvd2;
+       uint8_t fabric_AL_PA;   /* If using a Fabric Assigned AL_PA */
+#endif
+
+#define FLAGS_LOCAL_LB               0x01 /* link_flags (=1) ENDEC loopback */
+#define FLAGS_TOPOLOGY_MODE_LOOP_PT  0x00 /* Attempt loop then pt-pt */
+#define FLAGS_TOPOLOGY_MODE_PT_PT    0x02 /* Attempt pt-pt only */
+#define FLAGS_TOPOLOGY_MODE_LOOP     0x04 /* Attempt loop only */
+#define FLAGS_TOPOLOGY_MODE_PT_LOOP  0x06 /* Attempt pt-pt then loop */
+#define FLAGS_LIRP_LILP              0x80 /* LIRP / LILP is disabled */
+
+#define FLAGS_TOPOLOGY_FAILOVER      0x0400    /* Bit 10 */
+#define FLAGS_LINK_SPEED             0x0800    /* Bit 11 */
+
+       uint32_t link_speed;
+#define LINK_SPEED_AUTO 0       /* Auto selection */
+#define LINK_SPEED_1G   1       /* 1 Gigabaud */
+#define LINK_SPEED_2G   2       /* 2 Gigabaud */
+#define LINK_SPEED_4G   4       /* 4 Gigabaud */
+#define LINK_SPEED_8G   8       /* 8 Gigabaud */
+#define LINK_SPEED_10G   16      /* 10 Gigabaud */
+
+} INIT_LINK_VAR;
+
+/* Structure for MB Command DOWN_LINK (06) */
+
+typedef struct {
+       uint32_t rsvd1;
+} DOWN_LINK_VAR;
+
+/* Structure for MB Command CONFIG_LINK (07) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t cr:1;
+       uint32_t ci:1;
+       uint32_t cr_delay:6;
+       uint32_t cr_count:8;
+       uint32_t rsvd1:8;
+       uint32_t MaxBBC:8;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t MaxBBC:8;
+       uint32_t rsvd1:8;
+       uint32_t cr_count:8;
+       uint32_t cr_delay:6;
+       uint32_t ci:1;
+       uint32_t cr:1;
+#endif
+
+       uint32_t myId;
+       uint32_t rsvd2;
+       uint32_t edtov;
+       uint32_t arbtov;
+       uint32_t ratov;
+       uint32_t rttov;
+       uint32_t altov;
+       uint32_t crtov;
+       uint32_t citov;
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rrq_enable:1;
+       uint32_t rrq_immed:1;
+       uint32_t rsvd4:29;
+       uint32_t ack0_enable:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t ack0_enable:1;
+       uint32_t rsvd4:29;
+       uint32_t rrq_immed:1;
+       uint32_t rrq_enable:1;
+#endif
+} CONFIG_LINK;
+
+/* Structure for MB Command PART_SLIM (08)
+ * will be removed since SLI1 is no longer supported!
+ */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t offCiocb;
+       uint16_t numCiocb;
+       uint16_t offRiocb;
+       uint16_t numRiocb;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t numCiocb;
+       uint16_t offCiocb;
+       uint16_t numRiocb;
+       uint16_t offRiocb;
+#endif
+} RING_DEF;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t unused1:24;
+       uint32_t numRing:8;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t numRing:8;
+       uint32_t unused1:24;
+#endif
+
+       RING_DEF ringdef[4];
+       uint32_t hbainit;
+} PART_SLIM_VAR;
+
+/* Structure for MB Command CONFIG_RING (09) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t unused2:6;
+       uint32_t recvSeq:1;
+       uint32_t recvNotify:1;
+       uint32_t numMask:8;
+       uint32_t profile:8;
+       uint32_t unused1:4;
+       uint32_t ring:4;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t ring:4;
+       uint32_t unused1:4;
+       uint32_t profile:8;
+       uint32_t numMask:8;
+       uint32_t recvNotify:1;
+       uint32_t recvSeq:1;
+       uint32_t unused2:6;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t maxRespXchg;
+       uint16_t maxOrigXchg;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t maxOrigXchg;
+       uint16_t maxRespXchg;
+#endif
+
+       RR_REG rrRegs[6];
+} CONFIG_RING_VAR;
+
+/* Structure for MB Command RESET_RING (10) */
+
+typedef struct {
+       uint32_t ring_no;
+} RESET_RING_VAR;
+
+/* Structure for MB Command READ_CONFIG (11) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t cr:1;
+       uint32_t ci:1;
+       uint32_t cr_delay:6;
+       uint32_t cr_count:8;
+       uint32_t InitBBC:8;
+       uint32_t MaxBBC:8;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t MaxBBC:8;
+       uint32_t InitBBC:8;
+       uint32_t cr_count:8;
+       uint32_t cr_delay:6;
+       uint32_t ci:1;
+       uint32_t cr:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t topology:8;
+       uint32_t myDid:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t myDid:24;
+       uint32_t topology:8;
+#endif
+
+       /* Defines for topology (defined previously) */
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t AR:1;
+       uint32_t IR:1;
+       uint32_t rsvd1:29;
+       uint32_t ack0:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t ack0:1;
+       uint32_t rsvd1:29;
+       uint32_t IR:1;
+       uint32_t AR:1;
+#endif
+
+       uint32_t edtov;
+       uint32_t arbtov;
+       uint32_t ratov;
+       uint32_t rttov;
+       uint32_t altov;
+       uint32_t lmt;
+#define LMT_RESERVED    0x0    /* Not used */
+#define LMT_266_10bit   0x1    /* 265.625 Mbaud 10 bit iface  */
+#define LMT_532_10bit   0x2    /* 531.25  Mbaud 10 bit iface  */
+#define LMT_1063_20bit  0x3    /* 1062.5   Mbaud 20 bit iface */
+#define LMT_1063_10bit  0x4    /* 1062.5   Mbaud 10 bit iface */
+#define LMT_2125_10bit  0x8    /* 2125     Mbaud 10 bit iface */
+#define LMT_4250_10bit  0x40   /* 4250     Mbaud 10 bit iface */
+
+       uint32_t rsvd2;
+       uint32_t rsvd3;
+       uint32_t max_xri;
+       uint32_t max_iocb;
+       uint32_t max_rpi;
+       uint32_t avail_xri;
+       uint32_t avail_iocb;
+       uint32_t avail_rpi;
+       uint32_t default_rpi;
+} READ_CONFIG_VAR;
+
+/* Structure for MB Command READ_RCONFIG (12) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rsvd2:7;
+       uint32_t recvNotify:1;
+       uint32_t numMask:8;
+       uint32_t profile:8;
+       uint32_t rsvd1:4;
+       uint32_t ring:4;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t ring:4;
+       uint32_t rsvd1:4;
+       uint32_t profile:8;
+       uint32_t numMask:8;
+       uint32_t recvNotify:1;
+       uint32_t rsvd2:7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t maxResp;
+       uint16_t maxOrig;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t maxOrig;
+       uint16_t maxResp;
+#endif
+
+       RR_REG rrRegs[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t cmdRingOffset;
+       uint16_t cmdEntryCnt;
+       uint16_t rspRingOffset;
+       uint16_t rspEntryCnt;
+       uint16_t nextCmdOffset;
+       uint16_t rsvd3;
+       uint16_t nextRspOffset;
+       uint16_t rsvd4;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t cmdEntryCnt;
+       uint16_t cmdRingOffset;
+       uint16_t rspEntryCnt;
+       uint16_t rspRingOffset;
+       uint16_t rsvd3;
+       uint16_t nextCmdOffset;
+       uint16_t rsvd4;
+       uint16_t nextRspOffset;
+#endif
+} READ_RCONF_VAR;
+
+/* Structure for MB Command READ_SPARM (13) */
+/* Structure for MB Command READ_SPARM64 (0x8D) */
+
+typedef struct {
+       uint32_t rsvd1;
+       uint32_t rsvd2;
+       union {
+               struct ulp_bde sp; /* This BDE points to struct serv_parm
+                                     structure */
+               struct ulp_bde64 sp64;
+       } un;
+} READ_SPARM_VAR;
+
+/* Structure for MB Command READ_STATUS (14) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rsvd1:31;
+       uint32_t clrCounters:1;
+       uint16_t activeXriCnt;
+       uint16_t activeRpiCnt;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t clrCounters:1;
+       uint32_t rsvd1:31;
+       uint16_t activeRpiCnt;
+       uint16_t activeXriCnt;
+#endif
+
+       uint32_t xmitByteCnt;
+       uint32_t rcvByteCnt;
+       uint32_t xmitFrameCnt;
+       uint32_t rcvFrameCnt;
+       uint32_t xmitSeqCnt;
+       uint32_t rcvSeqCnt;
+       uint32_t totalOrigExchanges;
+       uint32_t totalRespExchanges;
+       uint32_t rcvPbsyCnt;
+       uint32_t rcvFbsyCnt;
+} READ_STATUS_VAR;
+
+/* Structure for MB Command READ_RPI (15) */
+/* Structure for MB Command READ_RPI64 (0x8F) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t nextRpi;
+       uint16_t reqRpi;
+       uint32_t rsvd2:8;
+       uint32_t DID:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t reqRpi;
+       uint16_t nextRpi;
+       uint32_t DID:24;
+       uint32_t rsvd2:8;
+#endif
+
+       union {
+               struct ulp_bde sp;
+               struct ulp_bde64 sp64;
+       } un;
+
+} READ_RPI_VAR;
+
+/* Structure for MB Command READ_XRI (16) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t nextXri;
+       uint16_t reqXri;
+       uint16_t rsvd1;
+       uint16_t rpi;
+       uint32_t rsvd2:8;
+       uint32_t DID:24;
+       uint32_t rsvd3:8;
+       uint32_t SID:24;
+       uint32_t rsvd4;
+       uint8_t seqId;
+       uint8_t rsvd5;
+       uint16_t seqCount;
+       uint16_t oxId;
+       uint16_t rxId;
+       uint32_t rsvd6:30;
+       uint32_t si:1;
+       uint32_t exchOrig:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t reqXri;
+       uint16_t nextXri;
+       uint16_t rpi;
+       uint16_t rsvd1;
+       uint32_t DID:24;
+       uint32_t rsvd2:8;
+       uint32_t SID:24;
+       uint32_t rsvd3:8;
+       uint32_t rsvd4;
+       uint16_t seqCount;
+       uint8_t rsvd5;
+       uint8_t seqId;
+       uint16_t rxId;
+       uint16_t oxId;
+       uint32_t exchOrig:1;
+       uint32_t si:1;
+       uint32_t rsvd6:30;
+#endif
+} READ_XRI_VAR;
+
+/* Structure for MB Command READ_REV (17) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t cv:1;
+       uint32_t rr:1;
+       uint32_t rsvd1:29;
+       uint32_t rv:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t rv:1;
+       uint32_t rsvd1:29;
+       uint32_t rr:1;
+       uint32_t cv:1;
+#endif
+
+       uint32_t biuRev;
+       uint32_t smRev;
+       union {
+               uint32_t smFwRev;
+               struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint8_t ProgType;
+                       uint8_t ProgId;
+                       uint16_t ProgVer:4;
+                       uint16_t ProgRev:4;
+                       uint16_t ProgFixLvl:2;
+                       uint16_t ProgDistType:2;
+                       uint16_t DistCnt:4;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint16_t DistCnt:4;
+                       uint16_t ProgDistType:2;
+                       uint16_t ProgFixLvl:2;
+                       uint16_t ProgRev:4;
+                       uint16_t ProgVer:4;
+                       uint8_t ProgId;
+                       uint8_t ProgType;
+#endif
+
+               } b;
+       } un;
+       uint32_t endecRev;
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t feaLevelHigh;
+       uint8_t feaLevelLow;
+       uint8_t fcphHigh;
+       uint8_t fcphLow;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t fcphLow;
+       uint8_t fcphHigh;
+       uint8_t feaLevelLow;
+       uint8_t feaLevelHigh;
+#endif
+
+       uint32_t postKernRev;
+       uint32_t opFwRev;
+       uint8_t opFwName[16];
+       uint32_t sli1FwRev;
+       uint8_t sli1FwName[16];
+       uint32_t sli2FwRev;
+       uint8_t sli2FwName[16];
+       uint32_t rsvd2;
+       uint32_t RandomData[7];
+} READ_REV_VAR;
+
+/* Structure for MB Command READ_LINK_STAT (18) */
+
+typedef struct {
+       uint32_t rsvd1;
+       uint32_t linkFailureCnt;
+       uint32_t lossSyncCnt;
+
+       uint32_t lossSignalCnt;
+       uint32_t primSeqErrCnt;
+       uint32_t invalidXmitWord;
+       uint32_t crcCnt;
+       uint32_t primSeqTimeout;
+       uint32_t elasticOverrun;
+       uint32_t arbTimeout;
+} READ_LNK_VAR;
+
+/* Structure for MB Command REG_LOGIN (19) */
+/* Structure for MB Command REG_LOGIN64 (0x93) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t rsvd1;
+       uint16_t rpi;
+       uint32_t rsvd2:8;
+       uint32_t did:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t rpi;
+       uint16_t rsvd1;
+       uint32_t did:24;
+       uint32_t rsvd2:8;
+#endif
+
+       union {
+               struct ulp_bde sp;
+               struct ulp_bde64 sp64;
+       } un;
+
+} REG_LOGIN_VAR;
+
+/* Word 30 contents for REG_LOGIN */
+typedef union {
+       struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint16_t rsvd1:12;
+               uint16_t wd30_class:4;
+               uint16_t xri;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+               uint16_t xri;
+               uint16_t wd30_class:4;
+               uint16_t rsvd1:12;
+#endif
+       } f;
+       uint32_t word;
+} REG_WD30;
+
+/* Structure for MB Command UNREG_LOGIN (20) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t rsvd1;
+       uint16_t rpi;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t rpi;
+       uint16_t rsvd1;
+#endif
+} UNREG_LOGIN_VAR;
+
+/* Structure for MB Command UNREG_D_ID (0x23) */
+
+typedef struct {
+       uint32_t did;
+} UNREG_D_ID_VAR;
+
+/* Structure for MB Command READ_LA (21) */
+/* Structure for MB Command READ_LA64 (0x95) */
+
+typedef struct {
+       uint32_t eventTag;      /* Event tag */
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rsvd1:22;
+       uint32_t pb:1;
+       uint32_t il:1;
+       uint32_t attType:8;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t attType:8;
+       uint32_t il:1;
+       uint32_t pb:1;
+       uint32_t rsvd1:22;
+#endif
+
+#define AT_RESERVED    0x00    /* Reserved - attType */
+#define AT_LINK_UP     0x01    /* Link is up */
+#define AT_LINK_DOWN   0x02    /* Link is down */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t granted_AL_PA;
+       uint8_t lipAlPs;
+       uint8_t lipType;
+       uint8_t topology;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t topology;
+       uint8_t lipType;
+       uint8_t lipAlPs;
+       uint8_t granted_AL_PA;
+#endif
+
+#define TOPOLOGY_PT_PT 0x01    /* Topology is pt-pt / pt-fabric */
+#define TOPOLOGY_LOOP  0x02    /* Topology is FC-AL */
+
+       union {
+               struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
+                                          to store the LILP AL_PA position map
+                                          into */
+               struct ulp_bde64 lilpBde64;
+       } un;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t Dlu:1;
+       uint32_t Dtf:1;
+       uint32_t Drsvd2:14;
+       uint32_t DlnkSpeed:8;
+       uint32_t DnlPort:4;
+       uint32_t Dtx:2;
+       uint32_t Drx:2;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t Drx:2;
+       uint32_t Dtx:2;
+       uint32_t DnlPort:4;
+       uint32_t DlnkSpeed:8;
+       uint32_t Drsvd2:14;
+       uint32_t Dtf:1;
+       uint32_t Dlu:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t Ulu:1;
+       uint32_t Utf:1;
+       uint32_t Ursvd2:14;
+       uint32_t UlnkSpeed:8;
+       uint32_t UnlPort:4;
+       uint32_t Utx:2;
+       uint32_t Urx:2;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t Urx:2;
+       uint32_t Utx:2;
+       uint32_t UnlPort:4;
+       uint32_t UlnkSpeed:8;
+       uint32_t Ursvd2:14;
+       uint32_t Utf:1;
+       uint32_t Ulu:1;
+#endif
+
+#define LA_UNKNW_LINK  0x0    /* lnkSpeed */
+#define LA_1GHZ_LINK   0x04   /* lnkSpeed */
+#define LA_2GHZ_LINK   0x08   /* lnkSpeed */
+#define LA_4GHZ_LINK   0x10   /* lnkSpeed */
+#define LA_8GHZ_LINK   0x20   /* lnkSpeed */
+#define LA_10GHZ_LINK  0x40   /* lnkSpeed */
+
+} READ_LA_VAR;
+
+/* Structure for MB Command CLEAR_LA (22) */
+
+typedef struct {
+       uint32_t eventTag;      /* Event tag */
+       uint32_t rsvd1;
+} CLEAR_LA_VAR;
+
+/* Structure for MB Command DUMP */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rsvd:25;
+       uint32_t ra:1;
+       uint32_t co:1;
+       uint32_t cv:1;
+       uint32_t type:4;
+       uint32_t entry_index:16;
+       uint32_t region_id:16;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t type:4;
+       uint32_t cv:1;
+       uint32_t co:1;
+       uint32_t ra:1;
+       uint32_t rsvd:25;
+       uint32_t region_id:16;
+       uint32_t entry_index:16;
+#endif
+
+       uint32_t rsvd1;
+       uint32_t word_cnt;
+       uint32_t resp_offset;
+} DUMP_VAR;
+
+#define  DMP_MEM_REG             0x1
+#define  DMP_NV_PARAMS           0x2
+
+#define  DMP_REGION_VPD          0xe
+#define  DMP_VPD_SIZE            0x400  /* maximum amount of VPD */
+#define  DMP_RSP_OFFSET          0x14   /* word 5 contains first word of rsp */
+#define  DMP_RSP_SIZE            0x6C   /* maximum of 27 words of rsp data */
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
+
+typedef struct {
+       uint32_t pcbLen;
+       uint32_t pcbLow;       /* bit 31:0  of memory based port config block */
+       uint32_t pcbHigh;      /* bit 63:32 of memory based port config block */
+       uint32_t hbainit[5];
+} CONFIG_PORT_VAR;
+
+/* SLI-2 Port Control Block */
+
+/* SLIM POINTER */
+#define SLIMOFF 0x30           /* WORD */
+
+typedef struct _SLI2_RDSC {
+       uint32_t cmdEntries;
+       uint32_t cmdAddrLow;
+       uint32_t cmdAddrHigh;
+
+       uint32_t rspEntries;
+       uint32_t rspAddrLow;
+       uint32_t rspAddrHigh;
+} SLI2_RDSC;
+
+typedef struct _PCB {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t type:8;
+#define TYPE_NATIVE_SLI2       0x01
+       uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2   0x01
+       uint32_t rsvd:12;
+       uint32_t maxRing:4;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t maxRing:4;
+       uint32_t rsvd:12;
+       uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2   0x01
+       uint32_t type:8;
+#define TYPE_NATIVE_SLI2       0x01
+#endif
+
+       uint32_t mailBoxSize;
+       uint32_t mbAddrLow;
+       uint32_t mbAddrHigh;
+
+       uint32_t hgpAddrLow;
+       uint32_t hgpAddrHigh;
+
+       uint32_t pgpAddrLow;
+       uint32_t pgpAddrHigh;
+       SLI2_RDSC rdsc[MAX_RINGS];
+} PCB_t;
+
+/* NEW_FEATURE */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t rsvd0:27;
+       uint32_t discardFarp:1;
+       uint32_t IPEnable:1;
+       uint32_t nodeName:1;
+       uint32_t portName:1;
+       uint32_t filterEnable:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t filterEnable:1;
+       uint32_t portName:1;
+       uint32_t nodeName:1;
+       uint32_t IPEnable:1;
+       uint32_t discardFarp:1;
+       uint32_t rsvd0:27;
+#endif
+
+       uint8_t portname[8];    /* Used to be struct lpfc_name */
+       uint8_t nodename[8];
+       uint32_t rsvd1;
+       uint32_t rsvd2;
+       uint32_t rsvd3;
+       uint32_t IPAddress;
+} CONFIG_FARP_VAR;
+
+/* Union of all Mailbox Command types */
+#define MAILBOX_CMD_WSIZE      32
+#define MAILBOX_CMD_SIZE       (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
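For reference, MAILBOX_CMD_SIZE works out to 32 * 4 = 128 bytes, and the varWords[MAILBOX_CMD_WSIZE - 1] member of the union below covers the 31 words that follow the command/status word.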
+
+typedef union {
+       uint32_t varWords[MAILBOX_CMD_WSIZE - 1];
+       LOAD_SM_VAR varLdSM;    /* cmd =  1 (LOAD_SM)        */
+       READ_NV_VAR varRDnvp;   /* cmd =  2 (READ_NVPARMS)   */
+       WRITE_NV_VAR varWTnvp;  /* cmd =  3 (WRITE_NVPARMS)  */
+       BIU_DIAG_VAR varBIUdiag;        /* cmd =  4 (RUN_BIU_DIAG)   */
+       INIT_LINK_VAR varInitLnk;       /* cmd =  5 (INIT_LINK)      */
+       DOWN_LINK_VAR varDwnLnk;        /* cmd =  6 (DOWN_LINK)      */
+       CONFIG_LINK varCfgLnk;  /* cmd =  7 (CONFIG_LINK)    */
+       PART_SLIM_VAR varSlim;  /* cmd =  8 (PART_SLIM)      */
+       CONFIG_RING_VAR varCfgRing;     /* cmd =  9 (CONFIG_RING)    */
+       RESET_RING_VAR varRstRing;      /* cmd = 10 (RESET_RING)     */
+       READ_CONFIG_VAR varRdConfig;    /* cmd = 11 (READ_CONFIG)    */
+       READ_RCONF_VAR varRdRConfig;    /* cmd = 12 (READ_RCONFIG)   */
+       READ_SPARM_VAR varRdSparm;      /* cmd = 13 (READ_SPARM(64)) */
+       READ_STATUS_VAR varRdStatus;    /* cmd = 14 (READ_STATUS)    */
+       READ_RPI_VAR varRdRPI;  /* cmd = 15 (READ_RPI(64))   */
+       READ_XRI_VAR varRdXRI;  /* cmd = 16 (READ_XRI)       */
+       READ_REV_VAR varRdRev;  /* cmd = 17 (READ_REV)       */
+       READ_LNK_VAR varRdLnk;  /* cmd = 18 (READ_LNK_STAT)  */
+       REG_LOGIN_VAR varRegLogin;      /* cmd = 19 (REG_LOGIN(64))  */
+       UNREG_LOGIN_VAR varUnregLogin;  /* cmd = 20 (UNREG_LOGIN)    */
+       READ_LA_VAR varReadLA;  /* cmd = 21 (READ_LA(64))    */
+       CLEAR_LA_VAR varClearLA;        /* cmd = 22 (CLEAR_LA)       */
+       DUMP_VAR varDmp;        /* Warm Start DUMP mbx cmd   */
+       UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID)   */
+       CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP)  NEW_FEATURE */
+       CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT)  */
+} MAILVARIANTS;
+
+/*
+ * SLI-2 specific structures
+ */
+
+typedef struct {
+       uint32_t cmdPutInx;
+       uint32_t rspGetInx;
+} HGP;
+
+typedef struct {
+       uint32_t cmdGetInx;
+       uint32_t rspPutInx;
+} PGP;
+
+typedef struct _SLI2_DESC {
+       HGP host[MAX_RINGS];
+       uint32_t unused1[16];
+       PGP port[MAX_RINGS];
+} SLI2_DESC;
+
+typedef union {
+       SLI2_DESC s2;
+} SLI_VAR;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t mbxStatus;
+       uint8_t mbxCommand;
+       uint8_t mbxReserved:6;
+       uint8_t mbxHc:1;
+       uint8_t mbxOwner:1;     /* Low order bit first word */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t mbxOwner:1;     /* Low order bit first word */
+       uint8_t mbxHc:1;
+       uint8_t mbxReserved:6;
+       uint8_t mbxCommand;
+       uint16_t mbxStatus;
+#endif
+
+       MAILVARIANTS un;
+       SLI_VAR us;
+} MAILBOX_t;
+
+/*
+ *    Begin Structure Definitions for IOCB Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint8_t statAction;
+       uint8_t statRsn;
+       uint8_t statBaExp;
+       uint8_t statLocalError;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint8_t statLocalError;
+       uint8_t statBaExp;
+       uint8_t statRsn;
+       uint8_t statAction;
+#endif
+       /* statRsn  P/F_RJT reason codes */
+#define RJT_BAD_D_ID       0x01        /* Invalid D_ID field */
+#define RJT_BAD_S_ID       0x02        /* Invalid S_ID field */
+#define RJT_UNAVAIL_TEMP   0x03        /* N_Port unavailable temp. */
+#define RJT_UNAVAIL_PERM   0x04        /* N_Port unavailable perm. */
+#define RJT_UNSUP_CLASS    0x05        /* Class not supported */
+#define RJT_DELIM_ERR      0x06        /* Delimiter usage error */
+#define RJT_UNSUP_TYPE     0x07        /* Type not supported */
+#define RJT_BAD_CONTROL    0x08        /* Invalid link control */
+#define RJT_BAD_RCTL       0x09        /* R_CTL invalid */
+#define RJT_BAD_FCTL       0x0A        /* F_CTL invalid */
+#define RJT_BAD_OXID       0x0B        /* OX_ID invalid */
+#define RJT_BAD_RXID       0x0C        /* RX_ID invalid */
+#define RJT_BAD_SEQID      0x0D        /* SEQ_ID invalid */
+#define RJT_BAD_DFCTL      0x0E        /* DF_CTL invalid */
+#define RJT_BAD_SEQCNT     0x0F        /* SEQ_CNT invalid */
+#define RJT_BAD_PARM       0x10        /* Param. field invalid */
+#define RJT_XCHG_ERR       0x11        /* Exchange error */
+#define RJT_PROT_ERR       0x12        /* Protocol error */
+#define RJT_BAD_LENGTH     0x13        /* Invalid Length */
+#define RJT_UNEXPECTED_ACK 0x14        /* Unexpected ACK */
+#define RJT_LOGIN_REQUIRED 0x16        /* Login required */
+#define RJT_TOO_MANY_SEQ   0x17        /* Excessive sequences */
+#define RJT_XCHG_NOT_STRT  0x18        /* Exchange not started */
+#define RJT_UNSUP_SEC_HDR  0x19        /* Security hdr not supported */
+#define RJT_UNAVAIL_PATH   0x1A        /* Fabric Path not available */
+#define RJT_VENDOR_UNIQUE  0xFF        /* Vendor unique error */
+
+#define IOERR_SUCCESS                 0x00     /* statLocalError */
+#define IOERR_MISSING_CONTINUE        0x01
+#define IOERR_SEQUENCE_TIMEOUT        0x02
+#define IOERR_INTERNAL_ERROR          0x03
+#define IOERR_INVALID_RPI             0x04
+#define IOERR_NO_XRI                  0x05
+#define IOERR_ILLEGAL_COMMAND         0x06
+#define IOERR_XCHG_DROPPED            0x07
+#define IOERR_ILLEGAL_FIELD           0x08
+#define IOERR_BAD_CONTINUE            0x09
+#define IOERR_TOO_MANY_BUFFERS        0x0A
+#define IOERR_RCV_BUFFER_WAITING      0x0B
+#define IOERR_NO_CONNECTION           0x0C
+#define IOERR_TX_DMA_FAILED           0x0D
+#define IOERR_RX_DMA_FAILED           0x0E
+#define IOERR_ILLEGAL_FRAME           0x0F
+#define IOERR_EXTRA_DATA              0x10
+#define IOERR_NO_RESOURCES            0x11
+#define IOERR_RESERVED                0x12
+#define IOERR_ILLEGAL_LENGTH          0x13
+#define IOERR_UNSUPPORTED_FEATURE     0x14
+#define IOERR_ABORT_IN_PROGRESS       0x15
+#define IOERR_ABORT_REQUESTED         0x16
+#define IOERR_RECEIVE_BUFFER_TIMEOUT  0x17
+#define IOERR_LOOP_OPEN_FAILURE       0x18
+#define IOERR_RING_RESET              0x19
+#define IOERR_LINK_DOWN               0x1A
+#define IOERR_CORRUPTED_DATA          0x1B
+#define IOERR_CORRUPTED_RPI           0x1C
+#define IOERR_OUT_OF_ORDER_DATA       0x1D
+#define IOERR_OUT_OF_ORDER_ACK        0x1E
+#define IOERR_DUP_FRAME               0x1F
+#define IOERR_LINK_CONTROL_FRAME      0x20     /* ACK_N received */
+#define IOERR_BAD_HOST_ADDRESS        0x21
+#define IOERR_RCV_HDRBUF_WAITING      0x22
+#define IOERR_MISSING_HDR_BUFFER      0x23
+#define IOERR_MSEQ_CHAIN_CORRUPTED    0x24
+#define IOERR_ABORTMULT_REQUESTED     0x25
+#define IOERR_BUFFER_SHORTAGE         0x28
+#define IOERR_DEFAULT                 0x29
+#define IOERR_CNT                     0x2A
+
+#define IOERR_DRVR_MASK               0x100
+#define IOERR_SLI_DOWN                0x101  /* ulpStatus  - Driver defined */
+#define IOERR_SLI_BRESET              0x102
+#define IOERR_SLI_ABORTED             0x103
+} PARM_ERR;
+
+typedef union {
+       struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint8_t Rctl;   /* R_CTL field */
+               uint8_t Type;   /* TYPE field */
+               uint8_t Dfctl;  /* DF_CTL field */
+               uint8_t Fctl;   /* Bits 0-7 of IOCB word 5 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+               uint8_t Fctl;   /* Bits 0-7 of IOCB word 5 */
+               uint8_t Dfctl;  /* DF_CTL field */
+               uint8_t Type;   /* TYPE field */
+               uint8_t Rctl;   /* R_CTL field */
+#endif
+
+#define BC      0x02           /* Broadcast Received  - Fctl */
+#define SI      0x04           /* Sequence Initiative */
+#define LA      0x08           /* Ignore Link Attention state */
+#define LS      0x80           /* Last Sequence */
+       } hcsw;
+       uint32_t reserved;
+} WORD5;
+
+/* IOCB Command template for a generic response */
+typedef struct {
+       uint32_t reserved[4];
+       PARM_ERR perr;
+} GENERIC_RSP;
+
+/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
+typedef struct {
+       struct ulp_bde xrsqbde[2];
+       uint32_t xrsqRo;        /* Starting Relative Offset */
+       WORD5 w5;               /* Header control/status word */
+} XR_SEQ_FIELDS;
+
+/* IOCB Command template for ELS_REQUEST */
+typedef struct {
+       struct ulp_bde elsReq;
+       struct ulp_bde elsRsp;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t word4Rsvd:7;
+       uint32_t fl:1;
+       uint32_t myID:24;
+       uint32_t word5Rsvd:8;
+       uint32_t remoteID:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t myID:24;
+       uint32_t fl:1;
+       uint32_t word4Rsvd:7;
+       uint32_t remoteID:24;
+       uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST;
+
+/* IOCB Command template for RCV_ELS_REQ */
+typedef struct {
+       struct ulp_bde elsReq[2];
+       uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t word5Rsvd:8;
+       uint32_t remoteID:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t remoteID:24;
+       uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ;
+
+/* IOCB Command template for ABORT / CLOSE_XRI */
+typedef struct {
+       uint32_t rsvd[3];
+       uint32_t abortType;
+#define ABORT_TYPE_ABTX  0x00000000
+#define ABORT_TYPE_ABTS  0x00000001
+       uint32_t parm;
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint16_t abortContextTag; /* ulpContext from command to abort/close */
+       uint16_t abortIoTag;    /* ulpIoTag from command to abort/close */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint16_t abortIoTag;    /* ulpIoTag from command to abort/close */
+       uint16_t abortContextTag; /* ulpContext from command to abort/close */
+#endif
+} AC_XRI;
+
+/* IOCB Command template for ABORT_MXRI64 */
+typedef struct {
+       uint32_t rsvd[3];
+       uint32_t abortType;
+       uint32_t parm;
+       uint32_t iotag32;
+} A_MXRI64;
+
+/* IOCB Command template for GET_RPI */
+typedef struct {
+       uint32_t rsvd[4];
+       uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t word5Rsvd:8;
+       uint32_t remoteID:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t remoteID:24;
+       uint32_t word5Rsvd:8;
+#endif
+} GET_RPI;
+
+/* IOCB Command template for all FCP Initiator commands */
+typedef struct {
+       struct ulp_bde fcpi_cmnd;       /* FCP_CMND payload descriptor */
+       struct ulp_bde fcpi_rsp;        /* Rcv buffer */
+       uint32_t fcpi_parm;
+       uint32_t fcpi_XRdy;     /* transfer ready for IWRITE */
+} FCPI_FIELDS;
+
+/* IOCB Command template for all FCP Target commands */
+typedef struct {
+       struct ulp_bde fcpt_Buffer[2];  /* FCP_CMND payload descriptor */
+       uint32_t fcpt_Offset;
+       uint32_t fcpt_Length;   /* transfer ready for IWRITE */
+} FCPT_FIELDS;
+
+/* SLI-2 IOCB structure definitions */
+
+/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
+typedef struct {
+       ULP_BDL bdl;
+       uint32_t xrsqRo;        /* Starting Relative Offset */
+       WORD5 w5;               /* Header control/status word */
+} XMT_SEQ_FIELDS64;
+
+/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
+typedef struct {
+       struct ulp_bde64 rcvBde;
+       uint32_t rsvd1;
+       uint32_t xrsqRo;        /* Starting Relative Offset */
+       WORD5 w5;               /* Header control/status word */
+} RCV_SEQ_FIELDS64;
+
+/* IOCB Command template for ELS_REQUEST64 */
+typedef struct {
+       ULP_BDL bdl;
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t word4Rsvd:7;
+       uint32_t fl:1;
+       uint32_t myID:24;
+       uint32_t word5Rsvd:8;
+       uint32_t remoteID:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t myID:24;
+       uint32_t fl:1;
+       uint32_t word4Rsvd:7;
+       uint32_t remoteID:24;
+       uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST64;
+
+/* IOCB Command template for GEN_REQUEST64 */
+typedef struct {
+       ULP_BDL bdl;
+       uint32_t xrsqRo;        /* Starting Relative Offset */
+       WORD5 w5;               /* Header control/status word */
+} GEN_REQUEST64;
+
+/* IOCB Command template for RCV_ELS_REQ64 */
+typedef struct {
+       struct ulp_bde64 elsReq;
+       uint32_t rcvd1;
+       uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t word5Rsvd:8;
+       uint32_t remoteID:24;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t remoteID:24;
+       uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ64;
+
+/* IOCB Command template for all 64 bit FCP Initiator commands */
+typedef struct {
+       ULP_BDL bdl;
+       uint32_t fcpi_parm;
+       uint32_t fcpi_XRdy;     /* transfer ready for IWRITE */
+} FCPI_FIELDS64;
+
+/* IOCB Command template for all 64 bit FCP Target commands */
+typedef struct {
+       ULP_BDL bdl;
+       uint32_t fcpt_Offset;
+       uint32_t fcpt_Length;   /* transfer ready for IWRITE */
+} FCPT_FIELDS64;
+
+typedef struct _IOCB { /* IOCB structure */
+       union {
+               GENERIC_RSP grsp;       /* Generic response */
+               XR_SEQ_FIELDS xrseq;    /* XMIT / BCAST / RCV_SEQUENCE cmd */
+               struct ulp_bde cont[3]; /* up to 3 continuation bdes */
+               RCV_ELS_REQ rcvels;     /* RCV_ELS_REQ template */
+               AC_XRI acxri;   /* ABORT / CLOSE_XRI template */
+               A_MXRI64 amxri; /* abort multiple xri command overlay */
+               GET_RPI getrpi; /* GET_RPI template */
+               FCPI_FIELDS fcpi;       /* FCP Initiator template */
+               FCPT_FIELDS fcpt;       /* FCP target template */
+
+               /* SLI-2 structures */
+
+               struct ulp_bde64 cont64[2];     /* up to 2 64 bit continuation
+                                          bde_64s */
+               ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
+               GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
+               RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
+               XMT_SEQ_FIELDS64 xseq64;        /* XMIT / BCAST cmd */
+               FCPI_FIELDS64 fcpi64;   /* FCP 64 bit Initiator template */
+               FCPT_FIELDS64 fcpt64;   /* FCP 64 bit target template */
+
+               uint32_t ulpWord[IOCB_WORD_SZ - 2];     /* generic 6 'words' */
+       } un;
+       union {
+               struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint16_t ulpContext;    /* High order bits word 6 */
+                       uint16_t ulpIoTag;      /* Low  order bits word 6 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint16_t ulpIoTag;      /* Low  order bits word 6 */
+                       uint16_t ulpContext;    /* High order bits word 6 */
+#endif
+               } t1;
+               struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint16_t ulpContext;    /* High order bits word 6 */
+                       uint16_t ulpIoTag1:2;   /* Low  order bits word 6 */
+                       uint16_t ulpIoTag0:14;  /* Low  order bits word 6 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint16_t ulpIoTag0:14;  /* Low  order bits word 6 */
+                       uint16_t ulpIoTag1:2;   /* Low  order bits word 6 */
+                       uint16_t ulpContext;    /* High order bits word 6 */
+#endif
+               } t2;
+       } un1;
+#define ulpContext un1.t1.ulpContext
+#define ulpIoTag   un1.t1.ulpIoTag
+#define ulpIoTag0  un1.t2.ulpIoTag0
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32_t ulpTimeout:8;
+       uint32_t ulpXS:1;
+       uint32_t ulpFCP2Rcvy:1;
+       uint32_t ulpPU:2;
+       uint32_t ulpIr:1;
+       uint32_t ulpClass:3;
+       uint32_t ulpCommand:8;
+       uint32_t ulpStatus:4;
+       uint32_t ulpBdeCount:2;
+       uint32_t ulpLe:1;
+       uint32_t ulpOwner:1;    /* Low order bit word 7 */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+       uint32_t ulpOwner:1;    /* Low order bit word 7 */
+       uint32_t ulpLe:1;
+       uint32_t ulpBdeCount:2;
+       uint32_t ulpStatus:4;
+       uint32_t ulpCommand:8;
+       uint32_t ulpClass:3;
+       uint32_t ulpIr:1;
+       uint32_t ulpPU:2;
+       uint32_t ulpFCP2Rcvy:1;
+       uint32_t ulpXS:1;
+       uint32_t ulpTimeout:8;
+#endif
+
+#define PARM_UNUSED        0   /* PU field (Word 4) not used */
+#define PARM_REL_OFF       1   /* PU field (Word 4) = R. O. */
+#define PARM_READ_CHECK    2   /* PU field (Word 4) = Data Transfer Length */
+#define CLASS1             0   /* Class 1 */
+#define CLASS2             1   /* Class 2 */
+#define CLASS3             2   /* Class 3 */
+#define CLASS_FCP_INTERMIX 7   /* FCP Data->Cls 1, all else->Cls 2 */
+
+#define IOSTAT_SUCCESS         0x0     /* ulpStatus  - HBA defined */
+#define IOSTAT_FCP_RSP_ERROR   0x1
+#define IOSTAT_REMOTE_STOP     0x2
+#define IOSTAT_LOCAL_REJECT    0x3
+#define IOSTAT_NPORT_RJT       0x4
+#define IOSTAT_FABRIC_RJT      0x5
+#define IOSTAT_NPORT_BSY       0x6
+#define IOSTAT_FABRIC_BSY      0x7
+#define IOSTAT_INTERMED_RSP    0x8
+#define IOSTAT_LS_RJT          0x9
+#define IOSTAT_BA_RJT          0xA
+#define IOSTAT_RSVD1           0xB
+#define IOSTAT_RSVD2           0xC
+#define IOSTAT_RSVD3           0xD
+#define IOSTAT_RSVD4           0xE
+#define IOSTAT_RSVD5           0xF
+#define IOSTAT_DRIVER_REJECT   0x10   /* ulpStatus  - Driver defined */
+#define IOSTAT_DEFAULT         0xF    /* Same as rsvd5 for now */
+#define IOSTAT_CNT             0x11
+
+} IOCB_t;
+
+
+#define SLI1_SLIM_SIZE   (4 * 1024)
+
+/* Up to 498 IOCBs will fit into 16k:
+ * 256 (MAILBOX_t) + 140 (PCB_t) + (32 (IOCB_t) * 498) = 16332 <= 16384
+ */
+#define SLI2_SLIM_SIZE   (16 * 1024)
+
+/* Maximum IOCBs that will fit in SLI2 slim */
+#define MAX_SLI2_IOCB    498
+
+struct lpfc_sli2_slim {
+       MAILBOX_t mbx;
+       PCB_t pcb;
+       IOCB_t IOCBs[MAX_SLI2_IOCB];
+};
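Since struct lpfc_sli2_slim is sized to fit within SLI2_SLIM_SIZE, a build-time size check is a natural companion; a hedged sketch, not part of this patch (assuming BUILD_BUG_ON() from <linux/kernel.h> is available in this tree):

    /* Illustrative only: fail the build if the SLI-2 image outgrows the
     * SLIM window; this would live inside an init routine.
     */
    BUILD_BUG_ON(sizeof(struct lpfc_sli2_slim) > SLI2_SLIM_SIZE);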
+
+/*******************************************************************
+This routine checks the PCI device ID to allow special handling for LC HBAs.
+
+Parameters:
+device : the device field of struct pci_dev
+
+return 1 => TRUE
+       0 => FALSE
+ *******************************************************************/
+static inline int
+lpfc_is_LC_HBA(unsigned short device)
+{
+       if ((device == PCI_DEVICE_ID_TFLY) ||
+           (device == PCI_DEVICE_ID_PFLY) ||
+           (device == PCI_DEVICE_ID_LP101) ||
+           (device == PCI_DEVICE_ID_BMID) ||
+           (device == PCI_DEVICE_ID_BSMB) ||
+           (device == PCI_DEVICE_ID_ZMID) ||
+           (device == PCI_DEVICE_ID_ZSMB) ||
+           (device == PCI_DEVICE_ID_RFLY))
+               return 1;
+       else
+               return 0;
+}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
new file mode 100644 (file)
index 0000000..233c912
--- /dev/null
@@ -0,0 +1,1739 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_init.c 1.233 2005/04/13 11:59:09EDT sf_support Exp  $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+
+static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
+static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+static int lpfc_post_rcv_buf(struct lpfc_hba *);
+
+static struct scsi_transport_template *lpfc_transport_template = NULL;
+static DEFINE_IDR(lpfc_hba_index);
+
+/************************************************************************/
+/*                                                                      */
+/*    lpfc_config_port_prep                                             */
+/*    This routine will do LPFC initialization prior to the             */
+/*    CONFIG_PORT mailbox command. This will be initialized             */
+/*    as a SLI layer callback routine.                                  */
+/*    This routine returns 0 on success or -ERESTART if it wants        */
+/*    the SLI layer to reset the HBA and try again. Any                 */
+/*    other return value indicates an error.                            */
+/*                                                                      */
+/************************************************************************/
+int
+lpfc_config_port_prep(struct lpfc_hba * phba)
+{
+       lpfc_vpd_t *vp = &phba->vpd;
+       int i = 0, rc;
+       LPFC_MBOXQ_t *pmb;
+       MAILBOX_t *mb;
+       char *lpfc_vpd_data = NULL;
+       uint16_t offset = 0;
+       static char licensed[56] =
+                   "key unlock for use with gnu public licensed code only\0";
+
+       pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               phba->hba_state = LPFC_HBA_ERROR;
+               return -ENOMEM;
+       }
+
+       mb = &pmb->mb;
+       phba->hba_state = LPFC_INIT_MBX_CMDS;
+
+       if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+               uint32_t *ptext = (uint32_t *) licensed;
+
+               for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+                       *ptext = cpu_to_be32(*ptext);
+
+               lpfc_read_nv(phba, pmb);
+               memset((char*)mb->un.varRDnvp.rsvd3, 0,
+                       sizeof (mb->un.varRDnvp.rsvd3));
+               memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
+                        sizeof (licensed));
+
+               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+               if (rc != MBX_SUCCESS) {
+                       lpfc_printf_log(phba,
+                                       KERN_ERR,
+                                       LOG_MBOX,
+                                       "%d:0324 Config Port initialization "
+                                       "error, mbxCmd x%x READ_NVPARM, "
+                                       "mbxStatus x%x\n",
+                                       phba->brd_no,
+                                       mb->mbxCommand, mb->mbxStatus);
+                       mempool_free(pmb, phba->mbox_mem_pool);
+                       return -ERESTART;
+               }
+               memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
+                      sizeof (mb->un.varRDnvp.nodename));
+       }
+
+       /* Setup and issue mailbox READ REV command */
+       lpfc_read_rev(phba, pmb);
+       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_INIT,
+                               "%d:0439 Adapter failed to init, mbxCmd x%x "
+                               "READ_REV, mbxStatus x%x\n",
+                               phba->brd_no,
+                               mb->mbxCommand, mb->mbxStatus);
+               mempool_free( pmb, phba->mbox_mem_pool);
+               return -ERESTART;
+       }
+
+       /* The HBA's current state is provided by the ProgType and rr fields.
+        * Read and check the value of these fields before continuing to config
+        * this port.
+        */
+       if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
+               /* Old firmware */
+               vp->rev.rBit = 0;
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_INIT,
+                               "%d:0440 Adapter failed to init, mbxCmd x%x "
+                                       "READ_REV detected outdated firmware. "
+                               "Data: x%x\n",
+                               phba->brd_no,
+                               mb->mbxCommand, 0);
+               mempool_free(pmb, phba->mbox_mem_pool);
+               return -ERESTART;
+       } else {
+               vp->rev.rBit = 1;
+               vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
+               memcpy(vp->rev.sli1FwName,
+                       (char*)mb->un.varRdRev.sli1FwName, 16);
+               vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
+               memcpy(vp->rev.sli2FwName,
+                                       (char *)mb->un.varRdRev.sli2FwName, 16);
+       }
+
+       /* Save information as VPD data */
+       vp->rev.biuRev = mb->un.varRdRev.biuRev;
+       vp->rev.smRev = mb->un.varRdRev.smRev;
+       vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
+       vp->rev.endecRev = mb->un.varRdRev.endecRev;
+       vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
+       vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
+       vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
+       vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
+       vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
+       vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+
+       if (lpfc_is_LC_HBA(phba->pcidev->device))
+               memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
+                                               sizeof (phba->RandomData));
+
+       /* Get the default values for Model Name and Description */
+       lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+       /* Get adapter VPD information */
+       pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
+       if (!pmb->context2)
+               goto out_free_mbox;
+       lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
+       if (!lpfc_vpd_data)
+               goto out_free_context2;
+
+       do {
+               lpfc_dump_mem(phba, pmb, offset);
+               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+               if (rc != MBX_SUCCESS) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                       "%d:0441 VPD not present on adapter, "
+                                       "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
+                                       phba->brd_no,
+                                       mb->mbxCommand, mb->mbxStatus);
+                       kfree(lpfc_vpd_data);
+                       lpfc_vpd_data = NULL;
+                       break;
+               }
+
+               lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
+                                                       mb->un.varDmp.word_cnt);
+               offset += mb->un.varDmp.word_cnt;
+       } while (mb->un.varDmp.word_cnt);
+       lpfc_parse_vpd(phba, lpfc_vpd_data);
+
+       kfree(lpfc_vpd_data);
+out_free_context2:
+       kfree(pmb->context2);
+out_free_mbox:
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return 0;
+}
+
+/************************************************************************/
+/*                                                                      */
+/*    lpfc_config_port_post                                             */
+/*    This routine will do LPFC initialization after the                */
+/*    CONFIG_PORT mailbox command. This will be initialized             */
+/*    as a SLI layer callback routine.                                  */
+/*    This routine returns 0 on success. Any other return value         */
+/*    indicates an error.                                               */
+/*                                                                      */
+/************************************************************************/
+int
+lpfc_config_port_post(struct lpfc_hba * phba)
+{
+       LPFC_MBOXQ_t *pmb;
+       MAILBOX_t *mb;
+       struct lpfc_dmabuf *mp;
+       struct lpfc_sli *psli = &phba->sli;
+       uint32_t status, timeout;
+       int i, j, rc;
+
+       pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               phba->hba_state = LPFC_HBA_ERROR;
+               return -ENOMEM;
+       }
+       mb = &pmb->mb;
+
+       lpfc_config_link(phba, pmb);
+       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_INIT,
+                               "%d:0447 Adapter failed init, mbxCmd x%x "
+                               "CONFIG_LINK mbxStatus x%x\n",
+                               phba->brd_no,
+                               mb->mbxCommand, mb->mbxStatus);
+               phba->hba_state = LPFC_HBA_ERROR;
+               mempool_free( pmb, phba->mbox_mem_pool);
+               return -EIO;
+       }
+
+       /* Get login parameters for NID.  */
+       lpfc_read_sparam(phba, pmb);
+       if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_INIT,
+                               "%d:0448 Adapter failed init, mbxCmd x%x "
+                               "READ_SPARM mbxStatus x%x\n",
+                               phba->brd_no,
+                               mb->mbxCommand, mb->mbxStatus);
+               phba->hba_state = LPFC_HBA_ERROR;
+               mp = (struct lpfc_dmabuf *) pmb->context1;
+               mempool_free( pmb, phba->mbox_mem_pool);
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               return -EIO;
+       }
+
+       mp = (struct lpfc_dmabuf *) pmb->context1;
+
+       memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       pmb->context1 = NULL;
+
+       memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
+              sizeof (struct lpfc_name));
+       memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
+              sizeof (struct lpfc_name));
+       /* If no serial number in VPD data, use low 6 bytes of WWNN */
+       /* This should be consolidated into parse_vpd ? - mr */
+       if (phba->SerialNumber[0] == 0) {
+               uint8_t *outptr;
+
+               outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
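+               /* Each of the 6 IEEE address bytes yields two serial number
+                * characters: nibbles 0-9 map to '0'-'9' and 10-15 map to
+                * 'a'-'f'.
+                */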
+               for (i = 0; i < 12; i++) {
+                       status = *outptr++;
+                       j = ((status & 0xf0) >> 4);
+                       if (j <= 9)
+                               phba->SerialNumber[i] =
+                                   (char)((uint8_t) 0x30 + (uint8_t) j);
+                       else
+                               phba->SerialNumber[i] =
+                                   (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+                       i++;
+                       j = (status & 0xf);
+                       if (j <= 9)
+                               phba->SerialNumber[i] =
+                                   (char)((uint8_t) 0x30 + (uint8_t) j);
+                       else
+                               phba->SerialNumber[i] =
+                                   (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+               }
+       }
+
+       /* This should turn on DELAYED ABTS for ELS timeouts */
+       lpfc_set_slim(phba, pmb, 0x052198, 0x1);
+       if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+               phba->hba_state = LPFC_HBA_ERROR;
+               mempool_free( pmb, phba->mbox_mem_pool);
+               return -EIO;
+       }
+
+
+       lpfc_read_config(phba, pmb);
+       if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_INIT,
+                               "%d:0453 Adapter failed to init, mbxCmd x%x "
+                               "READ_CONFIG, mbxStatus x%x\n",
+                               phba->brd_no,
+                               mb->mbxCommand, mb->mbxStatus);
+               phba->hba_state = LPFC_HBA_ERROR;
+               mempool_free( pmb, phba->mbox_mem_pool);
+               return -EIO;
+       }
+
+       /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
+       if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
+               phba->cfg_hba_queue_depth =
+                       mb->un.varRdConfig.max_xri + 1;
+
+       phba->lmt = mb->un.varRdConfig.lmt;
+       /* If the HBA is not 4GB or 2GB capable, do not let the configured
+        * link speed request a rate the hardware cannot provide.
+        */
+       if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
+               (phba->cfg_link_speed > LINK_SPEED_2G)) ||
+               (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
+               (phba->cfg_link_speed > LINK_SPEED_1G))) {
+               /* Reset link speed to auto. 1G/2GB HBA cfg'd for 4G */
+               lpfc_printf_log(phba,
+                       KERN_WARNING,
+                       LOG_LINK_EVENT,
+                       "%d:1302 Invalid speed for this board: "
+                       "Reset link speed to auto: x%x\n",
+                       phba->brd_no,
+                       phba->cfg_link_speed);
+               phba->cfg_link_speed = LINK_SPEED_AUTO;
+       }
+
+       phba->hba_state = LPFC_LINK_DOWN;
+
+       /* Only process IOCBs on ring 0 till hba_state is READY */
+       if (psli->ring[psli->ip_ring].cmdringaddr)
+               psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
+       if (psli->ring[psli->fcp_ring].cmdringaddr)
+               psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
+       if (psli->ring[psli->next_ring].cmdringaddr)
+               psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
+
+       /* Post receive buffers for desired rings */
+       lpfc_post_rcv_buf(phba);
+
+       /* Enable appropriate host interrupts */
+       spin_lock_irq(phba->host->host_lock);
+       status = readl(phba->HCregaddr);
+       status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
+       if (psli->num_rings > 0)
+               status |= HC_R0INT_ENA;
+       if (psli->num_rings > 1)
+               status |= HC_R1INT_ENA;
+       if (psli->num_rings > 2)
+               status |= HC_R2INT_ENA;
+       if (psli->num_rings > 3)
+               status |= HC_R3INT_ENA;
+
+       writel(status, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+       spin_unlock_irq(phba->host->host_lock);
+
+       /*
+        * Setup the ring 0 (els)  timeout handler
+        */
+       timeout = phba->fc_ratov << 1;
+       phba->els_tmofunc.expires = jiffies + HZ * timeout;
+       add_timer(&phba->els_tmofunc);
+
+       lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+       pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_INIT,
+                               "%d:0454 Adapter failed to init, mbxCmd x%x "
+                               "INIT_LINK, mbxStatus x%x\n",
+                               phba->brd_no,
+                               mb->mbxCommand, mb->mbxStatus);
+
+               /* Clear all interrupt enable conditions */
+               writel(0, phba->HCregaddr);
+               readl(phba->HCregaddr); /* flush */
+               /* Clear all pending interrupts */
+               writel(0xffffffff, phba->HAregaddr);
+               readl(phba->HAregaddr); /* flush */
+
+               phba->hba_state = LPFC_HBA_ERROR;
+               mempool_free(pmb, phba->mbox_mem_pool);
+               return -EIO;
+       }
+       /* MBOX buffer will be freed in mbox compl */
+
+       i = 0;
+       while ((phba->hba_state != LPFC_HBA_READY) ||
+              (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
+              ((phba->fc_map_cnt == 0) && (i<2)) ||
+              (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
+               /* Check every second for 30 retries. */
+               i++;
+               if (i > 30) {
+                       break;
+               }
+               if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
+                       /* The link is down.  Set linkdown timeout */
+                       break;
+               }
+
+               /* Delay for 1 second to give discovery time to complete. */
+               msleep(1000);
+
+       }
+
+       /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
+        * any potential PRLIs flush through the SLI sub-system.
+        */
+       msleep(50);
+
+       return (0);
+}
+
+/************************************************************************/
+/*                                                                      */
+/*    lpfc_hba_down_prep                                                */
+/*    This routine will do LPFC uninitialization before the             */
+/*    HBA is reset when bringing down the SLI Layer. This will be       */
+/*    initialized as a SLI layer callback routine.                      */
+/*    This routine returns 0 on success. Any other return value         */
+/*    indicates an error.                                               */
+/*                                                                      */
+/************************************************************************/
+int
+lpfc_hba_down_prep(struct lpfc_hba * phba)
+{
+       /* Disable interrupts */
+       writel(0, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+
+       /* Cleanup potential discovery resources */
+       lpfc_els_flush_rscn(phba);
+       lpfc_els_flush_cmd(phba);
+       lpfc_disc_flush_list(phba);
+
+       return (0);
+}
+
+/************************************************************************/
+/*                                                                      */
+/*    lpfc_handle_eratt                                                 */
+/*    This routine will handle processing a Host Attention              */
+/*    Error Status event. This will be initialized                      */
+/*    as a SLI layer callback routine.                                  */
+/*                                                                      */
+/************************************************************************/
+void
+lpfc_handle_eratt(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring  *pring;
+
+       /*
+        * If a reset is sent to the HBA restore PCI configuration registers.
+        */
+       if ( phba->hba_state == LPFC_INIT_START ) {
+               mdelay(1);
+               readl(phba->HCregaddr); /* flush */
+               writel(0, phba->HCregaddr);
+               readl(phba->HCregaddr); /* flush */
+
+               /* Restore PCI cmd register */
+               pci_write_config_word(phba->pcidev,
+                                     PCI_COMMAND, phba->pci_cfg_value);
+       }
+
+       if (phba->work_hs & HS_FFER6) {
+               /* Re-establishing Link */
+               lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+                               "%d:1301 Re-establishing Link "
+                               "Data: x%x x%x x%x\n",
+                               phba->brd_no, phba->work_hs,
+                               phba->work_status[0], phba->work_status[1]);
+               spin_lock_irq(phba->host->host_lock);
+               phba->fc_flag |= FC_ESTABLISH_LINK;
+               spin_unlock_irq(phba->host->host_lock);
+
+               /*
+                * The firmware stops when it triggers an error attention
+                * with HS_FFER6, which can cause it to drop outstanding
+                * I/Os. Fail the iocbs (I/Os) on the txcmplq so the SCSI
+                * layer can retry them after the link is re-established.
+                */
+               pring = &psli->ring[psli->fcp_ring];
+               lpfc_sli_abort_iocb_ring(phba, pring);
+
+
+               /*
+                * There was a firmware error.  Take the hba offline and then
+                * attempt to restart it.
+                */
+               lpfc_offline(phba);
+               if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
+                       mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
+                       return;
+               }
+       } else {
+               /* The if clause above forces this code path when the status
+                * failure is a value other than FFER6. Do not call the offline
+                * routine twice. This is the adapter hardware error path.
+                */
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "%d:0457 Adapter Hardware Error "
+                               "Data: x%x x%x x%x\n",
+                               phba->brd_no, phba->work_hs,
+                               phba->work_status[0], phba->work_status[1]);
+
+               lpfc_offline(phba);
+
+               /*
+                * Restart all traffic to this host.  Since the fc_transport
+                * block functions (future) were not called in lpfc_offline,
+                * don't call them here.
+                */
+               scsi_unblock_requests(phba->host);
+       }
+}
+
+/************************************************************************/
+/*                                                                      */
+/*    lpfc_handle_latt                                                  */
+/*    This routine will handle processing a Host Attention              */
+/*    Link Status event. This will be initialized                       */
+/*    as a SLI layer callback routine.                                  */
+/*                                                                      */
+/************************************************************************/
+void
+lpfc_handle_latt(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       LPFC_MBOXQ_t *pmb;
+       volatile uint32_t control;
+       struct lpfc_dmabuf *mp;
+       int rc = -ENOMEM;
+
+       pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb)
+               goto lpfc_handle_latt_err_exit;
+
+       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (!mp)
+               goto lpfc_handle_latt_free_pmb;
+
+       mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+       if (!mp->virt)
+               goto lpfc_handle_latt_free_mp;
+
+       rc = -EIO;
+
+
+       psli->slistat.link_event++;
+       lpfc_read_la(phba, pmb, mp);
+       pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
+       rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
+       if (rc == MBX_NOT_FINISHED)
+               goto lpfc_handle_latt_free_mp;
+
+       /* Clear Link Attention in HA REG */
+       spin_lock_irq(phba->host->host_lock);
+       writel(HA_LATT, phba->HAregaddr);
+       readl(phba->HAregaddr); /* flush */
+       spin_unlock_irq(phba->host->host_lock);
+
+       return;
+
+lpfc_handle_latt_free_mp:
+       kfree(mp);
+lpfc_handle_latt_free_pmb:
+       kfree(pmb);
+lpfc_handle_latt_err_exit:
+       /* Enable Link attention interrupts */
+       spin_lock_irq(phba->host->host_lock);
+       psli->sli_flag |= LPFC_PROCESS_LA;
+       control = readl(phba->HCregaddr);
+       control |= HC_LAINT_ENA;
+       writel(control, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+
+       /* Clear Link Attention in HA REG */
+       writel(HA_LATT, phba->HAregaddr);
+       readl(phba->HAregaddr); /* flush */
+       spin_unlock_irq(phba->host->host_lock);
+       lpfc_linkdown(phba);
+       phba->hba_state = LPFC_HBA_ERROR;
+
+       /* The other case is an error from issue_mbox */
+       if (rc == -ENOMEM)
+               lpfc_printf_log(phba,
+                               KERN_WARNING,
+                               LOG_MBOX,
+                               "%d:0300 READ_LA: no buffers\n",
+                               phba->brd_no);
+
+       return;
+}
+
+/************************************************************************/
+/*                                                                      */
+/*   lpfc_parse_vpd                                                     */
+/*   This routine will parse the VPD data                               */
+/*                                                                      */
+/************************************************************************/
+static int
+lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
+{
+       uint8_t lenlo, lenhi;
+       uint32_t Length;
+       int i, j;
+       int finished = 0;
+       int index = 0;
+
+       if (!vpd)
+               return 0;
+
+       /* Vital Product */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_INIT,
+                       "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
+                       (uint32_t) vpd[3]);
+       do {
+               switch (vpd[index]) {
+               case 0x82:
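+                       /* Large resource tag: Identifier String; skip its
+                        * length bytes and data.
+                        */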
+                       index += 1;
+                       lenlo = vpd[index];
+                       index += 1;
+                       lenhi = vpd[index];
+                       index += 1;
+                       i = ((((unsigned short)lenhi) << 8) + lenlo);
+                       index += i;
+                       break;
+               case 0x90:
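+                       /* Large resource tag: VPD-R (read-only fields such as
+                        * SN and the V1-V4 vendor-specific keywords).
+                        */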
+                       index += 1;
+                       lenlo = vpd[index];
+                       index += 1;
+                       lenhi = vpd[index];
+                       index += 1;
+                       Length = ((((unsigned short)lenhi) << 8) + lenlo);
+
+                       while (Length > 0) {
+                       /* Look for Serial Number */
+                       if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
+                               index += 2;
+                               i = vpd[index];
+                               index += 1;
+                               j = 0;
+                               Length -= (3+i);
+                               while(i--) {
+                                       phba->SerialNumber[j++] = vpd[index++];
+                                       if (j == 31)
+                                               break;
+                               }
+                               phba->SerialNumber[j] = 0;
+                               continue;
+                       }
+                       else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
+                               phba->vpd_flag |= VPD_MODEL_DESC;
+                               index += 2;
+                               i = vpd[index];
+                               index += 1;
+                               j = 0;
+                               Length -= (3+i);
+                               while(i--) {
+                                       phba->ModelDesc[j++] = vpd[index++];
+                                       if (j == 255)
+                                               break;
+                               }
+                               phba->ModelDesc[j] = 0;
+                               continue;
+                       }
+                       else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
+                               phba->vpd_flag |= VPD_MODEL_NAME;
+                               index += 2;
+                               i = vpd[index];
+                               index += 1;
+                               j = 0;
+                               Length -= (3+i);
+                               while(i--) {
+                                       phba->ModelName[j++] = vpd[index++];
+                                       if (j == 79)
+                                               break;
+                               }
+                               phba->ModelName[j] = 0;
+                               continue;
+                       }
+                       else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
+                               phba->vpd_flag |= VPD_PROGRAM_TYPE;
+                               index += 2;
+                               i = vpd[index];
+                               index += 1;
+                               j = 0;
+                               Length -= (3+i);
+                               while(i--) {
+                                       phba->ProgramType[j++] = vpd[index++];
+                                       if (j == 255)
+                                               break;
+                               }
+                               phba->ProgramType[j] = 0;
+                               continue;
+                       }
+                       else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
+                               phba->vpd_flag |= VPD_PORT;
+                               index += 2;
+                               i = vpd[index];
+                               index += 1;
+                               j = 0;
+                               Length -= (3+i);
+                               while(i--) {
+                               phba->Port[j++] = vpd[index++];
+                               if (j == 19)
+                                       break;
+                               }
+                               phba->Port[j] = 0;
+                               continue;
+                       }
+                       else {
+                               index += 2;
+                               i = vpd[index];
+                               index += 1;
+                               index += i;
+                               Length -= (3 + i);
+                       }
+               }
+               finished = 0;
+               break;
+               case 0x78:
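+                       /* End tag: no more VPD data to parse */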
+                       finished = 1;
+                       break;
+               default:
+                       index ++;
+                       break;
+               }
+       } while (!finished && (index < 108));
+
+       return(1);
+}
+
+static void
+lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
+{
+       lpfc_vpd_t *vp;
+       uint32_t id;
+       char str[16];
+
+       vp = &phba->vpd;
+       pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
+
+       switch ((id >> 16) & 0xffff) {
+       case PCI_DEVICE_ID_SUPERFLY:
+               if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
+                       strcpy(str, "LP7000 1");
+               else
+                       strcpy(str, "LP7000E 1");
+               break;
+       case PCI_DEVICE_ID_DRAGONFLY:
+               strcpy(str, "LP8000 1");
+               break;
+       case PCI_DEVICE_ID_CENTAUR:
+               if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
+                       strcpy(str, "LP9002 2");
+               else
+                       strcpy(str, "LP9000 1");
+               break;
+       case PCI_DEVICE_ID_RFLY:
+               strcpy(str, "LP952 2");
+               break;
+       case PCI_DEVICE_ID_PEGASUS:
+               strcpy(str, "LP9802 2");
+               break;
+       case PCI_DEVICE_ID_THOR:
+               strcpy(str, "LP10000 2");
+               break;
+       case PCI_DEVICE_ID_VIPER:
+               strcpy(str, "LPX1000 10");
+               break;
+       case PCI_DEVICE_ID_PFLY:
+               strcpy(str, "LP982 2");
+               break;
+       case PCI_DEVICE_ID_TFLY:
+               strcpy(str, "LP1050 2");
+               break;
+       case PCI_DEVICE_ID_HELIOS:
+               strcpy(str, "LP11000 4");
+               break;
+       case PCI_DEVICE_ID_BMID:
+               strcpy(str, "LP1150 4");
+               break;
+       case PCI_DEVICE_ID_BSMB:
+               strcpy(str, "LP111 4");
+               break;
+       case PCI_DEVICE_ID_ZEPHYR:
+               strcpy(str, "LP11000e 4");
+               break;
+       case PCI_DEVICE_ID_ZMID:
+               strcpy(str, "LP1150e 4");
+               break;
+       case PCI_DEVICE_ID_ZSMB:
+               strcpy(str, "LP111e 4");
+               break;
+       case PCI_DEVICE_ID_LP101:
+               strcpy(str, "LP101 2");
+               break;
+       case PCI_DEVICE_ID_LP10000S:
+               strcpy(str, "LP10000-S 2");
+               break;
+       }
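+       /* str holds "<model> <max speed in Gb>"; the %s conversion copies
+        * only the model name, while the description uses the full string.
+        */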
+       if (mdp)
+               sscanf(str, "%s", mdp);
+       if (descp)
+               sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
+                       "Channel Adapter", str);
+}
+
+/**************************************************/
+/*   lpfc_post_buffer                             */
+/*                                                */
+/*   This routine will post count buffers to the  */
+/*   ring with the QUE_RING_BUF_CN command. This  */
+/*   allows 3 buffers / command to be posted.     */
+/*   Returns the number of buffers NOT posted.    */
+/**************************************************/
+int
+lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
+                int type)
+{
+       IOCB_t *icmd;
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       struct lpfc_iocbq *iocb = NULL;
+       struct lpfc_dmabuf *mp1, *mp2;
+
+       cnt += pring->missbufcnt;
+
+       /* While there are buffers to post */
+       while (cnt > 0) {
+               /* Allocate buffer for  command iocb */
+               spin_lock_irq(phba->host->host_lock);
+               list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list);
+               spin_unlock_irq(phba->host->host_lock);
+               if (iocb == NULL) {
+                       pring->missbufcnt = cnt;
+                       return cnt;
+               }
+               memset(iocb, 0, sizeof (struct lpfc_iocbq));
+               icmd = &iocb->iocb;
+
+               /* 2 buffers can be posted per command */
+               /* Allocate buffer to post */
+               mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+               if (mp1)
+                   mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+                                               &mp1->phys);
+               if (mp1 == 0 || mp1->virt == 0) {
+                       if (mp1)
+                               kfree(mp1);
+                       spin_lock_irq(phba->host->host_lock);
+                       list_add_tail(&iocb->list, lpfc_iocb_list);
+                       spin_unlock_irq(phba->host->host_lock);
+                       pring->missbufcnt = cnt;
+                       return cnt;
+               }
+
+               INIT_LIST_HEAD(&mp1->list);
+               /* Allocate buffer to post */
+               if (cnt > 1) {
+                       mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+                       if (mp2)
+                               mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+                                                           &mp2->phys);
+                       if (mp2 == 0 || mp2->virt == 0) {
+                               if (mp2)
+                                       kfree(mp2);
+                               lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+                               kfree(mp1);
+                               spin_lock_irq(phba->host->host_lock);
+                               list_add_tail(&iocb->list, lpfc_iocb_list);
+                               spin_unlock_irq(phba->host->host_lock);
+                               pring->missbufcnt = cnt;
+                               return cnt;
+                       }
+
+                       INIT_LIST_HEAD(&mp2->list);
+               } else {
+                       mp2 = NULL;
+               }
+
+               icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
+               icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
+               icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
+               icmd->ulpBdeCount = 1;
+               cnt--;
+               if (mp2) {
+                       icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
+                       icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
+                       icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
+                       cnt--;
+                       icmd->ulpBdeCount = 2;
+               }
+
+               icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
+               icmd->ulpLe = 1;
+
+               spin_lock_irq(phba->host->host_lock);
+               if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
+                       lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+                       kfree(mp1);
+                       cnt++;
+                       if (mp2) {
+                               lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
+                               kfree(mp2);
+                               cnt++;
+                       }
+                       list_add_tail(&iocb->list, lpfc_iocb_list);
+                       pring->missbufcnt = cnt;
+                       spin_unlock_irq(phba->host->host_lock);
+                       return cnt;
+               }
+               spin_unlock_irq(phba->host->host_lock);
+               lpfc_sli_ringpostbuf_put(phba, pring, mp1);
+               if (mp2) {
+                       lpfc_sli_ringpostbuf_put(phba, pring, mp2);
+               }
+       }
+       pring->missbufcnt = 0;
+       return 0;
+}
+
+/************************************************************************/
+/*                                                                      */
+/*   lpfc_post_rcv_buf                                                  */
+/*   This routine post initial rcv buffers to the configured rings      */
+/*                                                                      */
+/************************************************************************/
+static int
+lpfc_post_rcv_buf(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+
+       /* Ring 0, ELS / CT buffers */
+       lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
+       /* Ring 2 - FCP no buffers needed */
+
+       return 0;
+}
+
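+/* S(N,V) rotates the 32-bit value V left by N bits (used by the SHA-1 helpers below) */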
+#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
+
+/************************************************************************/
+/*                                                                      */
+/*   lpfc_sha_init                                                      */
+/*                                                                      */
+/************************************************************************/
+static void
+lpfc_sha_init(uint32_t * HashResultPointer)
+{
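+       /* The five standard SHA-1 initial hash values (FIPS 180-1) */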
+       HashResultPointer[0] = 0x67452301;
+       HashResultPointer[1] = 0xEFCDAB89;
+       HashResultPointer[2] = 0x98BADCFE;
+       HashResultPointer[3] = 0x10325476;
+       HashResultPointer[4] = 0xC3D2E1F0;
+}
+
+/************************************************************************/
+/*                                                                      */
+/*   lpfc_sha_iterate                                                   */
+/*                                                                      */
+/************************************************************************/
+static void
+lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
+{
+       int t;
+       uint32_t TEMP;
+       uint32_t A, B, C, D, E;
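+       /* Expand the 16-word working block into the 80-word SHA-1
+        * message schedule.
+        */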
+       t = 16;
+       do {
+               HashWorkingPointer[t] =
+                   S(1,
+                     HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
+                                                                    8] ^
+                     HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
+       } while (++t <= 79);
+       t = 0;
+       A = HashResultPointer[0];
+       B = HashResultPointer[1];
+       C = HashResultPointer[2];
+       D = HashResultPointer[3];
+       E = HashResultPointer[4];
+
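+       /* Run the 80 SHA-1 compression rounds with the standard round
+        * functions and constants.
+        */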
+       do {
+               if (t < 20) {
+                       TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
+               } else if (t < 40) {
+                       TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
+               } else if (t < 60) {
+                       TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
+               } else {
+                       TEMP = (B ^ C ^ D) + 0xCA62C1D6;
+               }
+               TEMP += S(5, A) + E + HashWorkingPointer[t];
+               E = D;
+               D = C;
+               C = S(30, B);
+               B = A;
+               A = TEMP;
+       } while (++t <= 79);
+
+       HashResultPointer[0] += A;
+       HashResultPointer[1] += B;
+       HashResultPointer[2] += C;
+       HashResultPointer[3] += D;
+       HashResultPointer[4] += E;
+
+}
+
+/************************************************************************/
+/*                                                                      */
+/*   lpfc_challenge_key                                                 */
+/*                                                                      */
+/************************************************************************/
+static void
+lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
+{
+       *HashWorking = (*RandomChallenge ^ *HashWorking);
+}
+
+/************************************************************************/
+/*                                                                      */
+/*   lpfc_hba_init                                                      */
+/*                                                                      */
+/************************************************************************/
+void
+lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
+{
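+       /* Derive the hbainit challenge response: seed an 80-word buffer
+        * with the WWNN, XOR in the adapter's random challenge data
+        * (RandomData), then run one SHA-1 pass over it.
+        */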
+       int t;
+       uint32_t *HashWorking;
+       uint32_t *pwwnn = phba->wwnn;
+
+       HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
+       if (!HashWorking)
+               return;
+
+       memset(HashWorking, 0, (80 * sizeof(uint32_t)));
+       HashWorking[0] = HashWorking[78] = *pwwnn++;
+       HashWorking[1] = HashWorking[79] = *pwwnn;
+
+       for (t = 0; t < 7; t++)
+               lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
+
+       lpfc_sha_init(hbainit);
+       lpfc_sha_iterate(hbainit, HashWorking);
+       kfree(HashWorking);
+}
+
+static void
+lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
+{
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+
+       /* clean up phba - lpfc specific */
+       lpfc_can_disctmo(phba);
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
+                               nlp_listp) {
+               lpfc_nlp_remove(phba, ndlp);
+       }
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
+                                nlp_listp) {
+               lpfc_nlp_remove(phba, ndlp);
+       }
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+                               nlp_listp) {
+               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       }
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+                               nlp_listp) {
+               lpfc_nlp_remove(phba, ndlp);
+       }
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+                               nlp_listp) {
+               lpfc_nlp_remove(phba, ndlp);
+       }
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
+                               nlp_listp) {
+               lpfc_nlp_remove(phba, ndlp);
+       }
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
+                               nlp_listp) {
+               lpfc_nlp_remove(phba, ndlp);
+       }
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+                               nlp_listp) {
+               lpfc_nlp_remove(phba, ndlp);
+       }
+
+       INIT_LIST_HEAD(&phba->fc_nlpmap_list);
+       INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
+       INIT_LIST_HEAD(&phba->fc_unused_list);
+       INIT_LIST_HEAD(&phba->fc_plogi_list);
+       INIT_LIST_HEAD(&phba->fc_adisc_list);
+       INIT_LIST_HEAD(&phba->fc_reglogin_list);
+       INIT_LIST_HEAD(&phba->fc_prli_list);
+       INIT_LIST_HEAD(&phba->fc_npr_list);
+
+       phba->fc_map_cnt   = 0;
+       phba->fc_unmap_cnt = 0;
+       phba->fc_plogi_cnt = 0;
+       phba->fc_adisc_cnt = 0;
+       phba->fc_reglogin_cnt = 0;
+       phba->fc_prli_cnt  = 0;
+       phba->fc_npr_cnt   = 0;
+       phba->fc_unused_cnt= 0;
+       return;
+}
+
+static void
+lpfc_establish_link_tmo(unsigned long ptr)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+       unsigned long iflag;
+
+
+       /* Re-establishing Link, timer expired */
+       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                       "%d:1300 Re-establishing Link, timer expired "
+                       "Data: x%x x%x\n",
+                       phba->brd_no, phba->fc_flag, phba->hba_state);
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       phba->fc_flag &= ~FC_ESTABLISH_LINK;
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+static int
+lpfc_stop_timer(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+
+       /* Instead of a timer, this has been converted to a
+        * deferred processing list.
+        */
+       while (!list_empty(&phba->freebufList)) {
+
+               struct lpfc_dmabuf *mp = NULL;
+
+               list_remove_head((&phba->freebufList), mp,
+                                struct lpfc_dmabuf, list);
+               if (mp) {
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                       kfree(mp);
+               }
+       }
+
+       del_timer_sync(&phba->fc_estabtmo);
+       del_timer_sync(&phba->fc_disctmo);
+       del_timer_sync(&phba->fc_fdmitmo);
+       del_timer_sync(&phba->els_tmofunc);
+       psli = &phba->sli;
+       del_timer_sync(&psli->mbox_tmo);
+       return(1);
+}
+
+int
+lpfc_online(struct lpfc_hba * phba)
+{
+       if (!phba)
+               return 0;
+
+       if (!(phba->fc_flag & FC_OFFLINE_MODE))
+               return 0;
+
+       lpfc_printf_log(phba,
+                      KERN_WARNING,
+                      LOG_INIT,
+                      "%d:0458 Bring Adapter online\n",
+                      phba->brd_no);
+
+       if (!lpfc_sli_queue_setup(phba))
+               return 1;
+
+       if (lpfc_sli_hba_setup(phba))   /* Initialize the HBA */
+               return 1;
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag &= ~FC_OFFLINE_MODE;
+       spin_unlock_irq(phba->host->host_lock);
+
+       /*
+        * Restart all traffic to this host.  Since the fc_transport block
+        * functions (future) were not called in lpfc_offline, don't call them
+        * here.
+        */
+       scsi_unblock_requests(phba->host);
+       return 0;
+}
+
+int
+lpfc_offline(struct lpfc_hba * phba)
+{
+       struct lpfc_sli_ring *pring;
+       struct lpfc_sli *psli;
+       unsigned long iflag;
+       int i = 0;
+
+       if (!phba)
+               return 0;
+
+       if (phba->fc_flag & FC_OFFLINE_MODE)
+               return 0;
+
+       /*
+        * Don't call the fc_transport block api (future).  The device is
+        * going offline and causing a timer to fire in the midlayer is
+        * unproductive.  Just block all new requests until the driver
+        * comes back online.
+        */
+       scsi_block_requests(phba->host);
+       psli = &phba->sli;
+       pring = &psli->ring[psli->fcp_ring];
+
+       lpfc_linkdown(phba);
+
+       /* The linkdown event takes 30 seconds to timeout. */
+       while (pring->txcmplq_cnt) {
+               mdelay(10);
+               if (i++ > 3000)
+                       break;
+       }
+
+       /* stop all timers associated with this hba */
+       lpfc_stop_timer(phba);
+       phba->work_hba_events = 0;
+
+       lpfc_printf_log(phba,
+                      KERN_WARNING,
+                      LOG_INIT,
+                      "%d:0460 Bring Adapter offline\n",
+                      phba->brd_no);
+
+       /* Bring down the SLI Layer and cleanup.  The HBA is offline
+          now.  */
+       lpfc_sli_hba_down(phba);
+       lpfc_cleanup(phba, 1);
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       phba->fc_flag |= FC_OFFLINE_MODE;
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+       return 0;
+}
+
+/******************************************************************************
+* Function name: lpfc_scsi_free
+*
+* Description: Called from lpfc_pci_remove_one to free internal driver resources
+*
+******************************************************************************/
+static int
+lpfc_scsi_free(struct lpfc_hba * phba)
+{
+       struct lpfc_scsi_buf *sb, *sb_next;
+       struct lpfc_iocbq *io, *io_next;
+
+       spin_lock_irq(phba->host->host_lock);
+       /* Release all the lpfc_scsi_bufs maintained by this host. */
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+               list_del(&sb->list);
+               pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+                                                               sb->dma_handle);
+               kfree(sb);
+               phba->total_scsi_bufs--;
+       }
+
+       /* Release all the lpfc_iocbq entries maintained by this host. */
+       list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
+               list_del(&io->list);
+               kfree(io);
+               phba->total_iocbq_bufs--;
+       }
+
+       spin_unlock_irq(phba->host->host_lock);
+
+       return 0;
+}
+
+
+static int __devinit
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+       struct Scsi_Host *host;
+       struct lpfc_hba  *phba;
+       struct lpfc_sli  *psli;
+       struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+       unsigned long bar0map_len, bar2map_len;
+       int error = -ENODEV, retval;
+       int i;
+       u64 wwname;
+
+       if (pci_enable_device(pdev))
+               goto out;
+       if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
+               goto out_disable_device;
+
+       host = scsi_host_alloc(&lpfc_template,
+                       sizeof (struct lpfc_hba) + sizeof (unsigned long));
+       if (!host)
+               goto out_release_regions;
+
+       phba = (struct lpfc_hba*)host->hostdata;
+       memset(phba, 0, sizeof (struct lpfc_hba));
+       phba->link_stats = (void *)&phba[1];
+       phba->host = host;
+
+       phba->fc_flag |= FC_LOADING;
+       phba->pcidev = pdev;
+
+       /* Assign an unused board number */
+       if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
+               goto out_put_host;
+
+       error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
+       if (error)
+               goto out_put_host;
+
+       host->unique_id = phba->brd_no;
+
+       INIT_LIST_HEAD(&phba->ctrspbuflist);
+       INIT_LIST_HEAD(&phba->rnidrspbuflist);
+       INIT_LIST_HEAD(&phba->freebufList);
+
+       /* Initialize timers used by driver */
+       init_timer(&phba->fc_estabtmo);
+       phba->fc_estabtmo.function = lpfc_establish_link_tmo;
+       phba->fc_estabtmo.data = (unsigned long)phba;
+       init_timer(&phba->fc_disctmo);
+       phba->fc_disctmo.function = lpfc_disc_timeout;
+       phba->fc_disctmo.data = (unsigned long)phba;
+
+       init_timer(&phba->fc_fdmitmo);
+       phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
+       phba->fc_fdmitmo.data = (unsigned long)phba;
+       init_timer(&phba->els_tmofunc);
+       phba->els_tmofunc.function = lpfc_els_timeout;
+       phba->els_tmofunc.data = (unsigned long)phba;
+       psli = &phba->sli;
+       init_timer(&psli->mbox_tmo);
+       psli->mbox_tmo.function = lpfc_mbox_timeout;
+       psli->mbox_tmo.data = (unsigned long)phba;
+
+       /*
+        * Get all the module params for configuring this host and then
+        * establish the host parameters.
+        */
+       lpfc_get_cfgparam(phba);
+
+       host->max_id = LPFC_MAX_TARGET;
+       host->max_lun = phba->cfg_max_luns;
+       host->this_id = -1;
+
+       /* Initialize all internally managed lists. */
+       INIT_LIST_HEAD(&phba->fc_nlpmap_list);
+       INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
+       INIT_LIST_HEAD(&phba->fc_unused_list);
+       INIT_LIST_HEAD(&phba->fc_plogi_list);
+       INIT_LIST_HEAD(&phba->fc_adisc_list);
+       INIT_LIST_HEAD(&phba->fc_reglogin_list);
+       INIT_LIST_HEAD(&phba->fc_prli_list);
+       INIT_LIST_HEAD(&phba->fc_npr_list);
+
+
+       pci_set_master(pdev);
+       retval = pci_set_mwi(pdev);
+       if (retval)
+               dev_printk(KERN_WARNING, &pdev->dev,
+                          "Warning: pci_set_mwi returned %d\n", retval);
+
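+       /* Prefer 64-bit DMA addressing; fall back to a 32-bit mask if the
+        * platform cannot support it.
+        */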
+       if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
+               if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
+                       goto out_idr_remove;
+
+       /*
+        * Get the bus address of Bar0 and Bar2 and the number of bytes
+        * required by each mapping.
+        */
+       phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
+       bar0map_len        = pci_resource_len(phba->pcidev, 0);
+
+       phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
+       bar2map_len        = pci_resource_len(phba->pcidev, 2);
+
+       /* Map HBA SLIM and Control Registers to a kernel virtual address. */
+       phba->slim_memmap_p      = ioremap(phba->pci_bar0_map, bar0map_len);
+       phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+
+       /* Allocate memory for SLI-2 structures */
+       phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
+                                         &phba->slim2p_mapping, GFP_KERNEL);
+       if (!phba->slim2p)
+               goto out_iounmap;
+
+
+       /* Initialize the SLI Layer to run with lpfc HBAs. */
+       lpfc_sli_setup(phba);
+       lpfc_sli_queue_setup(phba);
+
+       error = lpfc_mem_alloc(phba);
+       if (error)
+               goto out_free_slim;
+
+       /* Initialize and populate the iocb list per host.  */
+       INIT_LIST_HEAD(&phba->lpfc_iocb_list);
+       for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
+               iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+               if (iocbq_entry == NULL) {
+                       printk(KERN_ERR "%s: only allocated %d iocbs of "
+                               "expected %d count. Unloading driver.\n",
+                               __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
+                       error = -ENOMEM;
+                       goto out_free_iocbq;
+               }
+
+               memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
+               spin_lock_irq(phba->host->host_lock);
+               list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
+               phba->total_iocbq_bufs++;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+
+       /* Initialize HBA structure */
+       phba->fc_edtov = FF_DEF_EDTOV;
+       phba->fc_ratov = FF_DEF_RATOV;
+       phba->fc_altov = FF_DEF_ALTOV;
+       phba->fc_arbtov = FF_DEF_ARBTOV;
+
+       INIT_LIST_HEAD(&phba->work_list);
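+       /* Host Attention events of interest: error, mailbox and link
+        * attention, plus receive activity on the ELS ring.
+        */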
+       phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
+       phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+
+       /* Startup the kernel thread for this host adapter. */
+       phba->worker_thread = kthread_run(lpfc_do_work, phba,
+                                      "lpfc_worker_%d", phba->brd_no);
+       if (IS_ERR(phba->worker_thread)) {
+               error = PTR_ERR(phba->worker_thread);
+               goto out_free_iocbq;
+       }
+
+       /* We can rely on a queue depth attribute only after SLI HBA setup */
+       host->can_queue = phba->cfg_hba_queue_depth - 10;
+
+       /* Tell the midlayer we support 16 byte commands */
+       host->max_cmd_len = 16;
+
+       /* Initialize the list of scsi buffers used by driver for scsi IO. */
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+       host->transportt = lpfc_transport_template;
+       host->hostdata[0] = (unsigned long)phba;
+       pci_set_drvdata(pdev, host);
+       error = scsi_add_host(host, &pdev->dev);
+       if (error)
+               goto out_kthread_stop;
+
+       error = lpfc_alloc_sysfs_attr(phba);
+       if (error)
+               goto out_kthread_stop;
+
+       error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
+                                                       LPFC_DRIVER_NAME, phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "%d:0451 Enable interrupt handler failed\n",
+                       phba->brd_no);
+               goto out_free_sysfs_attr;
+       }
+       phba->MBslimaddr = phba->slim_memmap_p;
+       phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+       phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+       phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+       phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+       error = lpfc_sli_hba_setup(phba);
+       if (error)
+               goto out_free_irq;
+
+       /*
+        * Set fixed host attributes.
+        * This must be done after lpfc_sli_hba_setup().
+        */
+
+       memcpy(&wwname, &phba->fc_nodename, sizeof(u64));
+       fc_host_node_name(host) = be64_to_cpu(wwname);
+       memcpy(&wwname, &phba->fc_portname, sizeof(u64));
+       fc_host_port_name(host) = be64_to_cpu(wwname);
+       fc_host_supported_classes(host) = FC_COS_CLASS3;
+
+       memset(fc_host_supported_fc4s(host), 0,
+               sizeof(fc_host_supported_fc4s(host)));
+       fc_host_supported_fc4s(host)[2] = 1;
+       fc_host_supported_fc4s(host)[7] = 1;
+
+       lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
+
+       fc_host_supported_speeds(host) = 0;
+       switch (FC_JEDEC_ID(phba->vpd.rev.biuRev)) {
+       case VIPER_JEDEC_ID:
+               fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
+               break;
+       case HELIOS_JEDEC_ID:
+               fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
+               /* Fall through */
+       case CENTAUR_2G_JEDEC_ID:
+       case PEGASUS_JEDEC_ID:
+       case THOR_JEDEC_ID:
+               fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
+               /* Fall through */
+       default:
+               fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;
+       }
+
+       fc_host_maxframe_size(host) =
+               ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
+                (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
+
+       /* This value is also unchanging */
+       memset(fc_host_active_fc4s(host), 0,
+               sizeof(fc_host_active_fc4s(host)));
+       fc_host_active_fc4s(host)[2] = 1;
+       fc_host_active_fc4s(host)[7] = 1;
+
+       spin_lock_irq(phba->host->host_lock);
+       phba->fc_flag &= ~FC_LOADING;
+       spin_unlock_irq(phba->host->host_lock);
+       return 0;
+
+out_free_irq:
+       lpfc_stop_timer(phba);
+       phba->work_hba_events = 0;
+       free_irq(phba->pcidev->irq, phba);
+out_free_sysfs_attr:
+       lpfc_free_sysfs_attr(phba);
+out_kthread_stop:
+       kthread_stop(phba->worker_thread);
+out_free_iocbq:
+       list_for_each_entry_safe(iocbq_entry, iocbq_next,
+                                               &phba->lpfc_iocb_list, list) {
+               spin_lock_irq(phba->host->host_lock);
+               kfree(iocbq_entry);
+               phba->total_iocbq_bufs--;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+       lpfc_mem_free(phba);
+out_free_slim:
+       dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
+                                                       phba->slim2p_mapping);
+out_iounmap:
+       iounmap(phba->ctrl_regs_memmap_p);
+       iounmap(phba->slim_memmap_p);
+out_idr_remove:
+       idr_remove(&lpfc_hba_index, phba->brd_no);
+out_put_host:
+       scsi_host_put(host);
+out_release_regions:
+       pci_release_regions(pdev);
+out_disable_device:
+       pci_disable_device(pdev);
+out:
+       return error;
+}
+
+static void __devexit
+lpfc_pci_remove_one(struct pci_dev *pdev)
+{
+       struct Scsi_Host   *host = pci_get_drvdata(pdev);
+       struct lpfc_hba    *phba = (struct lpfc_hba *)host->hostdata[0];
+       unsigned long iflag;
+
+       lpfc_free_sysfs_attr(phba);
+
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       phba->fc_flag |= FC_UNLOADING;
+
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+
+       fc_remove_host(phba->host);
+       scsi_remove_host(phba->host);
+
+       kthread_stop(phba->worker_thread);
+
+       /*
+        * Bring down the SLI Layer. This step disables all interrupts,
+        * clears the rings, discards all mailbox commands, and resets
+        * the HBA.
+        */
+       lpfc_sli_hba_down(phba);
+
+       /* Release the irq reservation */
+       free_irq(phba->pcidev->irq, phba);
+
+       lpfc_cleanup(phba, 0);
+       lpfc_stop_timer(phba);
+       phba->work_hba_events = 0;
+
+       /*
+        * Call scsi_free before mem_free since scsi bufs are released to their
+        * corresponding pools here.
+        */
+       lpfc_scsi_free(phba);
+       lpfc_mem_free(phba);
+
+       /* Free resources associated with SLI2 interface */
+       dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+                         phba->slim2p, phba->slim2p_mapping);
+
+       /* unmap adapter SLIM and Control Registers */
+       iounmap(phba->ctrl_regs_memmap_p);
+       iounmap(phba->slim_memmap_p);
+
+       pci_release_regions(phba->pcidev);
+       pci_disable_device(phba->pcidev);
+
+       idr_remove(&lpfc_hba_index, phba->brd_no);
+       scsi_host_put(phba->host);
+
+       pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_device_id lpfc_id_table[] = {
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, lpfc_id_table);
+
+static struct pci_driver lpfc_driver = {
+       .name           = LPFC_DRIVER_NAME,
+       .id_table       = lpfc_id_table,
+       .probe          = lpfc_pci_probe_one,
+       .remove         = __devexit_p(lpfc_pci_remove_one),
+};
+
+static int __init
+lpfc_init(void)
+{
+       int error = 0;
+
+       printk(LPFC_MODULE_DESC "\n");
+
+       lpfc_transport_template =
+                               fc_attach_transport(&lpfc_transport_functions);
+       if (!lpfc_transport_template)
+               return -ENOMEM;
+       error = pci_register_driver(&lpfc_driver);
+       if (error)
+               fc_release_transport(lpfc_transport_template);
+
+       return error;
+}
+
+static void __exit
+lpfc_exit(void)
+{
+       pci_unregister_driver(&lpfc_driver);
+       fc_release_transport(lpfc_transport_template);
+}
+
+module_init(lpfc_init);
+module_exit(lpfc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(LPFC_MODULE_DESC);
+MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
+MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
new file mode 100644 (file)
index 0000000..a852688
--- /dev/null
@@ -0,0 +1,41 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_logmsg.h 1.32 2005/01/25 17:52:01EST sf_support Exp  $
+ */
+
+#define LOG_ELS                       0x1      /* ELS events */
+#define LOG_DISCOVERY                 0x2      /* Link discovery events */
+#define LOG_MBOX                      0x4      /* Mailbox events */
+#define LOG_INIT                      0x8      /* Initialization events */
+#define LOG_LINK_EVENT                0x10     /* Link events */
+#define LOG_IP                        0x20     /* IP traffic history */
+#define LOG_FCP                       0x40     /* FCP traffic history */
+#define LOG_NODE                      0x80     /* Node table events */
+#define LOG_MISC                      0x400    /* Miscellaneous events */
+#define LOG_SLI                       0x800    /* SLI events */
+#define LOG_CHK_COND                  0x1000   /* FCP Check condition flag */
+#define LOG_LIBDFC                    0x2000   /* Libdfc events */
+#define LOG_ALL_MSG                   0xffff   /* LOG all messages */
+
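+/*
+ * Emit a driver log message: the message is printed when its subsystem
+ * mask bit is set in the module's cfg_log_verbose setting, or
+ * unconditionally when the severity is KERN_ERR or worse (the second
+ * character of the level string is the severity digit, '3' or below).
+ * Typical call (illustrative only):
+ *   lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ *                   "%d:0405 message text\n", phba->brd_no);
+ */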
+#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
+       { if (((mask) &(phba)->cfg_log_verbose) || (level[1] <= '3')) \
+               dev_printk(level, &((phba)->pcidev)->dev, fmt, ##arg); }
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
new file mode 100644 (file)
index 0000000..8712a80
--- /dev/null
@@ -0,0 +1,646 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_mbox.c 1.85 2005/04/13 11:59:11EDT sf_support Exp  $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_compat.h"
+
+/**********************************************/
+/*  lpfc_dump_mem  Issue a DUMP MEMORY         */
+/*                mailbox command             */
+/**********************************************/
+void
+lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
+{
+       MAILBOX_t *mb;
+       void *ctx;
+
+       mb = &pmb->mb;
+       ctx = pmb->context2;
+
+       /* Setup to dump VPD region */
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+       mb->mbxCommand = MBX_DUMP_MEMORY;
+       mb->un.varDmp.cv = 1;
+       mb->un.varDmp.type = DMP_NV_PARAMS;
+       mb->un.varDmp.entry_index = offset;
+       mb->un.varDmp.region_id = DMP_REGION_VPD;
+       mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
+       mb->un.varDmp.co = 0;
+       mb->un.varDmp.resp_offset = 0;
+       pmb->context2 = ctx;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+/**********************************************/
+/*  lpfc_read_nv  Issue a READ NVPARAM        */
+/*                mailbox command             */
+/**********************************************/
+void
+lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+       mb->mbxCommand = MBX_READ_NV;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+/**********************************************/
+/*  lpfc_read_la  Issue a READ LA             */
+/*                mailbox command             */
+/**********************************************/
+int
+lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
+{
+       MAILBOX_t *mb;
+       struct lpfc_sli *psli;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       INIT_LIST_HEAD(&mp->list);
+       mb->mbxCommand = MBX_READ_LA64;
+       mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
+       mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+       mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
+
+       /* Save address for later completion and set the owner to host so that
+        * the FW knows this mailbox is available for processing.
+        */
+       pmb->context1 = (uint8_t *) mp;
+       mb->mbxOwner = OWN_HOST;
+       return (0);
+}
+
+/**********************************************/
+/*  lpfc_clear_la  Issue a CLEAR LA           */
+/*                 mailbox command            */
+/**********************************************/
+void
+lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       mb->un.varClearLA.eventTag = phba->fc_eventTag;
+       mb->mbxCommand = MBX_CLEAR_LA;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+/**************************************************/
+/*  lpfc_config_link  Issue a CONFIG LINK         */
+/*                    mailbox command             */
+/**************************************************/
+void
+lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       /* NEW_FEATURE
+        * SLI-2, Coalescing Response Feature.
+        */
+       if (phba->cfg_cr_delay) {
+               mb->un.varCfgLnk.cr = 1;
+               mb->un.varCfgLnk.ci = 1;
+               mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
+               mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
+       }
+
+       mb->un.varCfgLnk.myId = phba->fc_myDID;
+       mb->un.varCfgLnk.edtov = phba->fc_edtov;
+       mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
+       mb->un.varCfgLnk.ratov = phba->fc_ratov;
+       mb->un.varCfgLnk.rttov = phba->fc_rttov;
+       mb->un.varCfgLnk.altov = phba->fc_altov;
+       mb->un.varCfgLnk.crtov = phba->fc_crtov;
+       mb->un.varCfgLnk.citov = phba->fc_citov;
+
+       if (phba->cfg_ack0)
+               mb->un.varCfgLnk.ack0_enable = 1;
+
+       mb->mbxCommand = MBX_CONFIG_LINK;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+/**********************************************/
+/*  lpfc_init_link  Issue an INIT LINK        */
+/*                  mailbox command           */
+/**********************************************/
+void
+lpfc_init_link(struct lpfc_hba * phba,
+              LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
+{
+       lpfc_vpd_t *vpd;
+       struct lpfc_sli *psli;
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       psli = &phba->sli;
+       switch (topology) {
+       case FLAGS_TOPOLOGY_MODE_LOOP_PT:
+               mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+               mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+               break;
+       case FLAGS_TOPOLOGY_MODE_PT_PT:
+               mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+               break;
+       case FLAGS_TOPOLOGY_MODE_LOOP:
+               mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+               break;
+       case FLAGS_TOPOLOGY_MODE_PT_LOOP:
+               mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+               mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+               break;
+       }
+
+       /* NEW_FEATURE
+        * Setting up the link speed
+        */
+       vpd = &phba->vpd;
+       if (vpd->rev.feaLevelHigh >= 0x02){
+               switch(linkspeed){
+                       case LINK_SPEED_1G:
+                       case LINK_SPEED_2G:
+                       case LINK_SPEED_4G:
+                               mb->un.varInitLnk.link_flags |=
+                                                       FLAGS_LINK_SPEED;
+                               mb->un.varInitLnk.link_speed = linkspeed;
+                       break;
+                       case LINK_SPEED_AUTO:
+                       default:
+                               mb->un.varInitLnk.link_speed =
+                                                       LINK_SPEED_AUTO;
+                       break;
+               }
+
+       }
+       else
+               mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+
+       mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
+       mb->mbxOwner = OWN_HOST;
+       mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
+       return;
+}
+
+/**********************************************/
+/*  lpfc_read_sparam  Issue a READ SPARAM     */
+/*                    mailbox command         */
+/**********************************************/
+int
+lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_dmabuf *mp;
+       MAILBOX_t *mb;
+       struct lpfc_sli *psli;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       mb->mbxOwner = OWN_HOST;
+
+       /* Get a buffer to hold the HBAs Service Parameters */
+
+       if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
+           ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+               if (mp)
+                       kfree(mp);
+               mb->mbxCommand = MBX_READ_SPARM64;
+               /* READ_SPARAM: no buffers */
+               lpfc_printf_log(phba,
+                               KERN_WARNING,
+                               LOG_MBOX,
+                               "%d:0301 READ_SPARAM: no buffers\n",
+                               phba->brd_no);
+               return (1);
+       }
+       INIT_LIST_HEAD(&mp->list);
+       mb->mbxCommand = MBX_READ_SPARM64;
+       mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+       mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+       mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+       /* save address for completion */
+       pmb->context1 = mp;
+
+       return (0);
+}
+
+/********************************************/
+/*  lpfc_unreg_did  Issue a UNREG_DID       */
+/*                  mailbox command         */
+/********************************************/
+void
+lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       mb->un.varUnregDID.did = did;
+
+       mb->mbxCommand = MBX_UNREG_D_ID;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+/***********************************************/
+/*  lpfc_set_slim   Issue a mailbox            */
+/*                  command to write slim      */
+/***********************************************/
+void
+lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
+             uint32_t value)
+{
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
+       /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
+
+       /*
+        * Always turn on DELAYED ABTS for ELS timeouts
+        */
+       if ((addr == 0x052198) && (value == 0))
+               value = 1;
+
+       mb->un.varWords[0] = addr;
+       mb->un.varWords[1] = value;
+
+       mb->mbxCommand = MBX_SET_SLIM;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+/**********************************************/
+/*  lpfc_read_config  Issue a READ CONFIG     */
+/*                mailbox command             */
+/**********************************************/
+void
+lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       mb->mbxCommand = MBX_READ_CONFIG;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+/********************************************/
+/*  lpfc_reg_login  Issue a REG_LOGIN       */
+/*                  mailbox command         */
+/********************************************/
+int
+lpfc_reg_login(struct lpfc_hba * phba,
+              uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag)
+{
+       uint8_t *sparam;
+       struct lpfc_dmabuf *mp;
+       MAILBOX_t *mb;
+       struct lpfc_sli *psli;
+
+       psli = &phba->sli;
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       mb->un.varRegLogin.rpi = 0;
+       mb->un.varRegLogin.did = did;
+       mb->un.varWords[30] = flag;     /* Set flag to issue action on cmpl */
+
+       mb->mbxOwner = OWN_HOST;
+
+       /* Get a buffer to hold NPorts Service Parameters */
+       if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) ||
+           ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+               if (mp)
+                       kfree(mp);
+
+               mb->mbxCommand = MBX_REG_LOGIN64;
+               /* REG_LOGIN: no buffers */
+               lpfc_printf_log(phba,
+                              KERN_WARNING,
+                              LOG_MBOX,
+                              "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
+                              phba->brd_no,
+                              (uint32_t) did, (uint32_t) flag);
+               return (1);
+       }
+       INIT_LIST_HEAD(&mp->list);
+       sparam = mp->virt;
+
+       /* Copy params into a new buffer */
+       memcpy(sparam, param, sizeof (struct serv_parm));
+
+       /* save address for completion */
+       pmb->context1 = (uint8_t *) mp;
+
+       mb->mbxCommand = MBX_REG_LOGIN64;
+       mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+       mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+       mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+       return (0);
+}
+
+/**********************************************/
+/*  lpfc_unreg_login  Issue a UNREG_LOGIN     */
+/*                    mailbox command         */
+/**********************************************/
+void
+lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+       mb->un.varUnregLogin.rsvd1 = 0;
+
+       mb->mbxCommand = MBX_UNREG_LOGIN;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
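+/*
+ * Build the Port Control Block that CONFIG_PORT hands to the adapter:
+ * for each SLI ring, record how many command and response IOCB entries
+ * it owns and the DMA addresses of those entries within the SLIM2 area.
+ */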
+static void
+lpfc_config_pcb_setup(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring *pring;
+       PCB_t *pcbp = &phba->slim2p->pcb;
+       dma_addr_t pdma_addr;
+       uint32_t offset;
+       uint32_t iocbCnt;
+       int i;
+
+       psli->MBhostaddr = (uint32_t *)&phba->slim2p->mbx;
+       pcbp->maxRing = (psli->num_rings - 1);
+
+       iocbCnt = 0;
+       for (i = 0; i < psli->num_rings; i++) {
+               pring = &psli->ring[i];
+               /* A ring MUST have both cmd and rsp entries defined to be
+                  valid */
+               if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
+                       pcbp->rdsc[i].cmdEntries = 0;
+                       pcbp->rdsc[i].rspEntries = 0;
+                       pcbp->rdsc[i].cmdAddrHigh = 0;
+                       pcbp->rdsc[i].rspAddrHigh = 0;
+                       pcbp->rdsc[i].cmdAddrLow = 0;
+                       pcbp->rdsc[i].rspAddrLow = 0;
+                       pring->cmdringaddr = NULL;
+                       pring->rspringaddr = NULL;
+                       continue;
+               }
+               /* Command ring setup for ring */
+               pring->cmdringaddr =
+                   (void *)&phba->slim2p->IOCBs[iocbCnt];
+               pcbp->rdsc[i].cmdEntries = pring->numCiocb;
+
+               offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
+                        (uint8_t *)phba->slim2p;
+               pdma_addr = phba->slim2p_mapping + offset;
+               pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
+               pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
+               iocbCnt += pring->numCiocb;
+
+               /* Response ring setup for ring */
+               pring->rspringaddr =
+                   (void *)&phba->slim2p->IOCBs[iocbCnt];
+
+               pcbp->rdsc[i].rspEntries = pring->numRiocb;
+               offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
+                        (uint8_t *)phba->slim2p;
+               pdma_addr = phba->slim2p_mapping + offset;
+               pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
+               pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
+               iocbCnt += pring->numRiocb;
+       }
+}
+
+void
+lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb;
+
+       mb = &pmb->mb;
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+       mb->un.varRdRev.cv = 1;
+       mb->mbxCommand = MBX_READ_REV;
+       mb->mbxOwner = OWN_HOST;
+       return;
+}
+
+void
+lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
+{
+       int i;
+       MAILBOX_t *mb = &pmb->mb;
+       struct lpfc_sli *psli;
+       struct lpfc_sli_ring *pring;
+
+       memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+       mb->un.varCfgRing.ring = ring;
+       mb->un.varCfgRing.maxOrigXchg = 0;
+       mb->un.varCfgRing.maxRespXchg = 0;
+       mb->un.varCfgRing.recvNotify = 1;
+
+       psli = &phba->sli;
+       pring = &psli->ring[ring];
+       mb->un.varCfgRing.numMask = pring->num_mask;
+       mb->mbxCommand = MBX_CONFIG_RING;
+       mb->mbxOwner = OWN_HOST;
+
+       /* Is this ring configured for a specific profile */
+       if (pring->prt[0].profile) {
+               mb->un.varCfgRing.profile = pring->prt[0].profile;
+               return;
+       }
+
+       /* Otherwise we set up specific rctl / type masks for this ring */
+       for (i = 0; i < pring->num_mask; i++) {
+               mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
+               if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ)
+                       mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
+               else
+                       mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
+               mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
+               mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
+       }
+
+       return;
+}
+
+void
+lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       MAILBOX_t *mb = &pmb->mb;
+       dma_addr_t pdma_addr;
+       uint32_t bar_low, bar_high;
+       size_t offset;
+       HGP hgp;
+       void __iomem *to_slim;
+
+       memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+       mb->mbxCommand = MBX_CONFIG_PORT;
+       mb->mbxOwner = OWN_HOST;
+
+       mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
+
+       offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
+       pdma_addr = phba->slim2p_mapping + offset;
+       mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
+       mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+
+       /* Now setup pcb */
+       phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
+       phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
+
+       /* Setup Mailbox pointers */
+       phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t);
+       offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
+       pdma_addr = phba->slim2p_mapping + offset;
+       phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
+       phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
+
+       /*
+        * Setup Host Group ring pointer.
+        *
+        * For efficiency reasons, the ring get/put pointers can be
+        * placed in adapter memory (SLIM) rather than in host memory.
+        * This allows firmware to avoid PCI reads/writes when updating
+        * and checking pointers.
+        *
+        * The firmware recognizes the use of SLIM memory by comparing
+        * the address of the get/put pointers structure with that of
+        * the SLIM BAR (BAR0).
+        *
+        * Caution: be sure to use the PCI config space value of BAR0/BAR1
+        * (the hardware's view of the base address), not the OS's
+        * value of pci_resource_start() as the OS value may be a cookie
+        * for ioremap/iomap.
+        */
+
+
+       pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
+       pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+
+
+       /* mask off BAR0's flag bits 0 - 3 */
+       phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+                                       (SLIMOFF*sizeof(uint32_t));
+       if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+               phba->slim2p->pcb.hgpAddrHigh = bar_high;
+       else
+               phba->slim2p->pcb.hgpAddrHigh = 0;
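+       /*
+        * Illustrative example: if BAR0 reads 0xf8000008 from PCI config
+        * space (a 32-bit, prefetchable memory BAR), the masked base is
+        * 0xf8000000, so hgpAddrLow points SLIMOFF words into the SLIM
+        * window and hgpAddrHigh stays 0 because the BAR is not 64-bit.
+        */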
+       /* write HGP data to SLIM at the required longword offset */
+       memset(&hgp, 0, sizeof(HGP));
+       to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
+       lpfc_memcpy_to_slim(to_slim, &hgp, sizeof (HGP));
+
+       /* Setup Port Group ring pointer */
+       offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
+                (uint8_t *)phba->slim2p;
+       pdma_addr = phba->slim2p_mapping + offset;
+       phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
+       phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
+
+       /* Use callback routine to set up rings in the pcb */
+       lpfc_config_pcb_setup(phba);
+
+       /* special handling for LC HBAs */
+       if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+               uint32_t hbainit[5];
+
+               lpfc_hba_init(phba, hbainit);
+
+               memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
+       }
+
+       /* Swap PCB if needed */
+       lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
+                                                               sizeof (PCB_t));
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "%d:0405 Service Level Interface (SLI) 2 selected\n",
+                       phba->brd_no);
+}
+
+void
+lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+       struct lpfc_sli *psli;
+
+       psli = &phba->sli;
+
+       list_add_tail(&mbq->list, &psli->mboxq);
+
+       psli->mboxq_cnt++;
+
+       return;
+}
+
+LPFC_MBOXQ_t *
+lpfc_mbox_get(struct lpfc_hba * phba)
+{
+       LPFC_MBOXQ_t *mbq = NULL;
+       struct lpfc_sli *psli = &phba->sli;
+
+       list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t,
+                        list);
+       if (mbq) {
+               psli->mboxq_cnt--;
+       }
+
+       return mbq;
+}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
new file mode 100644 (file)
index 0000000..4397e11
--- /dev/null
@@ -0,0 +1,179 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_mem.c 1.79 2005/04/13 14:25:50EDT sf_support Exp  $
+ */
+
+#include <linux/mempool.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+#define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
+#define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
+
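+/*
+ * Allocation callbacks for mempool_create(): the object size is passed
+ * through the opaque pool data pointer, so the same pair of helpers can
+ * back pools of different element sizes (mailboxes and node list entries).
+ */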
+static void *
+lpfc_pool_kmalloc(unsigned int gfp_flags, void *data)
+{
+       return kmalloc((unsigned long)data, gfp_flags);
+}
+
+static void
+lpfc_pool_kfree(void *obj, void *data)
+{
+       kfree(obj);
+}
+
+int
+lpfc_mem_alloc(struct lpfc_hba * phba)
+{
+       struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+       int i;
+
+       phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
+                               phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+       if (!phba->lpfc_scsi_dma_buf_pool)
+               goto fail;
+
+       phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+                                                       LPFC_BPL_SIZE, 8,0);
+       if (!phba->lpfc_mbuf_pool)
+               goto fail_free_dma_buf_pool;
+
+       pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
+                                        LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
+       pool->max_count = 0;
+       pool->current_count = 0;
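+       /*
+        * Pre-allocate a small reserve of DMA buffers: lpfc_mbuf_alloc()
+        * falls back to this safety pool when the pci_pool is empty and the
+        * caller passes MEM_PRI, and lpfc_mbuf_free() refills it first.
+        */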
+       for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
+               pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+                                      GFP_KERNEL, &pool->elements[i].phys);
+               if (!pool->elements[i].virt)
+                       goto fail_free_mbuf_pool;
+               pool->max_count++;
+               pool->current_count++;
+       }
+
+       phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+                               lpfc_pool_kmalloc, lpfc_pool_kfree,
+                               (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
+       if (!phba->mbox_mem_pool)
+               goto fail_free_mbuf_pool;
+
+       phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+                       lpfc_pool_kmalloc, lpfc_pool_kfree,
+                       (void *)(unsigned long)sizeof(struct lpfc_nodelist));
+       if (!phba->nlp_mem_pool)
+               goto fail_free_mbox_pool;
+
+       return 0;
+
+ fail_free_mbox_pool:
+       mempool_destroy(phba->mbox_mem_pool);
+ fail_free_mbuf_pool:
+       while (--i)
+               pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+                                                pool->elements[i].phys);
+       kfree(pool->elements);
+       pci_pool_destroy(phba->lpfc_mbuf_pool);
+ fail_free_dma_buf_pool:
+       pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ fail:
+       return -ENOMEM;
+}
+
+void
+lpfc_mem_free(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+       LPFC_MBOXQ_t *mbox, *next_mbox;
+       struct lpfc_dmabuf   *mp;
+       int i;
+
+       list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
+               mp = (struct lpfc_dmabuf *) (mbox->context1);
+               if (mp) {
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                       kfree(mp);
+               }
+               list_del(&mbox->list);
+               mempool_free(mbox, phba->mbox_mem_pool);
+       }
+
+       psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       if (psli->mbox_active) {
+               mbox = psli->mbox_active;
+               mp = (struct lpfc_dmabuf *) (mbox->context1);
+               if (mp) {
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                       kfree(mp);
+               }
+               mempool_free(mbox, phba->mbox_mem_pool);
+               psli->mbox_active = NULL;
+       }
+
+       for (i = 0; i < pool->current_count; i++)
+               pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+                                                pool->elements[i].phys);
+       kfree(pool->elements);
+       mempool_destroy(phba->nlp_mem_pool);
+       mempool_destroy(phba->mbox_mem_pool);
+
+       pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+       pci_pool_destroy(phba->lpfc_mbuf_pool);
+}
+
+void *
+lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+       struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+       void *ret;
+
+       ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+
+       if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) {
+               pool->current_count--;
+               ret = pool->elements[pool->current_count].virt;
+               *handle = pool->elements[pool->current_count].phys;
+       }
+       return ret;
+}
+
+void
+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+       struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+
+       if (pool->current_count < pool->max_count) {
+               pool->elements[pool->current_count].virt = virt;
+               pool->elements[pool->current_count].phys = dma;
+               pool->current_count++;
+       } else {
+               pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+       }
+       return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
new file mode 100644 (file)
index 0000000..e7470a4
--- /dev/null
@@ -0,0 +1,1842 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_nportdisc.c 1.179 2005/04/13 11:59:13EDT sf_support Exp  $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+
+/* Called to verify a rcv'ed ADISC was intended for us. */
+static int
+lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                struct lpfc_name * nn, struct lpfc_name * pn)
+{
+       /* Check that the ADISC rsp WWNN / WWPN matches our internal node
+        * table entry for that node.
+        */
+       if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
+               return (0);
+
+       if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
+               return (0);
+
+       /* we match, return success */
+       return (1);
+}
+
+
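+/*
+ * Validate the service parameters from a received ELS payload against our
+ * own: for each class the remote claims, clamp its receive data sizes to
+ * what this port advertises, and fail if the class required by the caller
+ * is not valid.  On success the remote WWNN / WWPN are copied into the
+ * node list entry and 1 is returned.
+ */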
+int
+lpfc_check_sparm(struct lpfc_hba * phba,
+                struct lpfc_nodelist * ndlp, struct serv_parm * sp,
+                uint32_t class)
+{
+       volatile struct serv_parm *hsp = &phba->fc_sparam;
+       /* First check for supported version */
+
+       /* Next check for class validity */
+       if (sp->cls1.classValid) {
+
+               if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
+                       sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
+               if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
+                       sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
+       } else if (class == CLASS1) {
+               return (0);
+       }
+
+       if (sp->cls2.classValid) {
+
+               if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
+                       sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
+               if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
+                       sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
+       } else if (class == CLASS2) {
+               return (0);
+       }
+
+       if (sp->cls3.classValid) {
+
+               if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
+                       sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
+               if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
+                       sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
+       } else if (class == CLASS3) {
+               return (0);
+       }
+
+       if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
+               sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
+       if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
+               sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
+
+       /* If check is good, copy wwpn wwnn into ndlp */
+       memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+       memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
+       return (1);
+}
+
+static void *
+lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
+                     struct lpfc_iocbq *cmdiocb,
+                     struct lpfc_iocbq *rspiocb)
+{
+       struct lpfc_dmabuf *pcmd, *prsp;
+       uint32_t *lp;
+       void     *ptr = NULL;
+       IOCB_t   *irsp;
+
+       irsp = &rspiocb->iocb;
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+       /* For lpfc_els_abort, context2 could be zeroed to delay
+        * freeing associated memory till after ABTS completes.
+        */
+       if (pcmd) {
+               prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
+                                      list);
+               if (prsp) {
+                       lp = (uint32_t *) prsp->virt;
+                       ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
+               }
+       }
+       else {
+               /* Force ulpStatus error since we are returning NULL ptr */
+               if (!(irsp->ulpStatus)) {
+                       irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
+                       irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+               }
+               ptr = NULL;
+       }
+       return (ptr);
+}
+
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with a LPFC_NODELIST entry. This
+ * routine effectively results in a "software abort".
+ */
+int
+lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+       int send_abts)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_iocbq *iocb, *next_iocb;
+       IOCB_t *icmd;
+       int    found = 0;
+
+       /* Abort outstanding I/O on NPort <nlp_DID> */
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "%d:0201 Abort outstanding I/O on NPort x%x "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+                       ndlp->nlp_state, ndlp->nlp_rpi);
+
+       psli = &phba->sli;
+       pring = &psli->ring[LPFC_ELS_RING];
+
+       /* First check the txq */
+       do {
+               found = 0;
+               spin_lock_irq(phba->host->host_lock);
+               list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+                       /* Check to see if iocb matches the nport we are looking
+                          for */
+                       if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
+                               found = 1;
+                               /* It matches, so dequeue and call compl with an
+                                  error */
+                               list_del(&iocb->list);
+                               pring->txq_cnt--;
+                               if (iocb->iocb_cmpl) {
+                                       icmd = &iocb->iocb;
+                                       icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                                       icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+                                       spin_unlock_irq(phba->host->host_lock);
+                                       (iocb->iocb_cmpl) (phba, iocb, iocb);
+                                       spin_lock_irq(phba->host->host_lock);
+                               } else {
+                                       list_add_tail(&iocb->list,
+                                                       &phba->lpfc_iocb_list);
+                               }
+                               break;
+                       }
+               }
+               spin_unlock_irq(phba->host->host_lock);
+       } while (found);
+
+       /* Everything on txcmplq will be returned by firmware
+        * with a no rpi / linkdown / abort error.  For ring 0,
+        * ELS discovery, we want to get rid of it right here.
+        */
+       /* Next check the txcmplq */
+       do {
+               found = 0;
+               spin_lock_irq(phba->host->host_lock);
+               list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+                                        list) {
+                       /* Check to see if iocb matches the nport we are looking
+                          for */
+                       if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
+                               found = 1;
+                               /* It matches, so dequeue and call compl with an
+                                  error */
+                               list_del(&iocb->list);
+                               pring->txcmplq_cnt--;
+
+                               icmd = &iocb->iocb;
+                               /* If the driver is completing an ELS
+                                * command early, flush it out of the firmware.
+                                */
+                               if (send_abts &&
+                                  (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
+                                  (icmd->un.elsreq64.bdl.ulpIoTag32)) {
+                                       lpfc_sli_issue_abort_iotag32(phba,
+                                                            pring, iocb);
+                               }
+                               if (iocb->iocb_cmpl) {
+                                       icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                                       icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+                                       spin_unlock_irq(phba->host->host_lock);
+                                       (iocb->iocb_cmpl) (phba, iocb, iocb);
+                                       spin_lock_irq(phba->host->host_lock);
+                               } else {
+                                       list_add_tail(&iocb->list,
+                                                       &phba->lpfc_iocb_list);
+                               }
+                               break;
+                       }
+               }
+               spin_unlock_irq(phba->host->host_lock);
+       } while(found);
+
+       /* If we are delaying issuing an ELS command, cancel it */
+       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+               ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+               del_timer_sync(&ndlp->nlp_delayfunc);
+               if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+                       list_del_init(&ndlp->els_retry_evt.evt_listp);
+       }
+       return (0);
+}
+
+static int
+lpfc_rcv_plogi(struct lpfc_hba * phba,
+                     struct lpfc_nodelist * ndlp,
+                     struct lpfc_iocbq *cmdiocb)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       IOCB_t *icmd;
+       struct serv_parm *sp;
+       LPFC_MBOXQ_t *mbox;
+       struct ls_rjt stat;
+       int rc;
+
+       memset(&stat, 0, sizeof (struct ls_rjt));
+       if (phba->hba_state <= LPFC_FLOGI) {
+               /* Before responding to PLOGI, check for pt2pt mode.
+                * If we are pt2pt, with an outstanding FLOGI, abort
+                * the FLOGI and resend it first.
+                */
+               if (phba->fc_flag & FC_PT2PT) {
+                       lpfc_els_abort_flogi(phba);
+                       if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
+                               /* If the other side is supposed to initiate
+                                * the PLOGI anyway, just ACC it now and
+                                * move on with discovery.
+                                */
+                               phba->fc_edtov = FF_DEF_EDTOV;
+                               phba->fc_ratov = FF_DEF_RATOV;
+                               /* Start discovery - this should just do
+                                  CLEAR_LA */
+                               lpfc_disc_start(phba);
+                       }
+                       else {
+                               lpfc_initial_flogi(phba);
+                       }
+               }
+               else {
+                       stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+                       stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+                       lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
+                                           ndlp);
+                       return 0;
+               }
+       }
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+       sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+       if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
+               /* Reject this request because invalid parameters */
+               stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+               stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+               lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+               return (0);
+       }
+       icmd = &cmdiocb->iocb;
+
+       /* PLOGI chkparm OK */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_ELS,
+                       "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
+                       ndlp->nlp_rpi);
+
+       if ((phba->cfg_fcp_class == 2) &&
+           (sp->cls2.classValid)) {
+               ndlp->nlp_fcp_info |= CLASS2;
+       } else {
+               ndlp->nlp_fcp_info |= CLASS3;
+       }
+       ndlp->nlp_class_sup = 0;
+       if (sp->cls1.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS1;
+       if (sp->cls2.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS2;
+       if (sp->cls3.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS3;
+       if (sp->cls4.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS4;
+       ndlp->nlp_maxframe =
+               ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+       /* no need to reg_login if we are already in one of these states */
+       switch(ndlp->nlp_state) {
+       case  NLP_STE_NPR_NODE:
+               if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+                       break;
+       case  NLP_STE_REG_LOGIN_ISSUE:
+       case  NLP_STE_PRLI_ISSUE:
+       case  NLP_STE_UNMAPPED_NODE:
+       case  NLP_STE_MAPPED_NODE:
+               lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
+               return (1);
+       }
+
+       if ((phba->fc_flag & FC_PT2PT)
+           && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
+               /* rcv'ed PLOGI decides what our NPortId will be */
+               phba->fc_myDID = icmd->un.rcvels.parmRo;
+               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (mbox == NULL)
+                       goto out;
+               lpfc_config_link(phba, mbox);
+               mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               rc = lpfc_sli_issue_mbox
+                       (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+               if (rc == MBX_NOT_FINISHED) {
+                       mempool_free( mbox, phba->mbox_mem_pool);
+                       goto out;
+               }
+
+               lpfc_can_disctmo(phba);
+       }
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (mbox == NULL)
+               goto out;
+
+       if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
+                          (uint8_t *) sp, mbox, 0)) {
+               mempool_free( mbox, phba->mbox_mem_pool);
+               goto out;
+       }
+
+       /* ACC PLOGI rsp command needs to execute first,
+        * queue this mbox command to be processed later.
+        */
+       mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+       mbox->context2  = ndlp;
+       ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
+
+       /* If there is an outstanding PLOGI issued, abort it before
+        * sending ACC rsp to PLOGI received.
+        */
+       if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+               /* software abort outstanding PLOGI */
+               lpfc_els_abort(phba, ndlp, 1);
+       }
+       ndlp->nlp_flag |= NLP_RCV_PLOGI;
+       lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
+       return (1);
+
+out:
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+       lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+       return (0);
+}
+
+static int
+lpfc_rcv_padisc(struct lpfc_hba * phba,
+               struct lpfc_nodelist * ndlp,
+               struct lpfc_iocbq *cmdiocb)
+{
+       struct lpfc_dmabuf *pcmd;
+       struct serv_parm *sp;
+       struct lpfc_name *pnn, *ppn;
+       struct ls_rjt stat;
+       ADISC *ap;
+       IOCB_t *icmd;
+       uint32_t *lp;
+       uint32_t cmd;
+
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+
+       cmd = *lp++;
+       if (cmd == ELS_CMD_ADISC) {
+               ap = (ADISC *) lp;
+               pnn = (struct lpfc_name *) & ap->nodeName;
+               ppn = (struct lpfc_name *) & ap->portName;
+       } else {
+               sp = (struct serv_parm *) lp;
+               pnn = (struct lpfc_name *) & sp->nodeName;
+               ppn = (struct lpfc_name *) & sp->portName;
+       }
+
+       icmd = &cmdiocb->iocb;
+       if ((icmd->ulpStatus == 0) &&
+           (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
+               if (cmd == ELS_CMD_ADISC) {
+                       lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
+               }
+               else {
+                       lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
+                               NULL, 0);
+               }
+               return (1);
+       }
+       /* Reject this request because invalid parameters */
+       stat.un.b.lsRjtRsvd0 = 0;
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+       stat.un.b.vendorUnique = 0;
+       lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+
+       ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+       /* 1 sec timeout */
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_DELAY_TMO;
+       spin_unlock_irq(phba->host->host_lock);
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+       return (0);
+}
+
+static int
+lpfc_rcv_logo(struct lpfc_hba * phba,
+                     struct lpfc_nodelist * ndlp,
+                     struct lpfc_iocbq *cmdiocb)
+{
+       /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
+       /* Only call LOGO ACC for the first LOGO; this avoids sending unnecessary
+        * PLOGIs during LOGO storms from a device.
+        */
+       ndlp->nlp_flag |= NLP_LOGO_ACC;
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+       if (!(ndlp->nlp_type & NLP_FABRIC)) {
+               /* Only try to re-login if this is NOT a Fabric Node */
+               ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag |= NLP_DELAY_TMO;
+               spin_unlock_irq(phba->host->host_lock);
+       }
+
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+
+       ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+       /* The driver has to wait until the ACC completes before it continues
+        * processing the LOGO.  The action will resume in
+        * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
+        * unreg_login, the driver waits so the ACC does not get aborted.
+        */
+       return (0);
+}
+
+static void
+lpfc_rcv_prli(struct lpfc_hba * phba,
+                     struct lpfc_nodelist * ndlp,
+                     struct lpfc_iocbq *cmdiocb)
+{
+       struct lpfc_dmabuf *pcmd;
+       uint32_t *lp;
+       PRLI *npr;
+       struct fc_rport *rport = ndlp->rport;
+       u32 roles;
+
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+       npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+
+       ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+       ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+       if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+           (npr->prliType == PRLI_FCP_TYPE)) {
+               if (npr->initiatorFunc)
+                       ndlp->nlp_type |= NLP_FCP_INITIATOR;
+               if (npr->targetFunc)
+                       ndlp->nlp_type |= NLP_FCP_TARGET;
+               if (npr->Retry)
+                       ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+       }
+       if (rport) {
+               /* We need to update the rport role values */
+               roles = FC_RPORT_ROLE_UNKNOWN;
+               if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+                       roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+               if (ndlp->nlp_type & NLP_FCP_TARGET)
+                       roles |= FC_RPORT_ROLE_FCP_TARGET;
+               fc_remote_port_rolechg(rport, roles);
+       }
+}
+
+static uint32_t
+lpfc_disc_set_adisc(struct lpfc_hba * phba,
+                     struct lpfc_nodelist * ndlp)
+{
+       /* Check config parameter use-adisc or FCP-2 */
+       if ((phba->cfg_use_adisc == 0) &&
+               !(phba->fc_flag & FC_RSCN_MODE)) {
+               if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
+                       return (0);
+       }
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_NPR_ADISC;
+       spin_unlock_irq(phba->host->host_lock);
+       return (1);
+}
+
+static uint32_t
+lpfc_disc_noop(struct lpfc_hba * phba,
+               struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       /* This routine does nothing, just return the current state */
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_disc_illegal(struct lpfc_hba * phba,
+                  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       lpfc_printf_log(phba,
+                       KERN_ERR,
+                       LOG_DISCOVERY,
+                       "%d:0253 Illegal State Transition: node x%x event x%x, "
+                       "state x%x Data: x%x x%x\n",
+                       phba->brd_no,
+                       ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+                       ndlp->nlp_flag);
+       return (ndlp->nlp_state);
+}
+
+/* Start of Discovery State Machine routines */
+
+static uint32_t
+lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+               ndlp->nlp_state = NLP_STE_UNUSED_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+               return (ndlp->nlp_state);
+       }
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       lpfc_issue_els_logo(phba, ndlp, 0);
+       lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq     *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_LOGO_ACC;
+       spin_unlock_irq(phba->host->host_lock);
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+       lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_rm_unused_node(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
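+/* A PLOGI arrived while our own PLOGI is outstanding (a PLOGI collision).
+ * Accept it only if the remote port name is greater than ours; otherwise
+ * reject it with LS_RJT "command in progress".
+ */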
+static uint32_t
+lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+                          void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb = arg;
+       struct lpfc_dmabuf *pcmd;
+       struct serv_parm *sp;
+       uint32_t *lp;
+       struct ls_rjt stat;
+       int port_cmp;
+
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       lp = (uint32_t *) pcmd->virt;
+       sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+
+       memset(&stat, 0, sizeof (struct ls_rjt));
+
+       /* For a PLOGI, we only accept if our portname is less
+        * than the remote portname.
+        */
+       phba->fc_stat.elsLogiCol++;
+       port_cmp = memcmp(&phba->fc_portname, &sp->portName,
+                         sizeof (struct lpfc_name));
+
+       if (port_cmp >= 0) {
+               /* Reject this request because the remote node will accept
+                  ours */
+               stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+               stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+               lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+       }
+       else {
+               lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+       } /* if our portname was less */
+
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq     *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       /* software abort outstanding PLOGI */
+       lpfc_els_abort(phba, ndlp, 1);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag |= NLP_DELAY_TMO;
+       spin_unlock_irq(phba->host->host_lock);
+
+       if (evt == NLP_EVT_RCV_LOGO) {
+               lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+       }
+       else {
+               lpfc_issue_els_logo(phba, ndlp, 0);
+       }
+
+       /* Put ndlp in npr list set plogi timer for 1 sec */
+       ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+
+       return (ndlp->nlp_state);
+}
+
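+/* PLOGI completion while in PLOGI_ISSUE state.  On a good completion the
+ * service parameters are validated, the class of service and max frame size
+ * are recorded, and a REG_LOGIN mailbox command is queued (moving the node
+ * to REG_LOGIN_ISSUE).  Any failure along the way frees the node.
+ */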
+static uint32_t
+lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb, *rspiocb;
+       struct lpfc_dmabuf *pcmd, *prsp;
+       uint32_t *lp;
+       IOCB_t *irsp;
+       struct serv_parm *sp;
+       LPFC_MBOXQ_t *mbox;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+       rspiocb = cmdiocb->context_un.rsp_iocb;
+
+       if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+               return (ndlp->nlp_state);
+       }
+
+       irsp = &rspiocb->iocb;
+
+       if (irsp->ulpStatus)
+               goto out;
+
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+       prsp = list_get_first(&pcmd->list,
+                             struct lpfc_dmabuf,
+                             list);
+       lp = (uint32_t *) prsp->virt;
+
+       sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+       if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
+               goto out;
+
+       /* PLOGI chkparm OK */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_ELS,
+                       "%d:0121 PLOGI chkparm OK "
+                       "Data: x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       ndlp->nlp_DID, ndlp->nlp_state,
+                       ndlp->nlp_flag, ndlp->nlp_rpi);
+
+       if ((phba->cfg_fcp_class == 2) &&
+           (sp->cls2.classValid)) {
+               ndlp->nlp_fcp_info |= CLASS2;
+       } else {
+               ndlp->nlp_fcp_info |= CLASS3;
+       }
+       ndlp->nlp_class_sup = 0;
+       if (sp->cls1.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS1;
+       if (sp->cls2.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS2;
+       if (sp->cls3.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS3;
+       if (sp->cls4.classValid)
+               ndlp->nlp_class_sup |= FC_COS_CLASS4;
+       ndlp->nlp_maxframe =
+               ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+               sp->cmn.bbRcvSizeLsb;
+
+       if (!(mbox = mempool_alloc(phba->mbox_mem_pool,
+                                  GFP_KERNEL)))
+               goto out;
+
+       lpfc_unreg_rpi(phba, ndlp);
+       if (lpfc_reg_login
+           (phba, irsp->un.elsreq64.remoteID,
+            (uint8_t *) sp, mbox, 0) == 0) {
+               /* set_slim mailbox command needs to
+                * execute first, queue this command to
+                * be processed later.
+                */
+               switch(ndlp->nlp_DID) {
+               case NameServer_DID:
+                       mbox->mbox_cmpl =
+                               lpfc_mbx_cmpl_ns_reg_login;
+                       break;
+               case FDMI_DID:
+                       mbox->mbox_cmpl =
+                               lpfc_mbx_cmpl_fdmi_reg_login;
+                       break;
+               default:
+                       mbox->mbox_cmpl =
+                               lpfc_mbx_cmpl_reg_login;
+               }
+               mbox->context2 = ndlp;
+               if (lpfc_sli_issue_mbox(phba, mbox,
+                                       (MBX_NOWAIT | MBX_STOP_IOCB))
+                   != MBX_NOT_FINISHED) {
+                       ndlp->nlp_state =
+                               NLP_STE_REG_LOGIN_ISSUE;
+                       lpfc_nlp_list(phba, ndlp,
+                                     NLP_REGLOGIN_LIST);
+                       return (ndlp->nlp_state);
+               }
+               mempool_free(mbox, phba->mbox_mem_pool);
+       } else {
+               mempool_free(mbox, phba->mbox_mem_pool);
+       }
+
+
+ out:
+       /* Free this node since the driver cannot login or has the wrong
+          sparm */
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       /* software abort outstanding PLOGI */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       /* software abort outstanding PLOGI */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       spin_unlock_irq(phba->host->host_lock);
+
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       /* software abort outstanding ADISC */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+               return (ndlp->nlp_state);
+       }
+       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+       lpfc_issue_els_plogi(phba, ndlp, 0);
+
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       /* software abort outstanding ADISC */
+       lpfc_els_abort(phba, ndlp, 0);
+
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       /* Treat like rcv logo */
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
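+/* ADISC completion.  If the response failed, or the node's WWNN/WWPN no
+ * longer match, clear the saved names, unregister the RPI and schedule a
+ * delayed PLOGI retry from the NPR list; otherwise move the node to the
+ * mapped list.
+ */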
+static uint32_t
+lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb, *rspiocb;
+       IOCB_t *irsp;
+       ADISC *ap;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+       rspiocb = cmdiocb->context_un.rsp_iocb;
+
+       ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+       irsp = &rspiocb->iocb;
+
+       if ((irsp->ulpStatus) ||
+               (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
+               ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+               /* 1 sec timeout */
+               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag |= NLP_DELAY_TMO;
+               spin_unlock_irq(phba->host->host_lock);
+
+               memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
+               memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
+
+               ndlp->nlp_state = NLP_STE_NPR_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               lpfc_unreg_rpi(phba, ndlp);
+               return (ndlp->nlp_state);
+       }
+       ndlp->nlp_state = NLP_STE_MAPPED_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       /* software abort outstanding ADISC */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       /* software abort outstanding ADISC */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       spin_unlock_irq(phba->host->host_lock);
+
+       lpfc_disc_set_adisc(phba, ndlp);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
+                             struct lpfc_nodelist * ndlp, void *arg,
+                             uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
+                            struct lpfc_nodelist * ndlp, void *arg,
+                            uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
+                            struct lpfc_nodelist * ndlp, void *arg,
+                            uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
+                              struct lpfc_nodelist * ndlp, void *arg,
+                              uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
+                            struct lpfc_nodelist * ndlp, void *arg,
+                            uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+       return (ndlp->nlp_state);
+}
+
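+/* REG_LOGIN mailbox completion.  On failure, send a LOGO and schedule a
+ * delayed PLOGI from the NPR list.  On success, record the assigned RPI and
+ * issue a PRLI, except for fabric nodes, which go straight to UNMAPPED.
+ */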
+static uint32_t
+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
+                                 struct lpfc_nodelist * ndlp,
+                                 void *arg, uint32_t evt)
+{
+       LPFC_MBOXQ_t *pmb;
+       MAILBOX_t *mb;
+       uint32_t did;
+
+       pmb = (LPFC_MBOXQ_t *) arg;
+       mb = &pmb->mb;
+       did = mb->un.varWords[1];
+       if (mb->mbxStatus) {
+               /* RegLogin failed */
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_DISCOVERY,
+                               "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
+                               phba->brd_no,
+                               did, mb->mbxStatus, phba->hba_state);
+
+               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag |= NLP_DELAY_TMO;
+               spin_unlock_irq(phba->host->host_lock);
+
+               lpfc_issue_els_logo(phba, ndlp, 0);
+               /* Put ndlp in npr list set plogi timer for 1 sec */
+               ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+               ndlp->nlp_state = NLP_STE_NPR_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               return (ndlp->nlp_state);
+       }
+
+       if (ndlp->nlp_rpi != 0)
+               lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+
+       ndlp->nlp_rpi = mb->un.varWords[0];
+       lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+
+       /* Only if we are not a fabric nport do we issue PRLI */
+       if (!(ndlp->nlp_type & NLP_FABRIC)) {
+               ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+               lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+               lpfc_issue_els_prli(phba, ndlp, 0);
+       } else {
+               ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+       }
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
+                             struct lpfc_nodelist * ndlp, void *arg,
+                             uint32_t evt)
+{
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
+                              struct lpfc_nodelist * ndlp, void *arg,
+                              uint32_t evt)
+{
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       spin_unlock_irq(phba->host->host_lock);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       /* Software abort outstanding PRLI before sending acc */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+/* This routine is invoked when we receive a PRLO request from an nport
+ * we are logged into.  We should send back a PRLO rsp setting the
+ * appropriate bits.
+ * NEXT STATE = PRLI_ISSUE
+ */
+static uint32_t
+lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+       return (ndlp->nlp_state);
+}
+
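+/* PRLI completion.  A failed PRLI leaves the node on the unmapped list.  On
+ * success the accept payload is parsed to set the FCP initiator/target and
+ * FCP-2 retry bits, and the node moves to the mapped list.
+ */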
+static uint32_t
+lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb, *rspiocb;
+       IOCB_t *irsp;
+       PRLI *npr;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+       rspiocb = cmdiocb->context_un.rsp_iocb;
+       npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+
+       irsp = &rspiocb->iocb;
+       if (irsp->ulpStatus) {
+               ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+               lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+               return (ndlp->nlp_state);
+       }
+
+       /* Check out PRLI rsp */
+       ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+       ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+       if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+           (npr->prliType == PRLI_FCP_TYPE)) {
+               if (npr->initiatorFunc)
+                       ndlp->nlp_type |= NLP_FCP_INITIATOR;
+               if (npr->targetFunc)
+                       ndlp->nlp_type |= NLP_FCP_TARGET;
+               if (npr->Retry)
+                       ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+       }
+
+       ndlp->nlp_state = NLP_STE_MAPPED_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
+       return (ndlp->nlp_state);
+}
+
+/*! lpfc_device_rm_prli_issue
+  *
+  * \pre
+  * \post
+  * \param   phba
+  * \param   ndlp
+  * \param   arg
+  * \param   evt
+  * \return  uint32_t
+  *
+  * \b Description:
+  *    This routine is invoked when we receive a request to remove an nport
+  *    we are in the process of PRLIing. We should software abort the
+  *    outstanding PRLI, unreg the login, and send a logout. We will change
+  *    the node state to UNUSED_NODE and put it on the plogi list so it can
+  *    be freed when the LOGO completes.
+  *
+  */
+static uint32_t
+lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       /* software abort outstanding PRLI */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+
+/*! lpfc_device_recov_prli_issue
+  *
+  * \pre
+  * \post
+  * \param   phba
+  * \param   ndlp
+  * \param   arg
+  * \param   evt
+  * \return  uint32_t
+  *
+  * \b Description:
+  *    The routine is invoked when the state of a device is unknown, like
+  *    during a link down. We should remove the nodelist entry from the
+  *    unmapped list, issue an UNREG_LOGIN, do a software abort of the
+  *    outstanding PRLI command, then free the node entry.
+  */
+static uint32_t
+lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       /* software abort outstanding PRLI */
+       lpfc_els_abort(phba, ndlp, 1);
+
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       spin_unlock_irq(phba->host->host_lock);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_prli(phba, ndlp, cmdiocb);
+       lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
+                        struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       /* Treat like rcv logo */
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       lpfc_disc_set_adisc(phba, ndlp);
+
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
+                          struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
+                         struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       /* flush the target */
+       spin_lock_irq(phba->host->host_lock);
+       lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+                              ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+       spin_unlock_irq(phba->host->host_lock);
+
+       /* Treat like rcv logo */
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       ndlp->nlp_state = NLP_STE_NPR_NODE;
+       lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       spin_unlock_irq(phba->host->host_lock);
+       lpfc_disc_set_adisc(phba, ndlp);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       /* Ignore PLOGI if we have an outstanding LOGO */
+       if (ndlp->nlp_flag & NLP_LOGO_SND) {
+               return (ndlp->nlp_state);
+       }
+
+       if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
+               spin_unlock_irq(phba->host->host_lock);
+               return (ndlp->nlp_state);
+       }
+
+       /* send PLOGI immediately, move to PLOGI issue state */
+       if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+                       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                       lpfc_issue_els_plogi(phba, ndlp, 0);
+       }
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq     *cmdiocb;
+       struct ls_rjt          stat;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       memset(&stat, 0, sizeof (struct ls_rjt));
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+       lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+
+       if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+               if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+                       ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+                       lpfc_issue_els_adisc(phba, ndlp, 0);
+               } else {
+                       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                       lpfc_issue_els_plogi(phba, ndlp, 0);
+               }
+       }
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq     *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_logo(phba, ndlp, cmdiocb);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq     *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+
+       if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+               if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+                       ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+                       lpfc_issue_els_adisc(phba, ndlp, 0);
+               } else {
+                       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+                       lpfc_issue_els_plogi(phba, ndlp, 0);
+               }
+       }
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       struct lpfc_iocbq     *cmdiocb;
+
+       cmdiocb = (struct lpfc_iocbq *) arg;
+
+       lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+               if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
+                       return (ndlp->nlp_state);
+               } else {
+                       spin_lock_irq(phba->host->host_lock);
+                       ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+                       spin_unlock_irq(phba->host->host_lock);
+                       del_timer_sync(&ndlp->nlp_delayfunc);
+                       if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+                               list_del_init(&ndlp->els_retry_evt.evt_listp);
+               }
+       }
+
+       ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+       lpfc_issue_els_plogi(phba, ndlp, 0);
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
+               struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       lpfc_unreg_rpi(phba, ndlp);
+       /* This routine does nothing, just return the current state */
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       LPFC_MBOXQ_t *pmb;
+       MAILBOX_t *mb;
+
+       pmb = (LPFC_MBOXQ_t *) arg;
+       mb = &pmb->mb;
+
+       /* save rpi */
+       if (ndlp->nlp_rpi != 0)
+               lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+
+       ndlp->nlp_rpi = mb->un.varWords[0];
+       lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+
+       return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_rm_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_npr_node(struct lpfc_hba * phba,
+                           struct lpfc_nodelist * ndlp, void *arg,
+                           uint32_t evt)
+{
+       spin_lock_irq(phba->host->host_lock);
+       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+       spin_unlock_irq(phba->host->host_lock);
+       return (ndlp->nlp_state);
+}
+
+
+/* This next section defines the NPort Discovery State Machine */
+
+/* There are 4 different doubly linked lists that nodelist entries can reside
+ * on. The plogi list and adisc list are used when Link Up discovery or RSCN
+ * processing is needed. Each list holds the nodes that we will send PLOGI
+ * or ADISC on. These lists keep track of which nodes will be affected
+ * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
+ * The unmapped_list will contain all nodes that we have successfully logged
+ * into at the Fibre Channel level. The mapped_list will contain all nodes
+ * that are mapped FCP targets.
+ */
+/*
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to UNMAPPED_NODE. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added or removed (via DEVICE_RM) to / from
+ * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list.  32 entries are processed initially
+ * and ADISC is initiated for each one.  Completions / Events for each node
+ * are funneled through the state machine.  As each node finishes ADISC
+ * processing, it starts ADISC for any nodes waiting for ADISC processing. If
+ * no nodes are waiting, and the ADISC list count is identically 0, then we
+ * are done. For Link Up discovery, since all nodes on the PLOGI list are
+ * UNREG_LOGIN'ed, we can issue a CLEAR_LA and reenable Link Events. Next we
+ * will process the PLOGI list.  32 entries are processed initially and PLOGI
+ * is initiated for each one.  Completions / Events for each node are funneled
+ * through the state machine.  As each node finishes PLOGI processing, it
+ * starts PLOGI for any nodes waiting for PLOGI processing. If no nodes are
+ * waiting, and the PLOGI list count is identically 0, then we are done. We
+ * have now completed discovery / RSCN handling. Upon completion, ALL nodes
+ * should be on either the mapped or unmapped lists.
+ */
+
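+/* The action table below is indexed by (current state * NLP_EVT_MAX_EVENT) +
+ * event, so each state contributes NLP_EVT_MAX_EVENT entries in event order.
+ * lpfc_disc_state_machine() uses this table to dispatch each event.
+ */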
+static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
+     (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
+       /* Action routine                  Event       Current State  */
+       lpfc_rcv_plogi_unused_node,     /* RCV_PLOGI   UNUSED_NODE    */
+       lpfc_rcv_els_unused_node,       /* RCV_PRLI        */
+       lpfc_rcv_logo_unused_node,      /* RCV_LOGO        */
+       lpfc_rcv_els_unused_node,       /* RCV_ADISC       */
+       lpfc_rcv_els_unused_node,       /* RCV_PDISC       */
+       lpfc_rcv_els_unused_node,       /* RCV_PRLO        */
+       lpfc_disc_illegal,              /* CMPL_PLOGI      */
+       lpfc_disc_illegal,              /* CMPL_PRLI       */
+       lpfc_cmpl_logo_unused_node,     /* CMPL_LOGO       */
+       lpfc_disc_illegal,              /* CMPL_ADISC      */
+       lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
+       lpfc_device_rm_unused_node,     /* DEVICE_RM       */
+       lpfc_disc_illegal,              /* DEVICE_RECOVERY */
+
+       lpfc_rcv_plogi_plogi_issue,     /* RCV_PLOGI   PLOGI_ISSUE    */
+       lpfc_rcv_els_plogi_issue,       /* RCV_PRLI        */
+       lpfc_rcv_els_plogi_issue,       /* RCV_LOGO        */
+       lpfc_rcv_els_plogi_issue,       /* RCV_ADISC       */
+       lpfc_rcv_els_plogi_issue,       /* RCV_PDISC       */
+       lpfc_rcv_els_plogi_issue,       /* RCV_PRLO        */
+       lpfc_cmpl_plogi_plogi_issue,    /* CMPL_PLOGI      */
+       lpfc_disc_illegal,              /* CMPL_PRLI       */
+       lpfc_disc_illegal,              /* CMPL_LOGO       */
+       lpfc_disc_illegal,              /* CMPL_ADISC      */
+       lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
+       lpfc_device_rm_plogi_issue,     /* DEVICE_RM       */
+       lpfc_device_recov_plogi_issue,  /* DEVICE_RECOVERY */
+
+       lpfc_rcv_plogi_adisc_issue,     /* RCV_PLOGI   ADISC_ISSUE    */
+       lpfc_rcv_prli_adisc_issue,      /* RCV_PRLI        */
+       lpfc_rcv_logo_adisc_issue,      /* RCV_LOGO        */
+       lpfc_rcv_padisc_adisc_issue,    /* RCV_ADISC       */
+       lpfc_rcv_padisc_adisc_issue,    /* RCV_PDISC       */
+       lpfc_rcv_prlo_adisc_issue,      /* RCV_PRLO        */
+       lpfc_disc_illegal,              /* CMPL_PLOGI      */
+       lpfc_disc_illegal,              /* CMPL_PRLI       */
+       lpfc_disc_illegal,              /* CMPL_LOGO       */
+       lpfc_cmpl_adisc_adisc_issue,    /* CMPL_ADISC      */
+       lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
+       lpfc_device_rm_adisc_issue,     /* DEVICE_RM       */
+       lpfc_device_recov_adisc_issue,  /* DEVICE_RECOVERY */
+
+       lpfc_rcv_plogi_reglogin_issue,  /* RCV_PLOGI  REG_LOGIN_ISSUE */
+       lpfc_rcv_prli_reglogin_issue,   /* RCV_PRLI        */
+       lpfc_rcv_logo_reglogin_issue,   /* RCV_LOGO        */
+       lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC       */
+       lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC       */
+       lpfc_rcv_prlo_reglogin_issue,   /* RCV_PRLO        */
+       lpfc_disc_illegal,              /* CMPL_PLOGI      */
+       lpfc_disc_illegal,              /* CMPL_PRLI       */
+       lpfc_disc_illegal,              /* CMPL_LOGO       */
+       lpfc_disc_illegal,              /* CMPL_ADISC      */
+       lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
+       lpfc_device_rm_reglogin_issue,  /* DEVICE_RM       */
+       lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
+
+       lpfc_rcv_plogi_prli_issue,      /* RCV_PLOGI   PRLI_ISSUE     */
+       lpfc_rcv_prli_prli_issue,       /* RCV_PRLI        */
+       lpfc_rcv_logo_prli_issue,       /* RCV_LOGO        */
+       lpfc_rcv_padisc_prli_issue,     /* RCV_ADISC       */
+       lpfc_rcv_padisc_prli_issue,     /* RCV_PDISC       */
+       lpfc_rcv_prlo_prli_issue,       /* RCV_PRLO        */
+       lpfc_disc_illegal,              /* CMPL_PLOGI      */
+       lpfc_cmpl_prli_prli_issue,      /* CMPL_PRLI       */
+       lpfc_disc_illegal,              /* CMPL_LOGO       */
+       lpfc_disc_illegal,              /* CMPL_ADISC      */
+       lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
+       lpfc_device_rm_prli_issue,      /* DEVICE_RM       */
+       lpfc_device_recov_prli_issue,   /* DEVICE_RECOVERY */
+
+       lpfc_rcv_plogi_unmap_node,      /* RCV_PLOGI   UNMAPPED_NODE  */
+       lpfc_rcv_prli_unmap_node,       /* RCV_PRLI        */
+       lpfc_rcv_logo_unmap_node,       /* RCV_LOGO        */
+       lpfc_rcv_padisc_unmap_node,     /* RCV_ADISC       */
+       lpfc_rcv_padisc_unmap_node,     /* RCV_PDISC       */
+       lpfc_rcv_prlo_unmap_node,       /* RCV_PRLO        */
+       lpfc_disc_illegal,              /* CMPL_PLOGI      */
+       lpfc_disc_illegal,              /* CMPL_PRLI       */
+       lpfc_disc_illegal,              /* CMPL_LOGO       */
+       lpfc_disc_illegal,              /* CMPL_ADISC      */
+       lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
+       lpfc_disc_illegal,              /* DEVICE_RM       */
+       lpfc_device_recov_unmap_node,   /* DEVICE_RECOVERY */
+
+       lpfc_rcv_plogi_mapped_node,     /* RCV_PLOGI   MAPPED_NODE    */
+       lpfc_rcv_prli_mapped_node,      /* RCV_PRLI        */
+       lpfc_rcv_logo_mapped_node,      /* RCV_LOGO        */
+       lpfc_rcv_padisc_mapped_node,    /* RCV_ADISC       */
+       lpfc_rcv_padisc_mapped_node,    /* RCV_PDISC       */
+       lpfc_rcv_prlo_mapped_node,      /* RCV_PRLO        */
+       lpfc_disc_illegal,              /* CMPL_PLOGI      */
+       lpfc_disc_illegal,              /* CMPL_PRLI       */
+       lpfc_disc_illegal,              /* CMPL_LOGO       */
+       lpfc_disc_illegal,              /* CMPL_ADISC      */
+       lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
+       lpfc_disc_illegal,              /* DEVICE_RM       */
+       lpfc_device_recov_mapped_node,  /* DEVICE_RECOVERY */
+
+       lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
+       lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
+       lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
+       lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
+       lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
+       lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
+       lpfc_disc_noop,                 /* CMPL_PLOGI      */
+       lpfc_disc_noop,                 /* CMPL_PRLI       */
+       lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
+       lpfc_disc_noop,                 /* CMPL_ADISC      */
+       lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
+       lpfc_device_rm_npr_node,        /* DEVICE_RM       */
+       lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
+};
+
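+/* Entry point of the discovery state machine.  Look up the action routine
+ * for the node's current state and the event, invoke it, and make the
+ * returned value the node's new state.  nlp_disc_refcnt defers a node
+ * removal that is requested while the state machine is running.
+ */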
+int
+lpfc_disc_state_machine(struct lpfc_hba * phba,
+                       struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+       uint32_t cur_state, rc;
+       uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
+                        uint32_t);
+
+       ndlp->nlp_disc_refcnt++;
+       cur_state = ndlp->nlp_state;
+
+       /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
+       lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_DISCOVERY,
+                       "%d:0211 DSM in event x%x on NPort x%x in state %d "
+                       "Data: x%x\n",
+                       phba->brd_no,
+                       evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+
+       func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
+       rc = (func) (phba, ndlp, arg, evt);
+
+       /* DSM out state <rc> on NPort <nlp_DID> */
+       lpfc_printf_log(phba,
+                      KERN_INFO,
+                      LOG_DISCOVERY,
+                      "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
+                      phba->brd_no,
+                      rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+       ndlp->nlp_disc_refcnt--;
+
+       /* Check to see if ndlp removal is deferred */
+       if ((ndlp->nlp_disc_refcnt == 0)
+           && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
+               spin_lock_irq(phba->host->host_lock);
+               ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
+               spin_unlock_irq(phba->host->host_lock);
+               lpfc_nlp_remove(phba, ndlp);
+               return (NLP_STE_FREED_NODE);
+       }
+       if (rc == NLP_STE_FREED_NODE)
+               return (NLP_STE_FREED_NODE);
+       ndlp->nlp_state = rc;
+       return (rc);
+}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
new file mode 100644 (file)
index 0000000..42fab03
--- /dev/null
@@ -0,0 +1,1246 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_scsi.c 1.37 2005/04/13 14:27:09EDT sf_support Exp  $
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_version.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+#define LPFC_RESET_WAIT  2
+#define LPFC_ABORT_WAIT  2
+
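+/* Encode the SCSI LUN into the FCP_CMND LUN field: fcpLunLsl is cleared and
+ * the byte-swapped 16-bit LUN is stored in fcpLunMsl.
+ */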
+static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
+{
+       fcmd->fcpLunLsl = 0;
+       fcmd->fcpLunMsl = swab16((uint16_t)lun);
+}
+
+/*
+ * This routine allocates a scsi buffer, which contains all the necessary
+ * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
+ * contains information to build the IOCB.  The DMAable region contains
+ * memory for the FCP CMND, FCP RSP, and the inital BPL.  In addition to
+ * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL
+ * and the BPL BDE is setup in the IOCB.
+ */
+static struct lpfc_scsi_buf *
+lpfc_get_scsi_buf(struct lpfc_hba * phba)
+{
+       struct lpfc_scsi_buf *psb;
+       struct ulp_bde64 *bpl;
+       IOCB_t *iocb;
+       dma_addr_t pdma_phys;
+
+       psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+       if (!psb)
+               return NULL;
+       memset(psb, 0, sizeof (struct lpfc_scsi_buf));
+       psb->scsi_hba = phba;
+
+       /*
+        * Get memory from the pci pool to map the virt space to pci bus space
+        * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
+        * struct fcp_rsp and the number of bde's necessary to support the
+        * sg_tablesize.
+        */
+       psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
+                                                       &psb->dma_handle);
+       if (!psb->data) {
+               kfree(psb);
+               return NULL;
+       }
+
+       /* Initialize virtual ptrs to dma_buf region. */
+       memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+       psb->fcp_cmnd = psb->data;
+       psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+       psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+                                                       sizeof(struct fcp_rsp);
+
+       /* Initialize local short-hand pointers. */
+       bpl = psb->fcp_bpl;
+       pdma_phys = psb->dma_handle;
+
+       /*
+        * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
+        * list bdes.  Initialize the first two and leave the rest for
+        * queuecommand.
+        */
+       bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
+       bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
+       bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
+       bpl->tus.f.bdeFlags = BUFF_USE_CMND;
+       bpl->tus.w = le32_to_cpu(bpl->tus.w);
+       bpl++;
+
+       /* Setup the physical region for the FCP RSP */
+       pdma_phys += sizeof (struct fcp_cmnd);
+       bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
+       bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
+       bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
+       bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
+       bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+       /*
+        * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
+        * initialize it with all known data now.
+        */
+       pdma_phys += (sizeof (struct fcp_rsp));
+       iocb = &psb->cur_iocbq.iocb;
+       iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+       iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
+       iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
+       iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
+       iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
+       iocb->ulpBdeCount = 1;
+       iocb->ulpClass = CLASS3;
+
+       return psb;
+}
+
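+/* Release the DMA mappings held by a completed command (scatter-gather or
+ * single-buffer) and return the lpfc_scsi_buf to the free list.
+ */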
+static void
+lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
+{
+       struct lpfc_hba *phba = psb->scsi_hba;
+
+       /*
+        * There are only two special cases to consider.  (1) the scsi command
+        * requested scatter-gather usage or (2) the scsi command allocated
+        * a request buffer, but did not request use_sg.  There is a third
+        * case, but it does not require resource deallocation.
+        */
+       if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
+               dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
+                               psb->seg_cnt, psb->pCmd->sc_data_direction);
+       } else {
+                if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
+                       dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
+                                               psb->pCmd->request_bufflen,
+                                               psb->pCmd->sc_data_direction);
+                }
+       }
+
+       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+}
+
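+/* Map the scsi_cmnd data (scatter-gather list or single buffer) and fill in
+ * the data BDEs that follow the FCP_CMND/FCP_RSP entries in the BPL, then
+ * update the IOCB BDL size and the FCP data length.
+ */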
+static int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
+{
+       struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+       struct scatterlist *sgel = NULL;
+       struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+       struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+       IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+       dma_addr_t physaddr;
+       uint32_t i, num_bde = 0;
+       int datadir = scsi_cmnd->sc_data_direction;
+       int dma_error;
+
+       /*
+        * There are three possibilities here - use scatter-gather segment, use
+        * the single mapping, or neither.  Start the lpfc command prep by
+        * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+        * data bde entry.
+        */
+       bpl += 2;
+       if (scsi_cmnd->use_sg) {
+               /*
+                * The driver stores the segment count returned from dma_map_sg
+                * because this is a count of dma-mappings used to map the use_sg
+                * pages.  They are not guaranteed to be the same for those
+                * architectures that implement an IOMMU.
+                */
+               sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
+               lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
+                                               scsi_cmnd->use_sg, datadir);
+               if (lpfc_cmd->seg_cnt == 0)
+                       return 1;
+
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+                       printk(KERN_ERR "%s: Too many sg segments from "
+                              "dma_map_sg.  Config %d, seg_cnt %d",
+                              __FUNCTION__, phba->cfg_sg_seg_cnt,
+                              lpfc_cmd->seg_cnt);
+                       dma_unmap_sg(&phba->pcidev->dev, sgel,
+                                    lpfc_cmd->seg_cnt, datadir);
+                       return 1;
+               }
+
+               /*
+                * The driver established a maximum scatter-gather segment count
+                * during probe that limits the number of sg elements in any
+                * single scsi command.  Just run through the seg_cnt and format
+                * the bde's.
+                */
+               for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
+                       physaddr = sg_dma_address(sgel);
+                       bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+                       bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+                       bpl->tus.f.bdeSize = sg_dma_len(sgel);
+                       if (datadir == DMA_TO_DEVICE)
+                               bpl->tus.f.bdeFlags = 0;
+                       else
+                               bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+                       bpl->tus.w = le32_to_cpu(bpl->tus.w);
+                       bpl++;
+                       sgel++;
+                       num_bde++;
+               }
+       } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
+               physaddr = dma_map_single(&phba->pcidev->dev,
+                                         scsi_cmnd->request_buffer,
+                                         scsi_cmnd->request_bufflen,
+                                         datadir);
+               dma_error = dma_mapping_error(physaddr);
+               if (dma_error) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                               "%d:0718 Unable to dma_map_single "
+                               "request_buffer: x%x\n",
+                               phba->brd_no, dma_error);
+                       return 1;
+               }
+
+               lpfc_cmd->nonsg_phys = physaddr;
+               bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+               bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+               bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
+               if (datadir == DMA_TO_DEVICE)
+                       bpl->tus.f.bdeFlags = 0;
+               bpl->tus.w = le32_to_cpu(bpl->tus.w);
+               num_bde = 1;
+               bpl++;
+       }
+
+       /*
+        * Finish initializing those IOCB fields that are dependent on the
+        * scsi_cmnd request_buffer
+        */
+       iocb_cmd->un.fcpi64.bdl.bdeSize +=
+               (num_bde * sizeof (struct ulp_bde64));
+       iocb_cmd->ulpBdeCount = 1;
+       iocb_cmd->ulpLe = 1;
+       fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
+       return 0;
+}
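
As an aside, the comments above describe the standard streaming-DMA pattern: map the scatter list, build one BDE per mapped segment, and unmap later with the count that dma_map_sg() returned (an IOMMU may coalesce entries, so the returned count can differ from the one passed in). A minimal sketch of that pattern, not taken from this driver; the printk merely stands in for the hardware-specific BDE setup:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir)
{
	int i, mapped;

	/* The returned count, not nents, drives the BDE loop and the
	 * matching dma_unmap_sg() once the hardware is finished. */
	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (mapped == 0)
		return -ENOMEM;

	for (i = 0; i < mapped; i++)
		printk(KERN_DEBUG "seg %d: addr 0x%llx len %u\n", i,
		       (unsigned long long)sg_dma_address(&sgl[i]),
		       sg_dma_len(&sgl[i]));

	dma_unmap_sg(dev, sgl, mapped, dir);
	return 0;
}
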
+
+static void
+lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+       struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
+       struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+       struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
+       uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
+       uint32_t resp_info = fcprsp->rspStatus2;
+       uint32_t scsi_status = fcprsp->rspStatus3;
+       uint32_t host_status = DID_OK;
+       uint32_t rsplen = 0;
+
+       /*
+        *  If this is a task management command, there is no
+        *  scsi packet associated with this lpfc_cmd.  The driver
+        *  consumes it.
+        */
+       if (fcpcmd->fcpCntl2) {
+               scsi_status = 0;
+               goto out;
+       }
+
+       lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+                       "%d:0730 FCP command failed: RSP "
+                       "Data: x%x x%x x%x x%x x%x x%x\n",
+                       phba->brd_no, resp_info, scsi_status,
+                       be32_to_cpu(fcprsp->rspResId),
+                       be32_to_cpu(fcprsp->rspSnsLen),
+                       be32_to_cpu(fcprsp->rspRspLen),
+                       fcprsp->rspInfo3);
+
+       if (resp_info & RSP_LEN_VALID) {
+               rsplen = be32_to_cpu(fcprsp->rspRspLen);
+               if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
+                   (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
+                       host_status = DID_ERROR;
+                       goto out;
+               }
+       }
+
+       if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
+               uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
+               if (snslen > SCSI_SENSE_BUFFERSIZE)
+                       snslen = SCSI_SENSE_BUFFERSIZE;
+
+               memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
+       }
+
+       cmnd->resid = 0;
+       if (resp_info & RESID_UNDER) {
+               cmnd->resid = be32_to_cpu(fcprsp->rspResId);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+                               "%d:0716 FCP Read Underrun, expected %d, "
+                               "residual %d Data: x%x x%x x%x\n", phba->brd_no,
+                               be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
+                               fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
+
+               /*
+                * The cmnd->underflow is the minimum number of bytes that must
+                * be transferred for this command.  Provided a sense condition
+                * is not present, make sure the actual amount transferred is at
+                * least the underflow value or fail.
+                */
+               if (!(resp_info & SNS_LEN_VALID) &&
+                   (scsi_status == SAM_STAT_GOOD) &&
+                   (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+                                       "%d:0717 FCP command x%x residual "
+                                       "underrun converted to error "
+                                       "Data: x%x x%x x%x\n", phba->brd_no,
+                                       cmnd->cmnd[0], cmnd->request_bufflen,
+                                       cmnd->resid, cmnd->underflow);
+
+                       host_status = DID_ERROR;
+               }
+       } else if (resp_info & RESID_OVER) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+                               "%d:0720 FCP command x%x residual "
+                               "overrun error. Data: x%x x%x \n",
+                               phba->brd_no, cmnd->cmnd[0],
+                               cmnd->request_bufflen, cmnd->resid);
+               host_status = DID_ERROR;
+
+       /*
+        * Check the SLI read-check validation that the whole transfer was
+        * actually done (fcpi_parm should be zero).  Apply the check only to reads.
+        */
+       } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
+                       (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+                       "%d:0734 FCP Read Check Error Data: "
+                       "x%x x%x x%x x%x\n", phba->brd_no,
+                       be32_to_cpu(fcpcmd->fcpDl),
+                       be32_to_cpu(fcprsp->rspResId),
+                       fcpi_parm, cmnd->cmnd[0]);
+               host_status = DID_ERROR;
+               cmnd->resid = cmnd->request_bufflen;
+       }
+
+ out:
+       cmnd->result = ScsiResult(host_status, scsi_status);
+}
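
The underrun handling above reduces to a simple rule: a RESID_UNDER response is only an error when no sense data accompanies it, the SCSI status is GOOD, and the bytes actually transferred fall short of the midlayer's ->underflow minimum. Restated as a standalone predicate (illustration only, not code from the driver):

#include <scsi/scsi.h>

/* Mirrors the RESID_UNDER check in lpfc_handle_fcp_err() above. */
static int underrun_is_error(unsigned int bufflen, unsigned int resid,
			     unsigned int underflow, int sense_valid,
			     int scsi_status)
{
	if (sense_valid || scsi_status != SAM_STAT_GOOD)
		return 0;	/* let the sense/status handling decide */

	return (bufflen - resid) < underflow;
}
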
+
+static void
+lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+                       struct lpfc_iocbq *pIocbOut)
+{
+       struct lpfc_scsi_buf *lpfc_cmd =
+               (struct lpfc_scsi_buf *) pIocbIn->context1;
+       struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+       struct lpfc_nodelist *pnode = rdata->pnode;
+       struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+       unsigned long iflag;
+
+       lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+       lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+
+       if (lpfc_cmd->status) {
+               if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+                   (lpfc_cmd->result & IOERR_DRVR_MASK))
+                       lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+               else if (lpfc_cmd->status >= IOSTAT_CNT)
+                       lpfc_cmd->status = IOSTAT_DEFAULT;
+
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+                               "%d:0729 FCP cmd x%x failed <%d/%d> status: "
+                               "x%x result: x%x Data: x%x x%x\n",
+                               phba->brd_no, cmd->cmnd[0], cmd->device->id,
+                               cmd->device->lun, lpfc_cmd->status,
+                               lpfc_cmd->result, pIocbOut->iocb.ulpContext,
+                               lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+
+               switch (lpfc_cmd->status) {
+               case IOSTAT_FCP_RSP_ERROR:
+                       /* Call FCP RSP handler to determine result */
+                       lpfc_handle_fcp_err(lpfc_cmd);
+                       break;
+               case IOSTAT_NPORT_BSY:
+               case IOSTAT_FABRIC_BSY:
+                       cmd->result = ScsiResult(DID_BUS_BUSY, 0);
+                       break;
+               default:
+                       cmd->result = ScsiResult(DID_ERROR, 0);
+                       break;
+               }
+
+               if (pnode) {
+                       if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
+                               cmd->result = ScsiResult(DID_BUS_BUSY,
+                                       SAM_STAT_BUSY);
+               }
+               else {
+                       cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+               }
+       } else {
+               cmd->result = ScsiResult(DID_OK, 0);
+       }
+
+       if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+               uint32_t *lp = (uint32_t *)cmd->sense_buffer;
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+                               "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
+                               "SNS x%x x%x Data: x%x x%x\n",
+                               phba->brd_no, cmd->device->id,
+                               cmd->device->lun, cmd, cmd->result,
+                               *lp, *(lp + 3), cmd->retries, cmd->resid);
+       }
+
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       lpfc_free_scsi_buf(lpfc_cmd);
+       cmd->host_scribble = NULL;
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+
+       cmd->scsi_done(cmd);
+}
+
+static void
+lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
+                       struct lpfc_nodelist *pnode)
+{
+       struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+       struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+       IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+       struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
+       int datadir = scsi_cmnd->sc_data_direction;
+
+       lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+
+       lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+
+       memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
+
+       if (scsi_cmnd->device->tagged_supported) {
+               switch (scsi_cmnd->tag) {
+               case HEAD_OF_QUEUE_TAG:
+                       fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
+                       break;
+               case ORDERED_QUEUE_TAG:
+                       fcp_cmnd->fcpCntl1 = ORDERED_Q;
+                       break;
+               default:
+                       fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+                       break;
+               }
+       } else
+               fcp_cmnd->fcpCntl1 = 0;
+
+       /*
+        * There are three possibilities here - a scatter-gather transfer,
+        * a single-buffer transfer, or no data at all.  Select the IOCB
+        * command and FCP control bits to match the data direction in
+        * each case.
+        */
+       if (scsi_cmnd->use_sg) {
+               if (datadir == DMA_TO_DEVICE) {
+                       iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+                       iocb_cmd->un.fcpi.fcpi_parm = 0;
+                       iocb_cmd->ulpPU = 0;
+                       fcp_cmnd->fcpCntl3 = WRITE_DATA;
+                       phba->fc4OutputRequests++;
+               } else {
+                       iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
+                       iocb_cmd->ulpPU = PARM_READ_CHECK;
+                       iocb_cmd->un.fcpi.fcpi_parm =
+                               scsi_cmnd->request_bufflen;
+                       fcp_cmnd->fcpCntl3 = READ_DATA;
+                       phba->fc4InputRequests++;
+               }
+       } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
+               if (datadir == DMA_TO_DEVICE) {
+                       iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+                       iocb_cmd->un.fcpi.fcpi_parm = 0;
+                       iocb_cmd->ulpPU = 0;
+                       fcp_cmnd->fcpCntl3 = WRITE_DATA;
+                       phba->fc4OutputRequests++;
+               } else {
+                       iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
+                       iocb_cmd->ulpPU = PARM_READ_CHECK;
+                       iocb_cmd->un.fcpi.fcpi_parm =
+                               scsi_cmnd->request_bufflen;
+                       fcp_cmnd->fcpCntl3 = READ_DATA;
+                       phba->fc4InputRequests++;
+               }
+       } else {
+               iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
+               iocb_cmd->un.fcpi.fcpi_parm = 0;
+               iocb_cmd->ulpPU = 0;
+               fcp_cmnd->fcpCntl3 = 0;
+               phba->fc4ControlRequests++;
+       }
+
+       /*
+        * Finish initializing those IOCB fields that are independent
+        * of the scsi_cmnd request_buffer
+        */
+       piocbq->iocb.ulpContext = pnode->nlp_rpi;
+       if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+               piocbq->iocb.ulpFCP2Rcvy = 1;
+
+       piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
+       piocbq->context1  = lpfc_cmd;
+       piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+       piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+}
+
+static int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
+                            struct lpfc_scsi_buf *lpfc_cmd,
+                            uint8_t task_mgmt_cmd)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_iocbq *piocbq;
+       IOCB_t *piocb;
+       struct fcp_cmnd *fcp_cmnd;
+       struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
+       struct lpfc_rport_data *rdata = scsi_dev->hostdata;
+       struct lpfc_nodelist *ndlp = rdata->pnode;
+
+       if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+               return 0;
+       }
+
+       psli = &phba->sli;
+       piocbq = &(lpfc_cmd->cur_iocbq);
+       piocb = &piocbq->iocb;
+
+       fcp_cmnd = lpfc_cmd->fcp_cmnd;
+       lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+       fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+
+       piocb->ulpCommand = CMD_FCP_ICMND64_CR;
+
+       piocb->ulpContext = ndlp->nlp_rpi;
+       if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
+               piocb->ulpFCP2Rcvy = 1;
+       }
+       piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+
+       /* ulpTimeout is only one byte */
+       if (lpfc_cmd->timeout > 0xff) {
+               /*
+                * Do not timeout the command at the firmware level.
+                * The driver will provide the timeout mechanism.
+                */
+               piocb->ulpTimeout = 0;
+       } else {
+               piocb->ulpTimeout = lpfc_cmd->timeout;
+       }
+
+       lpfc_cmd->rdata = rdata;
+
+       switch (task_mgmt_cmd) {
+       case FCP_LUN_RESET:
+               /* Issue LUN Reset to TGT <num> LUN <num> */
+               lpfc_printf_log(phba,
+                               KERN_INFO,
+                               LOG_FCP,
+                               "%d:0703 Issue LUN Reset to TGT %d LUN %d "
+                               "Data: x%x x%x\n",
+                               phba->brd_no,
+                               scsi_dev->id, scsi_dev->lun,
+                               ndlp->nlp_rpi, ndlp->nlp_flag);
+
+               break;
+       case FCP_ABORT_TASK_SET:
+               /* Issue Abort Task Set to TGT <num> LUN <num> */
+               lpfc_printf_log(phba,
+                               KERN_INFO,
+                               LOG_FCP,
+                               "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
+                               "Data: x%x x%x\n",
+                               phba->brd_no,
+                               scsi_dev->id, scsi_dev->lun,
+                               ndlp->nlp_rpi, ndlp->nlp_flag);
+
+               break;
+       case FCP_TARGET_RESET:
+               /* Issue Target Reset to TGT <num> */
+               lpfc_printf_log(phba,
+                               KERN_INFO,
+                               LOG_FCP,
+                               "%d:0702 Issue Target Reset to TGT %d "
+                               "Data: x%x x%x\n",
+                               phba->brd_no,
+                               scsi_dev->id, ndlp->nlp_rpi,
+                               ndlp->nlp_flag);
+               break;
+       }
+
+       return (1);
+}
+
+static int
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
+{
+       struct lpfc_iocbq *iocbq;
+       struct lpfc_iocbq *iocbqrsp = NULL;
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       int ret;
+
+       ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
+       if (!ret)
+               return FAILED;
+
+       lpfc_cmd->scsi_hba = phba;
+       iocbq = &lpfc_cmd->cur_iocbq;
+       list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
+       if (!iocbqrsp)
+               return FAILED;
+       memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
+
+       iocbq->iocb_flag |= LPFC_IO_POLL;
+       ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
+                    &phba->sli.ring[phba->sli.fcp_ring],
+                    iocbq, SLI_IOCB_HIGH_PRIORITY,
+                    iocbqrsp,
+                    lpfc_cmd->timeout);
+       if (ret != IOCB_SUCCESS) {
+               lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+               ret = FAILED;
+       } else {
+               ret = SUCCESS;
+               lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
+               lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
+               if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+                       (lpfc_cmd->result & IOERR_DRVR_MASK))
+                               lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+       }
+
+       /*
+        * All outstanding txcmplq I/Os should have been aborted by the target.
+        * Unfortunately, some targets do not abide by this, forcing the driver
+        * to double-check.
+        */
+       lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+                           lpfc_cmd->pCmd->device->id,
+                           lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);
+
+       /* Return response IOCB to free list. */
+       list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+       return ret;
+}
+
+static void
+lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+                           struct lpfc_iocbq *pIocbOut)
+{
+       unsigned long iflag;
+       struct lpfc_scsi_buf *lpfc_cmd =
+               (struct lpfc_scsi_buf *) pIocbIn->context1;
+
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       lpfc_free_scsi_buf(lpfc_cmd);
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+static void
+lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
+                               struct lpfc_iocbq *pIocbIn,
+                               struct lpfc_iocbq *pIocbOut)
+{
+       struct scsi_cmnd *ml_cmd =
+               ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;
+
+       lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
+       ml_cmd->host_scribble = NULL;
+}
+
+const char *
+lpfc_info(struct Scsi_Host *host)
+{
+       struct lpfc_hba    *phba = (struct lpfc_hba *) host->hostdata[0];
+       int len;
+       static char  lpfcinfobuf[384];
+
+       memset(lpfcinfobuf,0,384);
+       if (phba && phba->pcidev){
+               strncpy(lpfcinfobuf, phba->ModelDesc, 256);
+               len = strlen(lpfcinfobuf);
+               snprintf(lpfcinfobuf + len,
+                       384-len,
+                       " on PCI bus %02x device %02x irq %d",
+                       phba->pcidev->bus->number,
+                       phba->pcidev->devfn,
+                       phba->pcidev->irq);
+               len = strlen(lpfcinfobuf);
+               if (phba->Port[0]) {
+                       snprintf(lpfcinfobuf + len,
+                                384-len,
+                                " port %s",
+                                phba->Port);
+               }
+       }
+       return lpfcinfobuf;
+}
+
+static int
+lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+{
+       struct lpfc_hba *phba =
+               (struct lpfc_hba *) cmnd->device->host->hostdata[0];
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+       struct lpfc_nodelist *ndlp = rdata->pnode;
+       struct lpfc_scsi_buf *lpfc_cmd = NULL;
+       struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+       int err = 0;
+
+       /*
+        * The target pointer is guaranteed not to be NULL because the driver
+        * only clears the device->hostdata field in lpfc_slave_destroy.  This
+        * approach guarantees no further IO calls on this target.
+        */
+       if (!ndlp) {
+               cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+               goto out_fail_command;
+       }
+
+       /*
+        * A Fibre Channel target is present and functioning only when the node
+        * state is MAPPED.  Any other state is a failure.
+        */
+       if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+               if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+                   (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
+                       cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+                       goto out_fail_command;
+               }
+               /*
+                * The device is most likely being recovered and the driver
+                * needs a bit more time to finish.  Ask the midlayer
+                * to retry.
+                */
+               goto out_host_busy;
+       }
+
+       list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+       if (lpfc_cmd == NULL) {
+               printk(KERN_WARNING "%s: No buffer available - list empty, "
+                      "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
+               goto out_host_busy;
+       }
+
+       /*
+        * Store the midlayer's command structure for the completion phase
+        * and complete the command initialization.
+        */
+       lpfc_cmd->pCmd  = cmnd;
+       lpfc_cmd->rdata = rdata;
+       lpfc_cmd->timeout = 0;
+       cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+       cmnd->scsi_done = done;
+
+       err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+       if (err)
+               goto out_host_busy_free_buf;
+
+       lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
+
+       err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
+                               &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+       if (err)
+               goto out_host_busy_free_buf;
+       return 0;
+
+ out_host_busy_free_buf:
+       lpfc_free_scsi_buf(lpfc_cmd);
+       cmnd->host_scribble = NULL;
+ out_host_busy:
+       return SCSI_MLQUEUE_HOST_BUSY;
+
+ out_fail_command:
+       done(cmnd);
+       return 0;
+}
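
The queuecommand path above relies on the usual host_scribble handshake: the driver context is stashed in the command before the IOCB is issued, the completion and abort paths clear it, and a NULL host_scribble tells the error handler the command has already finished. A minimal sketch of that convention, with a hypothetical context structure standing in for struct lpfc_scsi_buf:

#include <scsi/scsi_cmnd.h>

struct example_ctx {
	struct scsi_cmnd *cmd;	/* hypothetical per-command context */
};

static void example_start(struct scsi_cmnd *cmnd, struct example_ctx *ctx)
{
	ctx->cmd = cmnd;
	cmnd->host_scribble = (unsigned char *)ctx;	/* visible to the EH */
}

static void example_done(struct example_ctx *ctx)
{
	struct scsi_cmnd *cmnd = ctx->cmd;

	cmnd->host_scribble = NULL;	/* abort handler now reports SUCCESS */
	cmnd->scsi_done(cmnd);
}
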
+
+static int
+lpfc_abort_handler(struct scsi_cmnd *cmnd)
+{
+       struct lpfc_hba *phba =
+                       (struct lpfc_hba *)cmnd->device->host->hostdata[0];
+       struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
+       struct lpfc_iocbq *iocb, *next_iocb;
+       struct lpfc_iocbq *abtsiocb = NULL;
+       struct lpfc_scsi_buf *lpfc_cmd;
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       IOCB_t *cmd, *icmd;
+       unsigned long snum;
+       unsigned int id, lun;
+       unsigned int loop_count = 0;
+       int ret = IOCB_SUCCESS;
+
+       /*
+        * If the host_scribble data area is NULL, then the driver has already
+        * completed this command, but the midlayer did not see the completion
+        * before the eh fired.  Just return SUCCESS.
+        */
+       lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
+       if (!lpfc_cmd)
+               return SUCCESS;
+
+       /* save these now since lpfc_cmd can be freed */
+       id   = lpfc_cmd->pCmd->device->id;
+       lun  = lpfc_cmd->pCmd->device->lun;
+       snum = lpfc_cmd->pCmd->serial_number;
+
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+               cmd = &iocb->iocb;
+               if (iocb->context1 != lpfc_cmd)
+                       continue;
+
+               list_del_init(&iocb->list);
+               pring->txq_cnt--;
+               if (!iocb->iocb_cmpl) {
+                       list_add_tail(&iocb->list, lpfc_iocb_list);
+               }
+               else {
+                       cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                       cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+                       lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
+               }
+
+               goto out;
+       }
+
+       list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
+       if (abtsiocb == NULL)
+               return FAILED;
+
+       memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
+
+       /*
+        * The scsi command was not in the txq.  Check the txcmplq and if it is
+        * found, send an abort to the FW.
+        */
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+               if (iocb->context1 != lpfc_cmd)
+                       continue;
+
+               iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
+               cmd = &iocb->iocb;
+               icmd = &abtsiocb->iocb;
+               icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+               icmd->un.acxri.abortContextTag = cmd->ulpContext;
+               icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+               icmd->ulpLe = 1;
+               icmd->ulpClass = cmd->ulpClass;
+               if (phba->hba_state >= LPFC_LINK_UP)
+                       icmd->ulpCommand = CMD_ABORT_XRI_CN;
+               else
+                       icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+
+               if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
+                                                               IOCB_ERROR) {
+                       list_add_tail(&abtsiocb->list, lpfc_iocb_list);
+                       ret = IOCB_ERROR;
+                       break;
+               }
+
+               /* Wait for abort to complete */
+               while (cmnd->host_scribble) {
+                       spin_unlock_irq(phba->host->host_lock);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_timeout(LPFC_ABORT_WAIT*HZ);
+                       spin_lock_irq(phba->host->host_lock);
+                       if (++loop_count
+                           > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
+                               break;
+               }
+
+               if (cmnd->host_scribble) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                                       "%d:0748 abort handler timed "
+                                       "out waiting for abort to "
+                                       "complete. Data: "
+                                       "x%x x%x x%x x%lx\n",
+                                       phba->brd_no, ret, id, lun, snum);
+                       cmnd->host_scribble = NULL;
+                       iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
+                       ret = IOCB_ERROR;
+               }
+
+               break;
+       }
+
+ out:
+       lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+                       "%d:0749 SCSI layer issued abort device "
+                       "Data: x%x x%x x%x x%lx\n",
+                       phba->brd_no, ret, id, lun, snum);
+
+       return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
+}
+
+static int
+lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
+{
+       struct Scsi_Host *shost = cmnd->device->host;
+       struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_scsi_buf *lpfc_cmd = NULL;
+       struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
+       struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+       struct lpfc_nodelist *pnode = rdata->pnode;
+       int ret = FAILED;
+       int cnt, loopcnt;
+
+       /*
+        * If target is not in a MAPPED state, delay the reset until
+        * target is rediscovered or nodev timeout expires.
+        */
+       while ( 1 ) {
+               if (!pnode)
+                       break;
+
+               if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+                       spin_unlock_irq(phba->host->host_lock);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_timeout( HZ/2);
+                       spin_lock_irq(phba->host->host_lock);
+               }
+               if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
+                       break;
+       }
+
+       list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+       if (lpfc_cmd == NULL)
+               goto out;
+
+       lpfc_cmd->pCmd = cmnd;
+       lpfc_cmd->timeout = 60;
+       lpfc_cmd->scsi_hba = phba;
+
+       ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
+       if (!ret)
+               goto out_free_scsi_buf;
+
+       iocbq = &lpfc_cmd->cur_iocbq;
+
+       /* get a buffer for this IOCB command response */
+       list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
+       if (iocbqrsp == NULL)
+               goto out_free_scsi_buf;
+
+       memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
+
+       iocbq->iocb_flag |= LPFC_IO_POLL;
+       iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
+
+       ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
+                    &phba->sli.ring[psli->fcp_ring],
+                    iocbq, 0, iocbqrsp, 60);
+       if (ret == IOCB_SUCCESS)
+               ret = SUCCESS;
+
+       lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
+       lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
+       if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
+               if (lpfc_cmd->result & IOERR_DRVR_MASK)
+                       lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+
+       /*
+        * All outstanding txcmplq I/Os should have been aborted by the target.
+        * Unfortunately, some targets do not abide by this, forcing the driver
+        * to double-check.
+        */
+       lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+                           cmnd->device->id, cmnd->device->lun, 0,
+                           LPFC_CTX_LUN);
+
+       loopcnt = 0;
+       while((cnt = lpfc_sli_sum_iocb(phba,
+                                      &phba->sli.ring[phba->sli.fcp_ring],
+                                      cmnd->device->id, cmnd->device->lun,
+                                      LPFC_CTX_LUN))) {
+               spin_unlock_irq(phba->host->host_lock);
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(LPFC_RESET_WAIT*HZ);
+               spin_lock_irq(phba->host->host_lock);
+
+               if (++loopcnt
+                   > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+                       break;
+       }
+
+       if (cnt) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+                       "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
+                       phba->brd_no, cnt);
+       }
+
+       list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+
+out_free_scsi_buf:
+       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                       "%d:0713 SCSI layer issued LUN reset (%d, %d) "
+                       "Data: x%x x%x x%x\n",
+                       phba->brd_no, lpfc_cmd->pCmd->device->id,
+                       lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
+                       lpfc_cmd->result);
+       lpfc_free_scsi_buf(lpfc_cmd);
+out:
+       return ret;
+}
+
+/*
+ * Note: midlayer calls this function with the host_lock held
+ */
+static int
+lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
+{
+       struct Scsi_Host *shost = cmnd->device->host;
+       struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
+       struct lpfc_nodelist *ndlp = NULL;
+       int match;
+       int ret = FAILED, i, err_count = 0;
+       int cnt, loopcnt;
+       unsigned int midlayer_id = 0;
+       struct lpfc_scsi_buf * lpfc_cmd = NULL;
+       struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+
+       list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+       if (lpfc_cmd == NULL)
+               goto out;
+
+       /* The lpfc_cmd storage is reused.  Set all loop invariants. */
+       lpfc_cmd->timeout = 60;
+       lpfc_cmd->pCmd = cmnd;
+       lpfc_cmd->scsi_hba = phba;
+
+       /*
+        * Since the driver manages a single bus device, reset all
+        * targets known to the driver.  Should any target reset
+        * fail, this routine returns failure to the midlayer.
+        */
+       midlayer_id = cmnd->device->id;
+       for (i = 0; i < MAX_FCP_TARGET; i++) {
+               /* Search the mapped list for this target ID */
+               match = 0;
+               list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+                       if ((i == ndlp->nlp_sid) && ndlp->rport) {
+                               match = 1;
+                               break;
+                       }
+               }
+               if (!match)
+                       continue;
+
+               lpfc_cmd->pCmd->device->id = i;
+               lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
+               ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
+               if (ret != SUCCESS) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+                               "%d:0713 Bus Reset on target %d failed\n",
+                               phba->brd_no, i);
+                       err_count++;
+               }
+       }
+
+       cmnd->device->id = midlayer_id;
+       loopcnt = 0;
+       while((cnt = lpfc_sli_sum_iocb(phba,
+                               &phba->sli.ring[phba->sli.fcp_ring],
+                               0, 0, LPFC_CTX_HOST))) {
+               spin_unlock_irq(phba->host->host_lock);
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(LPFC_RESET_WAIT*HZ);
+               spin_lock_irq(phba->host->host_lock);
+
+               if (++loopcnt
+                   > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+                       break;
+       }
+
+       if (cnt) {
+               /* flush all outstanding commands on the host */
+               i = lpfc_sli_abort_iocb(phba,
+                               &phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
+                               LPFC_CTX_HOST);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+                  "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
+                  phba->brd_no, cnt, i);
+       }
+
+       if (!err_count)
+               ret = SUCCESS;
+
+       lpfc_free_scsi_buf(lpfc_cmd);
+       lpfc_printf_log(phba,
+                       KERN_ERR,
+                       LOG_FCP,
+                       "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
+                       phba->brd_no, ret);
+out:
+       return ret;
+}
+
+static int
+lpfc_slave_alloc(struct scsi_device *sdev)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
+       struct lpfc_nodelist *ndlp = NULL;
+       int match = 0;
+       struct lpfc_scsi_buf *scsi_buf = NULL;
+       uint32_t total = 0, i;
+       uint32_t num_to_alloc = 0;
+       unsigned long flags;
+       struct list_head *listp;
+       struct list_head *node_list[6];
+
+       /*
+        * Store the target pointer in the scsi_device hostdata pointer, provided
+        * the driver has already discovered the target id.
+        */
+
+       /* Search the nlp lists other than unmap_list for this target ID */
+       node_list[0] = &phba->fc_npr_list;
+       node_list[1] = &phba->fc_nlpmap_list;
+       node_list[2] = &phba->fc_prli_list;
+       node_list[3] = &phba->fc_reglogin_list;
+       node_list[4] = &phba->fc_adisc_list;
+       node_list[5] = &phba->fc_plogi_list;
+
+       for (i = 0; i < 6 && !match; i++) {
+               listp = node_list[i];
+               if (list_empty(listp))
+                       continue;
+               list_for_each_entry(ndlp, listp, nlp_listp) {
+                       if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
+                               match = 1;
+                               break;
+                       }
+               }
+       }
+
+       if (!match)
+               return -ENXIO;
+
+       sdev->hostdata = ndlp->rport->dd_data;
+
+       /*
+        * Add up to cmds_per_lun new scsi_bufs to this host's globally
+        * available list of scsi buffers.  Don't allocate more than the
+        * HBA limit conveyed to the midlayer via the host structure.  Note
+        * that this list of scsi bufs exists for the lifetime of the driver.
+        */
+       total = phba->total_scsi_bufs;
+       num_to_alloc = LPFC_CMD_PER_LUN;
+       if (total >= phba->cfg_hba_queue_depth) {
+               printk(KERN_WARNING "%s, At config limitation of "
+                      "%d allocated scsi_bufs\n", __FUNCTION__, total);
+               return 0;
+       } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+               num_to_alloc = phba->cfg_hba_queue_depth - total;
+       }
+
+       for (i = 0; i < num_to_alloc; i++) {
+               scsi_buf = lpfc_get_scsi_buf(phba);
+               if (!scsi_buf) {
+                       printk(KERN_ERR "%s, failed to allocate "
+                              "scsi_buf\n", __FUNCTION__);
+                       break;
+               }
+
+               spin_lock_irqsave(phba->host->host_lock, flags);
+               phba->total_scsi_bufs++;
+               list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
+               spin_unlock_irqrestore(phba->host->host_lock, flags);
+       }
+       return 0;
+}
+
+static int
+lpfc_slave_configure(struct scsi_device *sdev)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
+       struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+
+       if (sdev->tagged_supported)
+               scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
+       else
+               scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
+
+       /*
+        * Initialize the fc transport attributes for the target
+        * containing this scsi device.  Also note that the driver's
+        * target pointer is stored in the starget_data for the
+        * driver's sysfs entry point functions.
+        */
+       rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
+
+       return 0;
+}
+
+static void
+lpfc_slave_destroy(struct scsi_device *sdev)
+{
+       sdev->hostdata = NULL;
+       return;
+}
+
+struct scsi_host_template lpfc_template = {
+       .module                 = THIS_MODULE,
+       .name                   = LPFC_DRIVER_NAME,
+       .info                   = lpfc_info,
+       .queuecommand           = lpfc_queuecommand,
+       .eh_abort_handler       = lpfc_abort_handler,
+       .eh_device_reset_handler= lpfc_reset_lun_handler,
+       .eh_bus_reset_handler   = lpfc_reset_bus_handler,
+       .slave_alloc            = lpfc_slave_alloc,
+       .slave_configure        = lpfc_slave_configure,
+       .slave_destroy          = lpfc_slave_destroy,
+       .this_id                = -1,
+       .sg_tablesize           = LPFC_SG_SEG_CNT,
+       .cmd_per_lun            = LPFC_CMD_PER_LUN,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .shost_attrs            = lpfc_host_attrs,
+};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
new file mode 100644 (file)
index 0000000..4aafba4
--- /dev/null
@@ -0,0 +1,157 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_scsi.h 1.83 2005/04/07 08:47:43EDT sf_support Exp  $
+ */
+
+struct lpfc_hba;
+
+#define list_remove_head(list, entry, type, member)            \
+       if (!list_empty(list)) {                                \
+               entry = list_entry((list)->next, type, member); \
+               list_del_init(&entry->member);                  \
+       }
+
+#define list_get_first(list, type, member)                     \
+       (list_empty(list)) ? NULL :                             \
+       list_entry((list)->next, type, member)
+
+/* per-port data that is allocated in the FC transport for us */
+struct lpfc_rport_data {
+       struct lpfc_nodelist *pnode;    /* Pointer to the node structure. */
+};
+
+struct fcp_rsp {
+       uint32_t rspRsvd1;      /* FC Word 0, byte 0:3 */
+       uint32_t rspRsvd2;      /* FC Word 1, byte 0:3 */
+
+       uint8_t rspStatus0;     /* FCP_STATUS byte 0 (reserved) */
+       uint8_t rspStatus1;     /* FCP_STATUS byte 1 (reserved) */
+       uint8_t rspStatus2;     /* FCP_STATUS byte 2 field validity */
+#define RSP_LEN_VALID  0x01    /* bit 0 */
+#define SNS_LEN_VALID  0x02    /* bit 1 */
+#define RESID_OVER     0x04    /* bit 2 */
+#define RESID_UNDER    0x08    /* bit 3 */
+       uint8_t rspStatus3;     /* FCP_STATUS byte 3 SCSI status byte */
+
+       uint32_t rspResId;      /* Residual xfer if residual count field set in
+                                  fcpStatus2 */
+       /* Received in Big Endian format */
+       uint32_t rspSnsLen;     /* Length of sense data in fcpSnsInfo */
+       /* Received in Big Endian format */
+       uint32_t rspRspLen;     /* Length of FCP response data in fcpRspInfo */
+       /* Received in Big Endian format */
+
+       uint8_t rspInfo0;       /* FCP_RSP_INFO byte 0 (reserved) */
+       uint8_t rspInfo1;       /* FCP_RSP_INFO byte 1 (reserved) */
+       uint8_t rspInfo2;       /* FCP_RSP_INFO byte 2 (reserved) */
+       uint8_t rspInfo3;       /* FCP_RSP_INFO RSP_CODE byte 3 */
+
+#define RSP_NO_FAILURE       0x00
+#define RSP_DATA_BURST_ERR   0x01
+#define RSP_CMD_FIELD_ERR    0x02
+#define RSP_RO_MISMATCH_ERR  0x03
+#define RSP_TM_NOT_SUPPORTED 0x04      /* Task mgmt function not supported */
+#define RSP_TM_NOT_COMPLETED 0x05      /* Task mgmt function not performed */
+
+       uint32_t rspInfoRsvd;   /* FCP_RSP_INFO bytes 4-7 (reserved) */
+
+       uint8_t rspSnsInfo[128];
+#define SNS_ILLEGAL_REQ 0x05   /* sense key is byte 3 ([2]) */
+#define SNSCOD_BADCMD 0x20     /* sense code is byte 13 ([12]) */
+};
+
+struct fcp_cmnd {
+       uint32_t fcpLunMsl;     /* most  significant lun word (32 bits) */
+       uint32_t fcpLunLsl;     /* least significant lun word (32 bits) */
+       /* # of bits to shift lun id to end up in right
+        * payload word, little endian = 8, big = 16.
+        */
+#if __BIG_ENDIAN
+#define FC_LUN_SHIFT         16
+#define FC_ADDR_MODE_SHIFT   24
+#else  /*  __LITTLE_ENDIAN */
+#define FC_LUN_SHIFT         8
+#define FC_ADDR_MODE_SHIFT   0
+#endif
+
+       uint8_t fcpCntl0;       /* FCP_CNTL byte 0 (reserved) */
+       uint8_t fcpCntl1;       /* FCP_CNTL byte 1 task codes */
+#define  SIMPLE_Q        0x00
+#define  HEAD_OF_Q       0x01
+#define  ORDERED_Q       0x02
+#define  ACA_Q           0x04
+#define  UNTAGGED        0x05
+       uint8_t fcpCntl2;       /* FCP_CTL byte 2 task management codes */
+#define  FCP_ABORT_TASK_SET  0x02      /* Bit 1 */
+#define  FCP_CLEAR_TASK_SET  0x04      /* bit 2 */
+#define  FCP_BUS_RESET       0x08      /* bit 3 */
+#define  FCP_LUN_RESET       0x10      /* bit 4 */
+#define  FCP_TARGET_RESET    0x20      /* bit 5 */
+#define  FCP_CLEAR_ACA       0x40      /* bit 6 */
+#define  FCP_TERMINATE_TASK  0x80      /* bit 7 */
+       uint8_t fcpCntl3;
+#define  WRITE_DATA      0x01  /* Bit 0 */
+#define  READ_DATA       0x02  /* Bit 1 */
+
+       uint8_t fcpCdb[16];     /* SRB cdb field is copied here */
+       uint32_t fcpDl;         /* Total transfer length */
+
+};
+
+struct lpfc_scsi_buf {
+       struct list_head list;
+       struct scsi_cmnd *pCmd;
+       struct lpfc_hba *scsi_hba;
+       struct lpfc_rport_data *rdata;
+
+       uint32_t timeout;
+
+       uint16_t status;        /* From IOCB Word 7- ulpStatus */
+       uint32_t result;        /* From IOCB Word 4. */
+
+       uint32_t   seg_cnt;     /* Number of scatter-gather segments returned by
+                                * dma_map_sg.  The driver needs this for calls
+                                * to dma_unmap_sg. */
+       dma_addr_t nonsg_phys;  /* Non scatter-gather physical address. */
+
+       /*
+        * data and dma_handle are the kernel virtual and bus address of the
+        * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
+        * gather bde list that supports the sg_tablesize value.
+        */
+       void *data;
+       dma_addr_t dma_handle;
+
+       struct fcp_cmnd *fcp_cmnd;
+       struct fcp_rsp *fcp_rsp;
+       struct ulp_bde64 *fcp_bpl;
+
+       /* cur_iocbq has phys of the dma-able buffer.
+        * Iotag is in here
+        */
+       struct lpfc_iocbq cur_iocbq;
+};
+
+#define LPFC_SCSI_DMA_EXT_SIZE 264
+#define LPFC_BPL_SIZE          1024
+
+#define MDAC_DIRECT_CMD                  0x22
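
A short usage sketch for the list_remove_head() helper defined at the top of this header (illustration only): the macro leaves the entry pointer untouched when the list is empty, so callers pre-initialize it to NULL and test it afterwards, exactly as lpfc_queuecommand() does with the scsi buffer free list.

static struct lpfc_scsi_buf *example_get_buf(struct list_head *free_list)
{
	struct lpfc_scsi_buf *buf = NULL;

	list_remove_head(free_list, buf, struct lpfc_scsi_buf, list);
	return buf;		/* NULL means the free list was empty */
}
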
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
new file mode 100644 (file)
index 0000000..8d14b28
--- /dev/null
@@ -0,0 +1,2885 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_sli.c 1.232 2005/04/13 11:59:16EDT sf_support Exp  $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_compat.h"
+
+/*
+ * Define macro to log: Mailbox command x%x cannot issue Data
+ * This allows multiple uses of lpfc_msgBlk0311
+ * w/o perturbing log msg utility.
+ */
+#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
+                       lpfc_printf_log(phba, \
+                               KERN_INFO, \
+                               LOG_MBOX | LOG_SLI, \
+                               "%d:0311 Mailbox command x%x cannot issue " \
+                               "Data: x%x x%x x%x\n", \
+                               phba->brd_no, \
+                               mb->mbxCommand,         \
+                               phba->hba_state,        \
+                               psli->sli_flag, \
+                               flag);
+
+
+/* There are only four IOCB completion types. */
+typedef enum _lpfc_iocb_type {
+       LPFC_UNKNOWN_IOCB,
+       LPFC_UNSOL_IOCB,
+       LPFC_SOL_IOCB,
+       LPFC_ABORT_IOCB
+} lpfc_iocb_type;
+
+/*
+ * Translate the iocb command to an iocb command type used to decide the final
+ * disposition of each completed IOCB.
+ */
+static lpfc_iocb_type
+lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
+{
+       lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
+
+       if (iocb_cmnd > CMD_MAX_IOCB_CMD)
+               return 0;
+
+       switch (iocb_cmnd) {
+       case CMD_XMIT_SEQUENCE_CR:
+       case CMD_XMIT_SEQUENCE_CX:
+       case CMD_XMIT_BCAST_CN:
+       case CMD_XMIT_BCAST_CX:
+       case CMD_ELS_REQUEST_CR:
+       case CMD_ELS_REQUEST_CX:
+       case CMD_CREATE_XRI_CR:
+       case CMD_CREATE_XRI_CX:
+       case CMD_GET_RPI_CN:
+       case CMD_XMIT_ELS_RSP_CX:
+       case CMD_GET_RPI_CR:
+       case CMD_FCP_IWRITE_CR:
+       case CMD_FCP_IWRITE_CX:
+       case CMD_FCP_IREAD_CR:
+       case CMD_FCP_IREAD_CX:
+       case CMD_FCP_ICMND_CR:
+       case CMD_FCP_ICMND_CX:
+       case CMD_ADAPTER_MSG:
+       case CMD_ADAPTER_DUMP:
+       case CMD_XMIT_SEQUENCE64_CR:
+       case CMD_XMIT_SEQUENCE64_CX:
+       case CMD_XMIT_BCAST64_CN:
+       case CMD_XMIT_BCAST64_CX:
+       case CMD_ELS_REQUEST64_CR:
+       case CMD_ELS_REQUEST64_CX:
+       case CMD_FCP_IWRITE64_CR:
+       case CMD_FCP_IWRITE64_CX:
+       case CMD_FCP_IREAD64_CR:
+       case CMD_FCP_IREAD64_CX:
+       case CMD_FCP_ICMND64_CR:
+       case CMD_FCP_ICMND64_CX:
+       case CMD_GEN_REQUEST64_CR:
+       case CMD_GEN_REQUEST64_CX:
+       case CMD_XMIT_ELS_RSP64_CX:
+               type = LPFC_SOL_IOCB;
+               break;
+       case CMD_ABORT_XRI_CN:
+       case CMD_ABORT_XRI_CX:
+       case CMD_CLOSE_XRI_CN:
+       case CMD_CLOSE_XRI_CX:
+       case CMD_XRI_ABORTED_CX:
+       case CMD_ABORT_MXRI64_CN:
+               type = LPFC_ABORT_IOCB;
+               break;
+       case CMD_RCV_SEQUENCE_CX:
+       case CMD_RCV_ELS_REQ_CX:
+       case CMD_RCV_SEQUENCE64_CX:
+       case CMD_RCV_ELS_REQ64_CX:
+               type = LPFC_UNSOL_IOCB;
+               break;
+       default:
+               type = LPFC_UNKNOWN_IOCB;
+               break;
+       }
+
+       return type;
+}
+
+static int
+lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       MAILBOX_t *pmbox = &pmb->mb;
+       int i, rc;
+
+       for (i = 0; i < psli->num_rings; i++) {
+               phba->hba_state = LPFC_INIT_MBX_CMDS;
+               lpfc_config_ring(phba, i, pmb);
+               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+               if (rc != MBX_SUCCESS) {
+                       lpfc_printf_log(phba,
+                                       KERN_ERR,
+                                       LOG_INIT,
+                                       "%d:0446 Adapter failed to init, "
+                                       "mbxCmd x%x CFG_RING, mbxStatus x%x, "
+                                       "ring %d\n",
+                                       phba->brd_no,
+                                       pmbox->mbxCommand,
+                                       pmbox->mbxStatus,
+                                       i);
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       return -ENXIO;
+               }
+       }
+       return 0;
+}
+
+static int
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
+                       struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
+{
+       uint16_t iotag;
+
+       list_add_tail(&piocb->list, &pring->txcmplq);
+       pring->txcmplq_cnt++;
+       if (unlikely(pring->ringno == LPFC_ELS_RING))
+               mod_timer(&phba->els_tmofunc,
+                                       jiffies + HZ * (phba->fc_ratov << 1));
+
+       if (pring->fast_lookup) {
+               /* Setup fast lookup based on iotag for completion */
+               iotag = piocb->iocb.ulpIoTag;
+               if (iotag && (iotag < pring->fast_iotag))
+                       *(pring->fast_lookup + iotag) = piocb;
+               else {
+
+                       /* Cmd ring <ringno> put: iotag <iotag> greater than
+                          configured max <fast_iotag> wd0 <icmd> */
+                       lpfc_printf_log(phba,
+                                       KERN_ERR,
+                                       LOG_SLI,
+                                       "%d:0316 Cmd ring %d put: iotag x%x "
+                                       "greater than configured max x%x "
+                                       "wd0 x%x\n",
+                                       phba->brd_no,
+                                       pring->ringno, iotag,
+                                       pring->fast_iotag,
+                                       *(((uint32_t *)(&piocb->iocb)) + 7));
+               }
+       }
+       return (0);
+}
+
+static struct lpfc_iocbq *
+lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+       struct list_head *dlp;
+       struct lpfc_iocbq *cmd_iocb;
+
+       dlp = &pring->txq;
+       cmd_iocb = NULL;
+       list_remove_head((&pring->txq), cmd_iocb,
+                        struct lpfc_iocbq,
+                        list);
+       if (cmd_iocb) {
+               /* If the first ptr is not equal to the list header,
+                * dequeue the IOCBQ_t and return it.
+                */
+               pring->txq_cnt--;
+       }
+       return (cmd_iocb);
+}
+
+static IOCB_t *
+lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+       MAILBOX_t *mbox = (MAILBOX_t *)phba->sli.MBhostaddr;
+       PGP *pgp = (PGP *)&mbox->us.s2.port[pring->ringno];
+       uint32_t  max_cmd_idx = pring->numCiocb;
+       IOCB_t *iocb = NULL;
+
+       if ((pring->next_cmdidx == pring->cmdidx) &&
+          (++pring->next_cmdidx >= max_cmd_idx))
+               pring->next_cmdidx = 0;
+
+       if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+
+               pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+
+               if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "%d:0315 Ring %d issue: portCmdGet %d "
+                                       "is bigger than cmd ring %d\n",
+                                       phba->brd_no, pring->ringno,
+                                       pring->local_getidx, max_cmd_idx);
+
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       /*
+                        * All error attention handlers are posted to
+                        * worker thread
+                        */
+                       phba->work_ha |= HA_ERATT;
+                       phba->work_hs = HS_FFER3;
+                       if (phba->work_wait)
+                               wake_up(phba->work_wait);
+
+                       return NULL;
+               }
+
+               if (pring->local_getidx == pring->next_cmdidx)
+                       return NULL;
+       }
+
+       iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
+
+       return iocb;
+}
+
+static uint32_t
+lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+       uint32_t search_start;
+
+       if (pring->fast_lookup == NULL) {
+               pring->iotag_ctr++;
+               if (pring->iotag_ctr >= pring->iotag_max)
+                       pring->iotag_ctr = 1;
+               return pring->iotag_ctr;
+       }
+
+       search_start = pring->iotag_ctr;
+
+       do {
+               pring->iotag_ctr++;
+               if (pring->iotag_ctr >= pring->fast_iotag)
+                       pring->iotag_ctr = 1;
+
+               if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
+                       return pring->iotag_ctr;
+
+       } while (pring->iotag_ctr != search_start);
+
+       /*
+        * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
+        */
+       lpfc_printf_log(phba,
+               KERN_ERR,
+               LOG_SLI,
+               "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
+               phba->brd_no,
+               pring->ringno,
+               pring->fast_iotag);
+       return (0);
+}
+
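+/*
+ * Copy nextiocb into the command ring slot, track it on the txcmplq when a
+ * completion routine is set (otherwise release it back to the free list),
+ * and advance the command put index in SLIM.
+ */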
+static void
+lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+               IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
+{
+       /*
+        * Allocate and set up an iotag
+        */
+       nextiocb->iocb.ulpIoTag =
+               lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);
+
+       /*
+        * Issue iocb command to adapter
+        */
+       lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
+       wmb();
+       pring->stats.iocb_cmd++;
+
+       /*
+        * If there is no completion routine to call, we can release the
+        * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
+        * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+        */
+       if (nextiocb->iocb_cmpl)
+               lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
+       else {
+               list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list);
+       }
+
+       /*
+        * Let the HBA know what IOCB slot will be the next one the
+        * driver will put a command into.
+        */
+       pring->cmdidx = pring->next_cmdidx;
+       writeb(pring->cmdidx, phba->MBslimaddr
+              + (SLIMOFF + (pring->ringno * 2)) * 4);
+}
+
+static void
+lpfc_sli_update_full_ring(struct lpfc_hba * phba,
+                         struct lpfc_sli_ring *pring)
+{
+       int ringno = pring->ringno;
+
+       pring->flag |= LPFC_CALL_RING_AVAILABLE;
+
+       wmb();
+
+       /*
+        * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
+        * The HBA will tell us when an IOCB entry is available.
+        */
+       writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
+       readl(phba->CAregaddr); /* flush */
+
+       pring->stats.iocb_cmd_full++;
+}
+
+static void
+lpfc_sli_update_ring(struct lpfc_hba * phba,
+                    struct lpfc_sli_ring *pring)
+{
+       int ringno = pring->ringno;
+
+       /*
+        * Tell the HBA that there is work to do in this ring.
+        */
+       wmb();
+       writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+       readl(phba->CAregaddr); /* flush */
+}
+
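+/*
+ * Move queued IOCBs from the txq onto the command ring while slots remain,
+ * then notify the HBA; if the ring fills up first, request a ring-available
+ * attention instead.
+ */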
+static void
+lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+       IOCB_t *iocb;
+       struct lpfc_iocbq *nextiocb;
+
+       /*
+        * Check to see if:
+        *  (a) there is anything on the txq to send
+        *  (b) link is up
+        *  (c) link attention events can be processed (fcp ring only)
+        *  (d) IOCB processing is not blocked by the outstanding mbox command.
+        */
+       if (pring->txq_cnt &&
+           (phba->hba_state > LPFC_LINK_DOWN) &&
+           (pring->ringno != phba->sli.fcp_ring ||
+            phba->sli.sli_flag & LPFC_PROCESS_LA) &&
+           !(pring->flag & LPFC_STOP_IOCB_MBX)) {
+
+               while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+                      (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
+                       lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+               if (iocb)
+                       lpfc_sli_update_ring(phba, pring);
+               else
+                       lpfc_sli_update_full_ring(phba, pring);
+       }
+
+       return;
+}
+
+/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
+static void
+lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
+{
+       PGP *pgp = (PGP *)
+               &(((MAILBOX_t *)phba->sli.MBhostaddr)->us.s2.port[ringno]);
+
+       /* If the ring is active, flag it */
+       if (phba->sli.ring[ringno].cmdringaddr) {
+               if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
+                       phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
+                       /*
+                        * Force update of the local copy of cmdGetInx
+                        */
+                       phba->sli.ring[ringno].local_getidx
+                               = le32_to_cpu(pgp->cmdGetInx);
+                       spin_lock_irq(phba->host->host_lock);
+                       lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
+                       spin_unlock_irq(phba->host->host_lock);
+               }
+       }
+}
+
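+/*
+ * Validate a mailbox command: known commands are returned unchanged,
+ * anything else maps to MBX_SHUTDOWN.
+ */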
+static int
+lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
+{
+       uint8_t ret;
+
+       switch (mbxCommand) {
+       case MBX_LOAD_SM:
+       case MBX_READ_NV:
+       case MBX_WRITE_NV:
+       case MBX_RUN_BIU_DIAG:
+       case MBX_INIT_LINK:
+       case MBX_DOWN_LINK:
+       case MBX_CONFIG_LINK:
+       case MBX_CONFIG_RING:
+       case MBX_RESET_RING:
+       case MBX_READ_CONFIG:
+       case MBX_READ_RCONFIG:
+       case MBX_READ_SPARM:
+       case MBX_READ_STATUS:
+       case MBX_READ_RPI:
+       case MBX_READ_XRI:
+       case MBX_READ_REV:
+       case MBX_READ_LNK_STAT:
+       case MBX_REG_LOGIN:
+       case MBX_UNREG_LOGIN:
+       case MBX_READ_LA:
+       case MBX_CLEAR_LA:
+       case MBX_DUMP_MEMORY:
+       case MBX_DUMP_CONTEXT:
+       case MBX_RUN_DIAGS:
+       case MBX_RESTART:
+       case MBX_UPDATE_CFG:
+       case MBX_DOWN_LOAD:
+       case MBX_DEL_LD_ENTRY:
+       case MBX_RUN_PROGRAM:
+       case MBX_SET_MASK:
+       case MBX_SET_SLIM:
+       case MBX_UNREG_D_ID:
+       case MBX_CONFIG_FARP:
+       case MBX_LOAD_AREA:
+       case MBX_RUN_BIU_DIAG64:
+       case MBX_CONFIG_PORT:
+       case MBX_READ_SPARM64:
+       case MBX_READ_RPI64:
+       case MBX_REG_LOGIN64:
+       case MBX_READ_LA64:
+       case MBX_FLASH_WR_ULA:
+       case MBX_SET_DEBUG:
+       case MBX_LOAD_EXP_ROM:
+               ret = mbxCommand;
+               break;
+       default:
+               ret = MBX_SHUTDOWN;
+               break;
+       }
+       return (ret);
+}
+
+static void
+lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+       wait_queue_head_t *pdone_q;
+
+       /*
+        * If pdone_q is empty, the driver thread gave up waiting and
+        * continued running.
+        */
+       pdone_q = (wait_queue_head_t *) pmboxq->context1;
+       if (pdone_q)
+               wake_up_interruptible(pdone_q);
+       return;
+}
+
+void
+lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+       struct lpfc_dmabuf *mp;
+       mp = (struct lpfc_dmabuf *) (pmb->context1);
+       if (mp) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+       }
+       mempool_free( pmb, phba->mbox_mem_pool);
+       return;
+}
+
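+/*
+ * Handle a mailbox completion event: verify ownership of the active mailbox,
+ * run its completion routine, then either issue the next queued mailbox
+ * command or re-enable IOCB processing and free deferred DMA buffers.
+ */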
+int
+lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
+{
+       MAILBOX_t *mbox;
+       MAILBOX_t *pmbox;
+       LPFC_MBOXQ_t *pmb;
+       struct lpfc_sli *psli;
+       int i, rc;
+       uint32_t process_next;
+
+       psli = &phba->sli;
+       /* We should only get here if we are in SLI2 mode */
+       if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
+               return (1);
+       }
+
+       phba->sli.slistat.mbox_event++;
+
+       /* Get a Mailbox buffer to setup mailbox commands for callback */
+       if ((pmb = phba->sli.mbox_active)) {
+               pmbox = &pmb->mb;
+               mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
+
+               /* First check out the status word */
+               lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
+
+               /* Sanity check to ensure the host owns the mailbox */
+               if (pmbox->mbxOwner != OWN_HOST) {
+                       /* Let's try for a while */
+                       for (i = 0; i < 10240; i++) {
+                               /* First copy command data */
+                               lpfc_sli_pcimem_bcopy(mbox, pmbox,
+                                                       sizeof (uint32_t));
+                               if (pmbox->mbxOwner == OWN_HOST)
+                                       goto mbout;
+                       }
+                       /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
+                          <status> */
+                       lpfc_printf_log(phba,
+                                       KERN_ERR,
+                                       LOG_MBOX | LOG_SLI,
+                                       "%d:0304 Stray Mailbox Interrupt "
+                                       "mbxCommand x%x mbxStatus x%x\n",
+                                       phba->brd_no,
+                                       pmbox->mbxCommand,
+                                       pmbox->mbxStatus);
+
+                       spin_lock_irq(phba->host->host_lock);
+                       phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+                       spin_unlock_irq(phba->host->host_lock);
+                       return (1);
+               }
+
+             mbout:
+               del_timer_sync(&phba->sli.mbox_tmo);
+               phba->work_hba_events &= ~WORKER_MBOX_TMO;
+
+               /*
+                * It is a fatal error if an unknown mbox command completes.
+                */
+               if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
+                   MBX_SHUTDOWN) {
+
+                       /* Unknown mailbox command completion */
+                       lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_MBOX | LOG_SLI,
+                               "%d:0323 Unknown Mailbox command %x Cmpl\n",
+                               phba->brd_no,
+                               pmbox->mbxCommand);
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       phba->work_hs = HS_FFER3;
+                       lpfc_handle_eratt(phba);
+                       return (0);
+               }
+
+               phba->sli.mbox_active = NULL;
+               if (pmbox->mbxStatus) {
+                       phba->sli.slistat.mbox_stat_err++;
+                       if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
+                               /* Mbox cmd cmpl error - RETRYing */
+                               lpfc_printf_log(phba,
+                                       KERN_INFO,
+                                       LOG_MBOX | LOG_SLI,
+                                       "%d:0305 Mbox cmd cmpl error - "
+                                       "RETRYing Data: x%x x%x x%x x%x\n",
+                                       phba->brd_no,
+                                       pmbox->mbxCommand,
+                                       pmbox->mbxStatus,
+                                       pmbox->un.varWords[0],
+                                       phba->hba_state);
+                               pmbox->mbxStatus = 0;
+                               pmbox->mbxOwner = OWN_HOST;
+                               spin_lock_irq(phba->host->host_lock);
+                               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+                               spin_unlock_irq(phba->host->host_lock);
+                               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+                               if (rc == MBX_SUCCESS)
+                                       return (0);
+                       }
+               }
+
+               /* Mailbox cmd <cmd> Cmpl <cmpl> */
+               lpfc_printf_log(phba,
+                               KERN_INFO,
+                               LOG_MBOX | LOG_SLI,
+                               "%d:0307 Mailbox cmd x%x Cmpl x%p "
+                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+                               phba->brd_no,
+                               pmbox->mbxCommand,
+                               pmb->mbox_cmpl,
+                               *((uint32_t *) pmbox),
+                               pmbox->un.varWords[0],
+                               pmbox->un.varWords[1],
+                               pmbox->un.varWords[2],
+                               pmbox->un.varWords[3],
+                               pmbox->un.varWords[4],
+                               pmbox->un.varWords[5],
+                               pmbox->un.varWords[6],
+                               pmbox->un.varWords[7]);
+
+               if (pmb->mbox_cmpl) {
+                       lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
+                       pmb->mbox_cmpl(phba,pmb);
+               }
+       }
+
+
+       do {
+               process_next = 0;       /* by default don't loop */
+               spin_lock_irq(phba->host->host_lock);
+               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+
+               /* Process next mailbox command if there is one */
+               if ((pmb = lpfc_mbox_get(phba))) {
+                       spin_unlock_irq(phba->host->host_lock);
+                       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+                       if (rc == MBX_NOT_FINISHED) {
+                               pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+                               pmb->mbox_cmpl(phba,pmb);
+                               process_next = 1;
+                               continue;       /* loop back */
+                       }
+               } else {
+                       spin_unlock_irq(phba->host->host_lock);
+                       /* Turn on IOCB processing */
+                       for (i = 0; i < phba->sli.num_rings; i++) {
+                               lpfc_sli_turn_on_ring(phba, i);
+                       }
+
+                       /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
+                       while (!list_empty(&phba->freebufList)) {
+                               struct lpfc_dmabuf *mp;
+
+                               mp = NULL;
+                               list_remove_head((&phba->freebufList),
+                                                mp,
+                                                struct lpfc_dmabuf,
+                                                list);
+                               if (mp) {
+                                       lpfc_mbuf_free(phba, mp->virt,
+                                                      mp->phys);
+                                       kfree(mp);
+                               }
+                       }
+               }
+
+       } while (process_next);
+
+       return (0);
+}
+
+static int
+lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                           struct lpfc_iocbq *saveq)
+{
+       IOCB_t           * irsp;
+       WORD5            * w5p;
+       uint32_t           Rctl, Type;
+       uint32_t           match, i;
+
+       match = 0;
+       irsp = &(saveq->iocb);
+       if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
+           || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
+               Rctl = FC_ELS_REQ;
+               Type = FC_ELS_DATA;
+       } else {
+               w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
+               Rctl = w5p->hcsw.Rctl;
+               Type = w5p->hcsw.Type;
+
+               /* Firmware Workaround */
+               if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
+                       (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
+                       Rctl = FC_ELS_REQ;
+                       Type = FC_ELS_DATA;
+                       w5p->hcsw.Rctl = Rctl;
+                       w5p->hcsw.Type = Type;
+               }
+       }
+       /* Unsolicited responses */
+       if (pring->prt[0].profile) {
+               (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
+               match = 1;
+       } else {
+               /* We must search, based on rctl / type
+                  for the right routine */
+               for (i = 0; i < pring->num_mask; i++) {
+                       if ((pring->prt[i].rctl == Rctl) &&
+                           (pring->prt[i].type == Type)) {
+                               (pring->prt[i].lpfc_sli_rcv_unsol_event)
+                                       (phba, pring, saveq);
+                               match = 1;
+                               break;
+                       }
+               }
+       }
+       if (match == 0) {
+               /* Unexpected Rctl / Type received */
+               /* Ring <ringno> handler: unexpected
+                  Rctl <Rctl> Type <Type> received */
+               lpfc_printf_log(phba,
+                               KERN_WARNING,
+                               LOG_SLI,
+                               "%d:0313 Ring %d handler: unexpected Rctl x%x "
+                               "Type x%x received\n",
+                               phba->brd_no,
+                               pring->ringno,
+                               Rctl,
+                               Type);
+       }
+       return(1);
+}
+
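+/*
+ * Linear search of the txcmplq for the command IOCB whose iotag matches the
+ * given response; the match is unlinked from the queue and returned.
+ */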
+static struct lpfc_iocbq *
+lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring,
+                                struct lpfc_iocbq * prspiocb)
+{
+       IOCB_t *icmd = NULL;
+       IOCB_t *irsp = NULL;
+       struct lpfc_iocbq *cmd_iocb;
+       struct lpfc_iocbq *iocb, *next_iocb;
+       uint16_t iotag;
+
+       irsp = &prspiocb->iocb;
+       iotag = irsp->ulpIoTag;
+       cmd_iocb = NULL;
+
+       /* Search through txcmplq from the beginning */
+       list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
+               icmd = &iocb->iocb;
+               if (iotag == icmd->ulpIoTag) {
+                       /* Found a match.  */
+                       cmd_iocb = iocb;
+                       list_del(&iocb->list);
+                       pring->txcmplq_cnt--;
+                       break;
+               }
+       }
+
+       return (cmd_iocb);
+}
+
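+/*
+ * Constant-time completion lookup using the ring's fast_lookup table indexed
+ * by iotag.  A missing entry or an out-of-range iotag is logged as an error
+ * and NULL is returned.
+ */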
+static struct lpfc_iocbq *
+lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
+                       struct lpfc_sli_ring * pring,
+                       struct lpfc_iocbq * prspiocb)
+{
+       IOCB_t *irsp = NULL;
+       struct lpfc_iocbq *cmd_iocb = NULL;
+       uint16_t iotag;
+
+       if (unlikely(pring->fast_lookup == NULL))
+               return NULL;
+
+       /* Use fast lookup based on iotag for completion */
+       irsp = &prspiocb->iocb;
+       iotag = irsp->ulpIoTag;
+       if (iotag < pring->fast_iotag) {
+               cmd_iocb = *(pring->fast_lookup + iotag);
+               *(pring->fast_lookup + iotag) = NULL;
+               if (cmd_iocb) {
+                       list_del(&cmd_iocb->list);
+                       pring->txcmplq_cnt--;
+                       return cmd_iocb;
+               } else {
+                       /*
+                        * This is clearly an error.  A ring that uses iotags
+                        * should never get an interrupt for a completion that
+                        * is not on the ring.  Return NULL and log an error.
+                        */
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "%d:0327 Rsp ring %d error -  command "
+                               "completion for iotag x%x not found\n",
+                               phba->brd_no, pring->ringno, iotag);
+                       return NULL;
+               }
+       }
+
+       /*
+        * Rsp ring <ringno> get: iotag <iotag> greater than
+        * configured max <fast_iotag> wd0 <irsp>.  This is an
+        * error.  Just return NULL.
+        */
+       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "%d:0317 Rsp ring %d get: iotag x%x greater than "
+                       "configured max x%x wd0 x%x\n",
+                       phba->brd_no, pring->ringno, iotag, pring->fast_iotag,
+                       *(((uint32_t *) irsp) + 7));
+       return NULL;
+}
+
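+/*
+ * Complete a solicited IOCB: find the originating command on the txcmplq and
+ * invoke its completion routine.  An unmatched response is only logged for
+ * non-ELS rings, since lpfc_els_abort() may already have reclaimed the
+ * command on the ELS ring.
+ */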
+static int
+lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+                         struct lpfc_iocbq *saveq)
+{
+       struct lpfc_iocbq * cmdiocbp;
+       int rc = 1;
+       unsigned long iflag;
+
+       /* Based on the iotag field, get the cmd IOCB from the txcmplq */
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq);
+       if (cmdiocbp) {
+               if (cmdiocbp->iocb_cmpl) {
+                       /*
+                        * Post all ELS completions to the worker thread.
+                        * All other are passed to the completion callback.
+                        */
+                       if (pring->ringno == LPFC_ELS_RING) {
+                               spin_unlock_irqrestore(phba->host->host_lock,
+                                                      iflag);
+                               (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+                               spin_lock_irqsave(phba->host->host_lock, iflag);
+                       }
+                       else {
+                               if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
+                                       rc = 0;
+
+                               spin_unlock_irqrestore(phba->host->host_lock,
+                                                      iflag);
+                               (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+                               spin_lock_irqsave(phba->host->host_lock, iflag);
+                       }
+               } else {
+                       list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list);
+               }
+       } else {
+               /*
+                * Unknown initiating command based on the response iotag.
+                * This could be the case on the ELS ring because of
+                * lpfc_els_abort().
+                */
+               if (pring->ringno != LPFC_ELS_RING) {
+                       /*
+                        * Ring <ringno> handler: unexpected completion IoTag
+                        * <IoTag>
+                        */
+                       lpfc_printf_log(phba,
+                               KERN_WARNING,
+                               LOG_SLI,
+                               "%d:0322 Ring %d handler: unexpected "
+                               "completion IoTag x%x Data: x%x x%x x%x x%x\n",
+                               phba->brd_no,
+                               pring->ringno,
+                               saveq->iocb.ulpIoTag,
+                               saveq->iocb.ulpStatus,
+                               saveq->iocb.un.ulpWord[4],
+                               saveq->iocb.ulpCommand,
+                               saveq->iocb.ulpContext);
+               }
+       }
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+       return rc;
+}
+
+/*
+ * This routine presumes LPFC_FCP_RING handling and doesn't bother
+ * to check it explicitly.
+ */
+static int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
+                               struct lpfc_sli_ring * pring, uint32_t mask)
+{
+       IOCB_t *irsp = NULL;
+       struct lpfc_iocbq *cmdiocbq = NULL;
+       struct lpfc_iocbq rspiocbq;
+       PGP *pgp;
+       uint32_t status;
+       uint32_t portRspPut, portRspMax;
+       int rc = 1;
+       lpfc_iocb_type type;
+       unsigned long iflag;
+       uint32_t rsp_cmpl = 0;
+       void __iomem  *to_slim;
+
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       pring->stats.iocb_event++;
+
+       /* The driver assumes SLI-2 mode */
+       pgp = (PGP *) &((MAILBOX_t *) phba->sli.MBhostaddr)
+               ->us.s2.port[pring->ringno];
+
+       /*
+        * The next available response entry should never exceed the maximum
+        * entries.  If it does, treat it as an adapter hardware error.
+        */
+       portRspMax = pring->numRiocb;
+       portRspPut = le32_to_cpu(pgp->rspPutInx);
+       if (unlikely(portRspPut >= portRspMax)) {
+               /*
+                * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+                * rsp ring <portRspMax>
+                */
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "%d:0312 Ring %d handler: portRspPut %d "
+                               "is bigger than rsp ring %d\n",
+                               phba->brd_no, pring->ringno, portRspPut,
+                               portRspMax);
+
+               phba->hba_state = LPFC_HBA_ERROR;
+
+               /* All error attention handlers are posted to worker thread */
+               phba->work_ha |= HA_ERATT;
+               phba->work_hs = HS_FFER3;
+               if (phba->work_wait)
+                       wake_up(phba->work_wait);
+
+               spin_unlock_irqrestore(phba->host->host_lock, iflag);
+               return 1;
+       }
+
+       rmb();
+       while (pring->rspidx != portRspPut) {
+               irsp = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+               type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
+               pring->stats.iocb_rsp++;
+               rsp_cmpl++;
+
+               if (unlikely(irsp->ulpStatus)) {
+                       /* Rsp ring <ringno> error: IOCB */
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "%d:0326 Rsp Ring %d error: IOCB Data: "
+                               "x%x x%x x%x x%x x%x x%x x%x x%x\n",
+                               phba->brd_no, pring->ringno,
+                               irsp->un.ulpWord[0], irsp->un.ulpWord[1],
+                               irsp->un.ulpWord[2], irsp->un.ulpWord[3],
+                               irsp->un.ulpWord[4], irsp->un.ulpWord[5],
+                               *(((uint32_t *) irsp) + 6),
+                               *(((uint32_t *) irsp) + 7));
+               }
+
+               switch (type) {
+               case LPFC_ABORT_IOCB:
+               case LPFC_SOL_IOCB:
+                       /*
+                        * Idle exchange closed via ABTS from port.  No iocb
+                        * resources need to be recovered.
+                        */
+                       if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
+                               printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
+                                      "Skipping completion\n", __FUNCTION__,
+                                      irsp->ulpCommand);
+                               break;
+                       }
+
+                       rspiocbq.iocb.un.ulpWord[4] = irsp->un.ulpWord[4];
+                       rspiocbq.iocb.ulpStatus = irsp->ulpStatus;
+                       rspiocbq.iocb.ulpContext = irsp->ulpContext;
+                       rspiocbq.iocb.ulpIoTag = irsp->ulpIoTag;
+                       cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba,
+                                                               pring,
+                                                               &rspiocbq);
+                       if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
+                               spin_unlock_irqrestore(
+                                      phba->host->host_lock, iflag);
+                               (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+                                                     &rspiocbq);
+                               spin_lock_irqsave(phba->host->host_lock,
+                                                 iflag);
+                       }
+                       break;
+               default:
+                       if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+                               char adaptermsg[LPFC_MAX_ADPTMSG];
+                               memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+                               memcpy(&adaptermsg[0], (uint8_t *) irsp,
+                                      MAX_MSG_DATA);
+                               dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
+                                        phba->brd_no, adaptermsg);
+                       } else {
+                               /* Unknown IOCB command */
+                               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "%d:0321 Unknown IOCB command "
+                                       "Data: x%x, x%x x%x x%x x%x\n",
+                                       phba->brd_no, type, irsp->ulpCommand,
+                                       irsp->ulpStatus, irsp->ulpIoTag,
+                                       irsp->ulpContext);
+                       }
+                       break;
+               }
+
+               /*
+                * The response IOCB has been processed.  Update the ring
+                * pointer in SLIM.  If the port response put pointer has not
+                * been updated, sync the pgp->rspPutInx and fetch the new port
+                * response put pointer.
+                */
+               if (++pring->rspidx >= portRspMax)
+                       pring->rspidx = 0;
+
+               to_slim = phba->MBslimaddr +
+                       (SLIMOFF + (pring->ringno * 2) + 1) * 4;
+               writeb(pring->rspidx, to_slim);
+
+               if (pring->rspidx == portRspPut)
+                       portRspPut = le32_to_cpu(pgp->rspPutInx);
+       }
+
+       if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
+               pring->stats.iocb_rsp_full++;
+               status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+               writel(status, phba->CAregaddr);
+               readl(phba->CAregaddr);
+       }
+       if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+               pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+               pring->stats.iocb_cmd_empty++;
+
+               /* Force update of the local copy of cmdGetInx */
+               pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+               lpfc_sli_resume_iocb(phba, pring);
+
+               if ((pring->lpfc_sli_cmd_available))
+                       (pring->lpfc_sli_cmd_available) (phba, pring);
+
+       }
+
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+       return rc;
+}
+
+
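+/*
+ * Handle response entries on a slow-path ring: each entry is copied into a
+ * free iocbq and chained on iocb_continueq until ulpLe marks the last entry
+ * of the command, at which point the accumulated completion is dispatched
+ * by IOCB type.
+ */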
+int
+lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
+                          struct lpfc_sli_ring * pring, uint32_t mask)
+{
+       IOCB_t *entry;
+       IOCB_t *irsp = NULL;
+       struct lpfc_iocbq *rspiocbp = NULL;
+       struct lpfc_iocbq *next_iocb;
+       struct lpfc_iocbq *cmdiocbp;
+       struct lpfc_iocbq *saveq;
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       HGP *hgp;
+       PGP *pgp;
+       MAILBOX_t *mbox;
+       uint8_t iocb_cmd_type;
+       lpfc_iocb_type type;
+       uint32_t status, free_saveq;
+       uint32_t portRspPut, portRspMax;
+       int rc = 1;
+       unsigned long iflag;
+       void __iomem  *to_slim;
+
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       pring->stats.iocb_event++;
+
+       /* The driver assumes SLI-2 mode */
+       mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
+       pgp = (PGP *) & mbox->us.s2.port[pring->ringno];
+       hgp = (HGP *) & mbox->us.s2.host[pring->ringno];
+
+       /*
+        * The next available response entry should never exceed the maximum
+        * entries.  If it does, treat it as an adapter hardware error.
+        */
+       portRspMax = pring->numRiocb;
+       portRspPut = le32_to_cpu(pgp->rspPutInx);
+       if (portRspPut >= portRspMax) {
+               /*
+                * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+                * rsp ring <portRspMax>
+                */
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_SLI,
+                               "%d:0312 Ring %d handler: portRspPut %d "
+                               "is bigger than rsp ring %d\n",
+                               phba->brd_no,
+                               pring->ringno, portRspPut, portRspMax);
+
+               phba->hba_state = LPFC_HBA_ERROR;
+               spin_unlock_irqrestore(phba->host->host_lock, iflag);
+
+               phba->work_hs = HS_FFER3;
+               lpfc_handle_eratt(phba);
+
+               return 1;
+       }
+
+       rmb();
+       lpfc_iocb_list = &phba->lpfc_iocb_list;
+       while (pring->rspidx != portRspPut) {
+               /*
+                * Build a completion list and call the appropriate handler.
+                * The process is to get the next available response iocb, get
+                * a free iocb from the list, copy the response data into the
+                * free iocb, insert it into the continuation list, and update
+                * the next response index in SLIM.  This process makes
+                * response iocbs in the ring available to DMA as fast as
+                * possible but pays a penalty for a copy operation.  Since
+                * the iocb is only 32 bytes, this penalty is considered small
+                * relative to the PCI reads for register values and a slim
+                * write.  When the ulpLe field is set, the entire command has
+                * been received.
+                */
+               entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+               list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq,
+                                list);
+               if (rspiocbp == NULL) {
+                       printk(KERN_ERR "%s: out of buffers! Failing "
+                              "completion.\n", __FUNCTION__);
+                       break;
+               }
+
+               lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
+               irsp = &rspiocbp->iocb;
+
+               if (++pring->rspidx >= portRspMax)
+                       pring->rspidx = 0;
+
+               to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
+                                             + 1) * 4;
+               writeb(pring->rspidx, to_slim);
+
+               if (list_empty(&(pring->iocb_continueq))) {
+                       list_add(&rspiocbp->list, &(pring->iocb_continueq));
+               } else {
+                       list_add_tail(&rspiocbp->list,
+                                     &(pring->iocb_continueq));
+               }
+
+               pring->iocb_continueq_cnt++;
+               if (irsp->ulpLe) {
+                       /*
+                        * By default, the driver expects to free all resources
+                        * associated with this iocb completion.
+                        */
+                       free_saveq = 1;
+                       saveq = list_get_first(&pring->iocb_continueq,
+                                              struct lpfc_iocbq, list);
+                       irsp = &(saveq->iocb);
+                       list_del_init(&pring->iocb_continueq);
+                       pring->iocb_continueq_cnt = 0;
+
+                       pring->stats.iocb_rsp++;
+
+                       if (irsp->ulpStatus) {
+                               /* Rsp ring <ringno> error: IOCB */
+                               lpfc_printf_log(phba,
+                                       KERN_WARNING,
+                                       LOG_SLI,
+                                       "%d:0328 Rsp Ring %d error: IOCB Data: "
+                                       "x%x x%x x%x x%x x%x x%x x%x x%x\n",
+                                       phba->brd_no,
+                                       pring->ringno,
+                                       irsp->un.ulpWord[0],
+                                       irsp->un.ulpWord[1],
+                                       irsp->un.ulpWord[2],
+                                       irsp->un.ulpWord[3],
+                                       irsp->un.ulpWord[4],
+                                       irsp->un.ulpWord[5],
+                                       *(((uint32_t *) irsp) + 6),
+                                       *(((uint32_t *) irsp) + 7));
+                       }
+
+                       /*
+                        * Fetch the IOCB command type and call the correct
+                        * completion routine.  Solicited and Unsolicited
+                        * IOCBs on the ELS ring get freed back to the
+                        * lpfc_iocb_list by the discovery kernel thread.
+                        */
+                       iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+                       type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+                       if (type == LPFC_SOL_IOCB) {
+                               spin_unlock_irqrestore(phba->host->host_lock,
+                                                      iflag);
+                               rc = lpfc_sli_process_sol_iocb(phba, pring,
+                                       saveq);
+                               spin_lock_irqsave(phba->host->host_lock, iflag);
+                       } else if (type == LPFC_UNSOL_IOCB) {
+                               spin_unlock_irqrestore(phba->host->host_lock,
+                                                      iflag);
+                               rc = lpfc_sli_process_unsol_iocb(phba, pring,
+                                       saveq);
+                               spin_lock_irqsave(phba->host->host_lock, iflag);
+                       } else if (type == LPFC_ABORT_IOCB) {
+                               if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
+                                   ((cmdiocbp =
+                                     lpfc_sli_txcmpl_ring_search_slow(pring,
+                                               saveq)))) {
+                                       /* Call the specified completion
+                                          routine */
+                                       if (cmdiocbp->iocb_cmpl) {
+                                               spin_unlock_irqrestore(
+                                                      phba->host->host_lock,
+                                                      iflag);
+                                               (cmdiocbp->iocb_cmpl) (phba,
+                                                            cmdiocbp, saveq);
+                                               spin_lock_irqsave(
+                                                         phba->host->host_lock,
+                                                         iflag);
+                                       } else {
+                                               list_add_tail(&cmdiocbp->list,
+                                                               lpfc_iocb_list);
+                                       }
+                               }
+                       } else if (type == LPFC_UNKNOWN_IOCB) {
+                               if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+
+                                       char adaptermsg[LPFC_MAX_ADPTMSG];
+
+                                       memset(adaptermsg, 0,
+                                              LPFC_MAX_ADPTMSG);
+                                       memcpy(&adaptermsg[0], (uint8_t *) irsp,
+                                              MAX_MSG_DATA);
+                                       dev_warn(&((phba->pcidev)->dev),
+                                                "lpfc%d: %s",
+                                                phba->brd_no, adaptermsg);
+                               } else {
+                                       /* Unknown IOCB command */
+                                       lpfc_printf_log(phba,
+                                               KERN_ERR,
+                                               LOG_SLI,
+                                               "%d:0321 Unknown IOCB command "
+                                               "Data: x%x x%x x%x x%x\n",
+                                               phba->brd_no,
+                                               irsp->ulpCommand,
+                                               irsp->ulpStatus,
+                                               irsp->ulpIoTag,
+                                               irsp->ulpContext);
+                               }
+                       }
+
+                       if (free_saveq) {
+                               if (!list_empty(&saveq->list)) {
+                                       list_for_each_entry_safe(rspiocbp,
+                                                                next_iocb,
+                                                                &saveq->list,
+                                                                list) {
+                                               list_add_tail(&rspiocbp->list,
+                                                             lpfc_iocb_list);
+                                       }
+                               }
+
+                               list_add_tail(&saveq->list, lpfc_iocb_list);
+                       }
+               }
+
+               /*
+                * If the port response put pointer has not been updated, sync
+                * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
+                * response put pointer.
+                */
+               if (pring->rspidx == portRspPut) {
+                       portRspPut = le32_to_cpu(pgp->rspPutInx);
+               }
+       } /* while (pring->rspidx != portRspPut) */
+
+       if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
+               /* At least one response entry has been freed */
+               pring->stats.iocb_rsp_full++;
+               /* SET RxRE_RSP in Chip Att register */
+               status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+               writel(status, phba->CAregaddr);
+               readl(phba->CAregaddr); /* flush */
+       }
+       if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+               pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+               pring->stats.iocb_cmd_empty++;
+
+               /* Force update of the local copy of cmdGetInx */
+               pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+               lpfc_sli_resume_iocb(phba, pring);
+
+               if ((pring->lpfc_sli_cmd_available))
+                       (pring->lpfc_sli_cmd_available) (phba, pring);
+
+       }
+
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+       return rc;
+}
+
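+/*
+ * Fail every IOCB queued on the ring's txq and txcmplq: entries with a
+ * completion routine are completed with IOSTAT_LOCAL_REJECT /
+ * IOERR_SLI_ABORTED, the rest are returned to the free list.
+ */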
+int
+lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+       struct lpfc_iocbq *iocb, *next_iocb;
+       IOCB_t *icmd = NULL, *cmd = NULL;
+       int errcnt;
+       uint16_t iotag;
+
+       errcnt = 0;
+
+       /* Error everything on the txq and txcmplq.
+        * First do the txq.
+        */
+       spin_lock_irq(phba->host->host_lock);
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+               list_del_init(&iocb->list);
+               if (iocb->iocb_cmpl) {
+                       icmd = &iocb->iocb;
+                       icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                       icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+                       spin_unlock_irq(phba->host->host_lock);
+                       (iocb->iocb_cmpl) (phba, iocb, iocb);
+                       spin_lock_irq(phba->host->host_lock);
+               } else {
+                       list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
+               }
+       }
+       pring->txq_cnt = 0;
+       INIT_LIST_HEAD(&(pring->txq));
+
+       /* Next issue ABTS for everything on the txcmplq */
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+               cmd = &iocb->iocb;
+
+               /*
+                * Immediate abort of the IOCB: clear the fast_lookup entry,
+                * if any, dequeue it and call its completion routine.
+                */
+               iotag = cmd->ulpIoTag;
+               if (iotag && pring->fast_lookup &&
+                   (iotag < pring->fast_iotag))
+                       pring->fast_lookup[iotag] = NULL;
+
+               list_del_init(&iocb->list);
+               pring->txcmplq_cnt--;
+
+               if (iocb->iocb_cmpl) {
+                       cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                       cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+                       spin_unlock_irq(phba->host->host_lock);
+                       (iocb->iocb_cmpl) (phba, iocb, iocb);
+                       spin_lock_irq(phba->host->host_lock);
+               } else {
+                       list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
+               }
+       }
+
+       INIT_LIST_HEAD(&pring->txcmplq);
+       pring->txcmplq_cnt = 0;
+       spin_unlock_irq(phba->host->host_lock);
+
+       return errcnt;
+}
+
+/******************************************************************************
+* lpfc_sli_send_reset
+*
+* Note: After returning from this function, the HBA cannot be accessed for
+* 1 ms. Since we do not wish to delay in interrupt context, it is the
+* responsibility of the caller to perform the mdelay(1) and flush via readl().
+******************************************************************************/
+static int
+lpfc_sli_send_reset(struct lpfc_hba * phba, uint16_t skip_post)
+{
+       MAILBOX_t *swpmb;
+       volatile uint32_t word0;
+       void __iomem *to_slim;
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(phba->host->host_lock, flags);
+
+       /* A board reset must use REAL SLIM. */
+       phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+
+       word0 = 0;
+       swpmb = (MAILBOX_t *) & word0;
+       swpmb->mbxCommand = MBX_RESTART;
+       swpmb->mbxHc = 1;
+
+       to_slim = phba->MBslimaddr;
+       writel(*(uint32_t *) swpmb, to_slim);
+       readl(to_slim); /* flush */
+
+       /* Only skip post after fc_ffinit is completed */
+       if (skip_post) {
+               word0 = 1;      /* This is really setting up word1 */
+       } else {
+               word0 = 0;      /* This is really setting up word1 */
+       }
+       to_slim = phba->MBslimaddr + sizeof (uint32_t);
+       writel(*(uint32_t *) swpmb, to_slim);
+       readl(to_slim); /* flush */
+
+       /* Turn off parity checking and serr during the physical reset */
+       pci_read_config_word(phba->pcidev, PCI_COMMAND, &phba->pci_cfg_value);
+       pci_write_config_word(phba->pcidev, PCI_COMMAND,
+                             (phba->pci_cfg_value &
+                              ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+       writel(HC_INITFF, phba->HCregaddr);
+
+       phba->hba_state = LPFC_INIT_START;
+       spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+       return 0;
+}
+
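+/*
+ * Reset the board via lpfc_sli_send_reset, restore the PCI command register,
+ * reinitialize the per-ring indices, free preposted ELS buffers and abort
+ * all outstanding IOCBs.
+ */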
+static int
+lpfc_sli_brdreset(struct lpfc_hba * phba, uint16_t skip_post)
+{
+       struct lpfc_sli_ring *pring;
+       int i;
+       struct lpfc_dmabuf *mp, *next_mp;
+       unsigned long flags = 0;
+
+       lpfc_sli_send_reset(phba, skip_post);
+       mdelay(1);
+
+       spin_lock_irqsave(phba->host->host_lock, flags);
+       /* Risk the write-on-flush case, i.e. no delay after the readl */
+       readl(phba->HCregaddr); /* flush */
+       /* Now toggle INITFF bit set by lpfc_sli_send_reset */
+       writel(0, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+
+       /* Restore PCI cmd register */
+       pci_write_config_word(phba->pcidev, PCI_COMMAND, phba->pci_cfg_value);
+
+       /* perform board reset */
+       phba->fc_eventTag = 0;
+       phba->fc_myDID = 0;
+       phba->fc_prevDID = Mask_DID;
+
+       /* Reset HBA */
+       lpfc_printf_log(phba,
+               KERN_INFO,
+               LOG_SLI,
+               "%d:0325 Reset HBA Data: x%x x%x x%x\n",
+               phba->brd_no,
+               phba->hba_state,
+               phba->sli.sli_flag,
+               skip_post);
+
+       /* Initialize relevant SLI info */
+       for (i = 0; i < phba->sli.num_rings; i++) {
+               pring = &phba->sli.ring[i];
+               pring->flag = 0;
+               pring->rspidx = 0;
+               pring->next_cmdidx  = 0;
+               pring->local_getidx = 0;
+               pring->cmdidx = 0;
+               pring->missbufcnt = 0;
+       }
+       spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+       if (skip_post) {
+               mdelay(100);
+       } else {
+               mdelay(2000);
+       }
+
+       spin_lock_irqsave(phba->host->host_lock, flags);
+       /* Cleanup preposted buffers on the ELS ring */
+       pring = &phba->sli.ring[LPFC_ELS_RING];
+       list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+               list_del(&mp->list);
+               pring->postbufq_cnt--;
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+       }
+       spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+       for (i = 0; i < phba->sli.num_rings; i++)
+               lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[i]);
+
+       return 0;
+}
+
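+/*
+ * Poll the host status register until the HBA reports ready (HS_FFRDY and
+ * HS_MBRDY), resetting the board once along the way; returns -ETIMEDOUT or
+ * -EIO on failure.
+ */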
+static int
+lpfc_sli_chipset_init(struct lpfc_hba *phba)
+{
+       uint32_t status, i = 0;
+
+       /* Read the HBA Host Status Register */
+       status = readl(phba->HSregaddr);
+
+       /* Check status register to see what current state is */
+       i = 0;
+       while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
+
+               /* Check every 10ms for 5 retries, then every 500ms for 5, then
+                * every 2.5 sec for 5, then reset board and every 2.5 sec for
+                * 4.
+                */
+               if (i++ >= 20) {
+                       /* Adapter failed to init, timeout, status reg
+                          <status> */
+                       lpfc_printf_log(phba,
+                                       KERN_ERR,
+                                       LOG_INIT,
+                                       "%d:0436 Adapter failed to init, "
+                                       "timeout, status reg x%x\n",
+                                       phba->brd_no,
+                                       status);
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       return -ETIMEDOUT;
+               }
+
+               /* Check to see if any errors occurred during init */
+               if (status & HS_FFERM) {
+                       /* ERROR: During chipset initialization */
+                       /* Adapter failed to init, chipset, status reg
+                          <status> */
+                       lpfc_printf_log(phba,
+                                       KERN_ERR,
+                                       LOG_INIT,
+                                       "%d:0437 Adapter failed to init, "
+                                       "chipset, status reg x%x\n",
+                                       phba->brd_no,
+                                       status);
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       return -EIO;
+               }
+
+               if (i <= 5) {
+                       msleep(10);
+               } else if (i <= 10) {
+                       msleep(500);
+               } else {
+                       msleep(2500);
+               }
+
+               if (i == 15) {
+                       lpfc_sli_brdreset(phba, 0);
+               }
+               /* Read the HBA Host Status Register */
+               status = readl(phba->HSregaddr);
+       }
+
+       /* Check to see if any errors occurred during init */
+       if (status & HS_FFERM) {
+               /* ERROR: During chipset initialization */
+               /* Adapter failed to init, chipset, status reg <status> */
+               lpfc_printf_log(phba,
+                               KERN_ERR,
+                               LOG_INIT,
+                               "%d:0438 Adapter failed to init, chipset, "
+                               "status reg x%x\n",
+                               phba->brd_no,
+                               status);
+               phba->hba_state = LPFC_HBA_ERROR;
+               return -EIO;
+       }
+
+       /* Clear all interrupt enable conditions */
+       writel(0, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+
+       /* setup host attn register */
+       writel(0xffffffff, phba->HAregaddr);
+       readl(phba->HAregaddr); /* flush */
+       return 0;
+}
+
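+/*
+ * Bring the HBA up: reset and initialize the chipset, issue CONFIG_PORT
+ * (at most two attempts), map the rings and run the post-configuration
+ * setup.
+ */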
+int
+lpfc_sli_hba_setup(struct lpfc_hba * phba)
+{
+       LPFC_MBOXQ_t *pmb;
+       uint32_t resetcount = 0, rc = 0, done = 0;
+
+       pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               phba->hba_state = LPFC_HBA_ERROR;
+               return -ENOMEM;
+       }
+
+       while (resetcount < 2 && !done) {
+               phba->hba_state = 0;
+               lpfc_sli_brdreset(phba, 0);
+               msleep(2500);
+               rc = lpfc_sli_chipset_init(phba);
+               if (rc)
+                       break;
+
+               resetcount++;
+
+               /* Run the pre-CONFIG_PORT mailbox command initialization.  A
+                * return of 0 means the call was successful.  Any nonzero
+                * value is a failure, but if ERESTART is returned, the driver
+                * may reset the HBA and try again.
+                */
+               rc = lpfc_config_port_prep(phba);
+               if (rc == -ERESTART) {
+                       phba->hba_state = 0;
+                       continue;
+               } else if (rc) {
+                       break;
+               }
+
+               phba->hba_state = LPFC_INIT_MBX_CMDS;
+               lpfc_config_port(phba, pmb);
+               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+               if (rc == MBX_SUCCESS)
+                       done = 1;
+               else {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "%d:0442 Adapter failed to init, mbxCmd x%x "
+                               "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
+                               phba->brd_no, pmb->mb.mbxCommand,
+                               pmb->mb.mbxStatus, 0);
+                       phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+               }
+       }
+       if (!done)
+               goto lpfc_sli_hba_setup_error;
+
+       rc = lpfc_sli_ring_map(phba, pmb);
+
+       if (rc)
+               goto lpfc_sli_hba_setup_error;
+
+       phba->sli.sli_flag |= LPFC_PROCESS_LA;
+
+       rc = lpfc_config_port_post(phba);
+       if (rc)
+               goto lpfc_sli_hba_setup_error;
+
+       goto lpfc_sli_hba_setup_exit;
+lpfc_sli_hba_setup_error:
+       phba->hba_state = LPFC_HBA_ERROR;
+lpfc_sli_hba_setup_exit:
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return rc;
+}
+
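+/*
+ * Abort the active mailbox command and every command still queued,
+ * completing each one with MBX_NOT_FINISHED.
+ */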
+static void
+lpfc_mbox_abort(struct lpfc_hba * phba)
+{
+       LPFC_MBOXQ_t *pmbox;
+       MAILBOX_t *mb;
+
+       if (phba->sli.mbox_active) {
+               del_timer_sync(&phba->sli.mbox_tmo);
+               phba->work_hba_events &= ~WORKER_MBOX_TMO;
+               pmbox = phba->sli.mbox_active;
+               mb = &pmbox->mb;
+               phba->sli.mbox_active = NULL;
+               if (pmbox->mbox_cmpl) {
+                       mb->mbxStatus = MBX_NOT_FINISHED;
+                       (pmbox->mbox_cmpl) (phba, pmbox);
+               }
+               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       }
+
+       /* Abort all the non active mailbox commands. */
+       spin_lock_irq(phba->host->host_lock);
+       pmbox = lpfc_mbox_get(phba);
+       while (pmbox) {
+               mb = &pmbox->mb;
+               if (pmbox->mbox_cmpl) {
+                       mb->mbxStatus = MBX_NOT_FINISHED;
+                       spin_unlock_irq(phba->host->host_lock);
+                       (pmbox->mbox_cmpl) (phba, pmbox);
+                       spin_lock_irq(phba->host->host_lock);
+               }
+               pmbox = lpfc_mbox_get(phba);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       return;
+}
+
+/*! lpfc_mbox_timeout
+ *
+ * \pre
+ * \post
+ * \param ptr Timer callback argument; holds a pointer to the driver's
+ *            struct lpfc_hba.
+ * \return
+ *   void
+ *
+ * \b Description:
+ *
+ * This routine handles mailbox timeout events at timer interrupt context.
+ */
+void
+lpfc_mbox_timeout(unsigned long ptr)
+{
+       struct lpfc_hba *phba;
+       unsigned long iflag;
+
+       phba = (struct lpfc_hba *)ptr;
+       spin_lock_irqsave(phba->host->host_lock, iflag);
+       if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
+               phba->work_hba_events |= WORKER_MBOX_TMO;
+               if (phba->work_wait)
+                       wake_up(phba->work_wait);
+       }
+       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
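+/*
+ * Worker-thread side of a mailbox timeout: fail the active mailbox command
+ * with MBX_NOT_FINISHED and then abort whatever else is queued.
+ */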
+void
+lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *pmbox;
+       MAILBOX_t *mb;
+
+       spin_lock_irq(phba->host->host_lock);
+       if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
+               spin_unlock_irq(phba->host->host_lock);
+               return;
+       }
+
+       pmbox = phba->sli.mbox_active;
+       mb = &pmbox->mb;
+
+       /* Mbox cmd <mbxCommand> timeout */
+       lpfc_printf_log(phba,
+               KERN_ERR,
+               LOG_MBOX | LOG_SLI,
+               "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+               phba->brd_no,
+               mb->mbxCommand,
+               phba->hba_state,
+               phba->sli.sli_flag,
+               phba->sli.mbox_active);
+
+       if (phba->sli.mbox_active == pmbox) {
+               phba->sli.mbox_active = NULL;
+               if (pmbox->mbox_cmpl) {
+                       mb->mbxStatus = MBX_NOT_FINISHED;
+                       spin_unlock_irq(phba->host->host_lock);
+                       (pmbox->mbox_cmpl) (phba, pmbox);
+                       spin_lock_irq(phba->host->host_lock);
+               }
+               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       }
+
+       spin_unlock_irq(phba->host->host_lock);
+       lpfc_mbox_abort(phba);
+       return;
+}
+
+int
+lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
+{
+       MAILBOX_t *mbox;
+       MAILBOX_t *mb;
+       struct lpfc_sli *psli;
+       uint32_t status, evtctr;
+       uint32_t ha_copy;
+       int i;
+       unsigned long drvr_flag = 0;
+       volatile uint32_t word0, ldata;
+       void __iomem *to_slim;
+
+       psli = &phba->sli;
+
+       spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+
+
+       mb = &pmbox->mb;
+       status = MBX_SUCCESS;
+
+       if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+               /* Polling for a mbox command when another one is already active
+                * is not allowed in SLI. Also, the driver must have established
+                * SLI2 mode to queue and process multiple mbox commands.
+                */
+
+               if (flag & MBX_POLL) {
+                       spin_unlock_irqrestore(phba->host->host_lock,
+                                              drvr_flag);
+
+                       /* Mbox command <mbxCommand> cannot issue */
+                       LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
+                       return (MBX_NOT_FINISHED);
+               }
+
+               if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+                       spin_unlock_irqrestore(phba->host->host_lock,
+                                              drvr_flag);
+                       /* Mbox command <mbxCommand> cannot issue */
+                       LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
+                       return (MBX_NOT_FINISHED);
+               }
+
+               /* Handle STOP IOCB processing flag. This is only meaningful
+                * if we are not polling for mbox completion.
+                */
+               if (flag & MBX_STOP_IOCB) {
+                       flag &= ~MBX_STOP_IOCB;
+                       /* Now flag each ring */
+                       for (i = 0; i < psli->num_rings; i++) {
+                               /* If the ring is active, flag it */
+                               if (psli->ring[i].cmdringaddr) {
+                                       psli->ring[i].flag |=
+                                           LPFC_STOP_IOCB_MBX;
+                               }
+                       }
+               }
+
+               /* Another mailbox command is still being processed, queue this
+                * command to be processed later.
+                */
+               lpfc_mbox_put(phba, pmbox);
+
+               /* Mbox cmd issue - BUSY */
+               lpfc_printf_log(phba,
+                       KERN_INFO,
+                       LOG_MBOX | LOG_SLI,
+                       "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
+                       phba->brd_no,
+                       mb->mbxCommand,
+                       phba->hba_state,
+                       psli->sli_flag,
+                       flag);
+
+               psli->slistat.mbox_busy++;
+               spin_unlock_irqrestore(phba->host->host_lock,
+                                      drvr_flag);
+
+               return (MBX_BUSY);
+       }
+
+       /* Handle STOP IOCB processing flag. This is only meaningful
+        * if we are not polling for mbox completion.
+        */
+       if (flag & MBX_STOP_IOCB) {
+               flag &= ~MBX_STOP_IOCB;
+               if (flag == MBX_NOWAIT) {
+                       /* Now flag each ring */
+                       for (i = 0; i < psli->num_rings; i++) {
+                               /* If the ring is active, flag it */
+                               if (psli->ring[i].cmdringaddr) {
+                                       psli->ring[i].flag |=
+                                           LPFC_STOP_IOCB_MBX;
+                               }
+                       }
+               }
+       }
+
+       psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+       /* If we are not polling, we MUST be in SLI2 mode */
+       if (flag != MBX_POLL) {
+               if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+                       psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+                       spin_unlock_irqrestore(phba->host->host_lock,
+                                              drvr_flag);
+                       /* Mbox command <mbxCommand> cannot issue */
+                       LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
+                       return (MBX_NOT_FINISHED);
+               }
+               /* timeout active mbox command */
+               mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
+       }
+
+       /* Mailbox cmd <cmd> issue */
+       lpfc_printf_log(phba,
+               KERN_INFO,
+               LOG_MBOX | LOG_SLI,
+               "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
+               phba->brd_no,
+               mb->mbxCommand,
+               phba->hba_state,
+               psli->sli_flag,
+               flag);
+
+       psli->slistat.mbox_cmd++;
+       evtctr = psli->slistat.mbox_event;
+
+       /* next set own bit for the adapter and copy over command word */
+       mb->mbxOwner = OWN_CHIP;
+
+       if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+
+               /* First copy command data to host SLIM area */
+               mbox = (MAILBOX_t *) psli->MBhostaddr;
+               lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
+       } else {
+               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+                       /* copy command data into host mbox for cmpl */
+                       mbox = (MAILBOX_t *) psli->MBhostaddr;
+                       lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
+               }
+
+               /* First copy mbox command data to HBA SLIM, skip past first
+                  word */
+               to_slim = phba->MBslimaddr + sizeof (uint32_t);
+               lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+                           MAILBOX_CMD_SIZE - sizeof (uint32_t));
+
+               /* Next copy over first word, with mbxOwner set */
+               ldata = *((volatile uint32_t *)mb);
+               to_slim = phba->MBslimaddr;
+               writel(ldata, to_slim);
+               readl(to_slim); /* flush */
+
+               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+                       /* switch over to host mailbox */
+                       psli->sli_flag |= LPFC_SLI2_ACTIVE;
+               }
+       }
+
+       wmb();
+       /* interrupt board to do it right away */
+       writel(CA_MBATT, phba->CAregaddr);
+       readl(phba->CAregaddr); /* flush */
+
+       switch (flag) {
+       case MBX_NOWAIT:
+               /* Don't wait for it to finish, just return */
+               psli->mbox_active = pmbox;
+               break;
+
+       case MBX_POLL:
+               i = 0;
+               psli->mbox_active = NULL;
+               if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+                       /* First read mbox status word */
+                       mbox = (MAILBOX_t *) psli->MBhostaddr;
+                       word0 = *((volatile uint32_t *)mbox);
+                       word0 = le32_to_cpu(word0);
+               } else {
+                       /* First read mbox status word */
+                       word0 = readl(phba->MBslimaddr);
+               }
+
+               /* Read the HBA Host Attention Register */
+               ha_copy = readl(phba->HAregaddr);
+
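+               /* Completion is signalled by the adapter clearing OWN_CHIP in
+                * the status word and raising HA_MBATT.  The loop below retries
+                * up to 100 times with an escalating mdelay(i), so the worst
+                * case busy-wait is roughly 5 seconds.
+                */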
+               /* Wait for command to complete */
+               while (((word0 & OWN_CHIP) == OWN_CHIP)
+                      || !(ha_copy & HA_MBATT)) {
+                       if (i++ >= 100) {
+                               psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+                               spin_unlock_irqrestore(phba->host->host_lock,
+                                                      drvr_flag);
+                               return (MBX_NOT_FINISHED);
+                       }
+
+                       /* Check if we took a mbox interrupt while we were
+                          polling */
+                       if (((word0 & OWN_CHIP) != OWN_CHIP)
+                           && (evtctr != psli->slistat.mbox_event))
+                               break;
+
+                       spin_unlock_irqrestore(phba->host->host_lock,
+                                              drvr_flag);
+
+                       /* Can be in interrupt context, do not sleep */
+                       /* (or might be called with interrupts disabled) */
+                       mdelay(i);
+
+                       spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+
+                       if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+                               /* Re-read the mbox status word from host SLIM */
+                               mbox = (MAILBOX_t *) psli->MBhostaddr;
+                               word0 = *((volatile uint32_t *)mbox);
+                               word0 = le32_to_cpu(word0);
+                               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+                                       MAILBOX_t *slimmb;
+                                       volatile uint32_t slimword0;
+                                       /* Check real SLIM for any errors */
+                                       slimword0 = readl(phba->MBslimaddr);
+                                       slimmb = (MAILBOX_t *) & slimword0;
+                                       if (((slimword0 & OWN_CHIP) != OWN_CHIP)
+                                           && slimmb->mbxStatus) {
+                                               psli->sli_flag &=
+                                                   ~LPFC_SLI2_ACTIVE;
+                                               word0 = slimword0;
+                                       }
+                               }
+                       } else {
+                               /* Re-read the mbox status word from SLIM */
+                               word0 = readl(phba->MBslimaddr);
+                       }
+                       /* Read the HBA Host Attention Register */
+                       ha_copy = readl(phba->HAregaddr);
+               }
+
+               if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+                       /* Command completed in host SLIM; copy the results
+                        * back to the caller's mailbox.
+                        */
+                       mbox = (MAILBOX_t *) psli->MBhostaddr;
+                       lpfc_sli_pcimem_bcopy(mbox, mb, MAILBOX_CMD_SIZE);
+               } else {
+                       /* Command completed in HBA SLIM; copy the results back */
+                       lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+                                                       MAILBOX_CMD_SIZE);
+                       if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
+                               pmbox->context2) {
+                               lpfc_memcpy_from_slim((void *)pmbox->context2,
+                                     phba->MBslimaddr + DMP_RSP_OFFSET,
+                                                     mb->un.varDmp.word_cnt);
+                       }
+               }
+
+               writel(HA_MBATT, phba->HAregaddr);
+               readl(phba->HAregaddr); /* flush */
+
+               psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+               status = mb->mbxStatus;
+       }
+
+       spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
+       return (status);
+}
+
+static int
+lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+                   struct lpfc_iocbq * piocb)
+{
+       /* Insert the caller's iocb in the txq tail for later processing. */
+       list_add_tail(&piocb->list, &pring->txq);
+       pring->txq_cnt++;
+       return (0);
+}
+
+static struct lpfc_iocbq *
+lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                  struct lpfc_iocbq ** piocb)
+{
+       struct lpfc_iocbq * nextiocb;
+
+       nextiocb = lpfc_sli_ringtx_get(phba, pring);
+       if (!nextiocb) {
+               nextiocb = *piocb;
+               *piocb = NULL;
+       }
+
+       return nextiocb;
+}
+
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                   struct lpfc_iocbq *piocb, uint32_t flag)
+{
+       struct lpfc_iocbq *nextiocb;
+       IOCB_t *iocb;
+
+       /*
+        * We should never get an IOCB if we are in a < LINK_DOWN state
+        */
+       if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+               return IOCB_ERROR;
+
+       /*
+        * Check to see if we are blocking IOCB processing because of an
+        * outstanding mbox command.
+        */
+       if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
+               goto iocb_busy;
+
+       if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
+               /*
+                * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
+                * can be issued if the link is not up.
+                */
+               switch (piocb->iocb.ulpCommand) {
+               case CMD_QUE_RING_BUF_CN:
+               case CMD_QUE_RING_BUF64_CN:
+               case CMD_CLOSE_XRI_CN:
+               case CMD_ABORT_XRI_CN:
+                       /*
+                        * For IOCBs, like QUE_RING_BUF, that have no rsp ring
+                        * completion, iocb_cmpl MUST be 0.
+                        */
+                       if (piocb->iocb_cmpl)
+                               piocb->iocb_cmpl = NULL;
+                       /*FALLTHROUGH*/
+               case CMD_CREATE_XRI_CR:
+                       break;
+               default:
+                       goto iocb_busy;
+               }
+
+       /*
+        * For FCP commands, we must be in a state where we can process link
+        * attention events.
+        */
+       } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
+                  !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
+               goto iocb_busy;
+
+       /*
+        * Check to see if this is a high priority command.
+        * If so bypass tx queue processing.
+        */
+       if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
+                    (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
+               lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
+               piocb = NULL;
+       }
+
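+       /* Drain any queued transmit iocbs first; lpfc_sli_next_iocb() hands
+        * out the caller's piocb only once the txq is empty.
+        */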
+       while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+              (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
+               lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+       if (iocb)
+               lpfc_sli_update_ring(phba, pring);
+       else
+               lpfc_sli_update_full_ring(phba, pring);
+
+       if (!piocb)
+               return IOCB_SUCCESS;
+
+       goto out_busy;
+
+ iocb_busy:
+       pring->stats.iocb_cmd_delay++;
+
+ out_busy:
+
+       if (!(flag & SLI_IOCB_RET_IOCB)) {
+               lpfc_sli_ringtx_put(phba, pring, piocb);
+               return IOCB_SUCCESS;
+       }
+
+       return IOCB_BUSY;
+}
+
+int
+lpfc_sli_setup(struct lpfc_hba *phba)
+{
+       int i, totiocb = 0;
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring *pring;
+
+       psli->num_rings = MAX_CONFIGURED_RINGS;
+       psli->sli_flag = 0;
+       psli->fcp_ring = LPFC_FCP_RING;
+       psli->next_ring = LPFC_FCP_NEXT_RING;
+       psli->ip_ring = LPFC_IP_RING;
+
+       for (i = 0; i < psli->num_rings; i++) {
+               pring = &psli->ring[i];
+               switch (i) {
+               case LPFC_FCP_RING:     /* ring 0 - FCP */
+                       /* numCiocb and numRiocb are used in config_port */
+                       pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+                       pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+                       pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+                       pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+                       pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+                       pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+                       pring->iotag_ctr = 0;
+                       pring->iotag_max =
+                           (phba->cfg_hba_queue_depth * 2);
+                       pring->fast_iotag = pring->iotag_max;
+                       pring->num_mask = 0;
+                       break;
+               case LPFC_IP_RING:      /* ring 1 - IP */
+                       /* numCiocb and numRiocb are used in config_port */
+                       pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+                       pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+                       pring->num_mask = 0;
+                       break;
+               case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
+                       /* numCiocb and numRiocb are used in config_port */
+                       pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+                       pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+                       pring->fast_iotag = 0;
+                       pring->iotag_ctr = 0;
+                       pring->iotag_max = 4096;
+                       pring->num_mask = 4;
+                       pring->prt[0].profile = 0;      /* Mask 0 */
+                       pring->prt[0].rctl = FC_ELS_REQ;
+                       pring->prt[0].type = FC_ELS_DATA;
+                       pring->prt[0].lpfc_sli_rcv_unsol_event =
+                           lpfc_els_unsol_event;
+                       pring->prt[1].profile = 0;      /* Mask 1 */
+                       pring->prt[1].rctl = FC_ELS_RSP;
+                       pring->prt[1].type = FC_ELS_DATA;
+                       pring->prt[1].lpfc_sli_rcv_unsol_event =
+                           lpfc_els_unsol_event;
+                       pring->prt[2].profile = 0;      /* Mask 2 */
+                       /* NameServer Inquiry */
+                       pring->prt[2].rctl = FC_UNSOL_CTL;
+                       /* NameServer */
+                       pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
+                       pring->prt[2].lpfc_sli_rcv_unsol_event =
+                           lpfc_ct_unsol_event;
+                       pring->prt[3].profile = 0;      /* Mask 3 */
+                       /* NameServer response */
+                       pring->prt[3].rctl = FC_SOL_CTL;
+                       /* NameServer */
+                       pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
+                       pring->prt[3].lpfc_sli_rcv_unsol_event =
+                           lpfc_ct_unsol_event;
+                       break;
+               }
+               totiocb += (pring->numCiocb + pring->numRiocb);
+       }
+       if (totiocb > MAX_SLI2_IOCB) {
+               /* Too many cmd / rsp ring entries in SLI2 SLIM */
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "%d:0462 Too many cmd / rsp ring entries in "
+                               "SLI2 SLIM Data: x%x x%x\n",
+                               phba->brd_no, totiocb, MAX_SLI2_IOCB);
+       }
+
+       return 0;
+}
+
+int
+lpfc_sli_queue_setup(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_sli_ring *pring;
+       int i, cnt;
+
+       psli = &phba->sli;
+       spin_lock_irq(phba->host->host_lock);
+       INIT_LIST_HEAD(&psli->mboxq);
+       /* Initialize list headers for txq and txcmplq as doubly linked lists */
+       for (i = 0; i < psli->num_rings; i++) {
+               pring = &psli->ring[i];
+               pring->ringno = i;
+               pring->next_cmdidx  = 0;
+               pring->local_getidx = 0;
+               pring->cmdidx = 0;
+               INIT_LIST_HEAD(&pring->txq);
+               INIT_LIST_HEAD(&pring->txcmplq);
+               INIT_LIST_HEAD(&pring->iocb_continueq);
+               INIT_LIST_HEAD(&pring->postbufq);
+               cnt = pring->fast_iotag;
+               spin_unlock_irq(phba->host->host_lock);
+               if (cnt) {
+                       pring->fast_lookup =
+                               kmalloc(cnt * sizeof (struct lpfc_iocbq *),
+                                       GFP_KERNEL);
+                       if (pring->fast_lookup == 0) {
+                               return (0);
+                       }
+                       memset((char *)pring->fast_lookup, 0,
+                              cnt * sizeof (struct lpfc_iocbq *));
+               }
+               spin_lock_irq(phba->host->host_lock);
+       }
+       spin_unlock_irq(phba->host->host_lock);
+       return (1);
+}
+
+int
+lpfc_sli_hba_down(struct lpfc_hba * phba)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_sli_ring *pring;
+       LPFC_MBOXQ_t *pmb;
+       struct lpfc_iocbq *iocb, *next_iocb;
+       IOCB_t *icmd = NULL;
+       int i;
+       unsigned long flags = 0;
+
+       psli = &phba->sli;
+       lpfc_hba_down_prep(phba);
+
+       spin_lock_irqsave(phba->host->host_lock, flags);
+
+       for (i = 0; i < psli->num_rings; i++) {
+               pring = &psli->ring[i];
+               pring->flag |= LPFC_DEFERRED_RING_EVENT;
+
+               /*
+                * Error everything on the txq since these iocbs have not been
+                * given to the FW yet.
+                */
+               pring->txq_cnt = 0;
+
+               list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+                       list_del_init(&iocb->list);
+                       if (iocb->iocb_cmpl) {
+                               icmd = &iocb->iocb;
+                               icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+                               icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+                               spin_unlock_irqrestore(phba->host->host_lock,
+                                                      flags);
+                               (iocb->iocb_cmpl) (phba, iocb, iocb);
+                               spin_lock_irqsave(phba->host->host_lock, flags);
+                       } else {
+                               list_add_tail(&iocb->list,
+                                             &phba->lpfc_iocb_list);
+                       }
+               }
+
+               INIT_LIST_HEAD(&(pring->txq));
+
+               if (pring->fast_lookup) {
+                       kfree(pring->fast_lookup);
+                       pring->fast_lookup = NULL;
+               }
+
+       }
+
+       spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+       /* Return any active mbox cmds */
+       del_timer_sync(&psli->mbox_tmo);
+       spin_lock_irqsave(phba->host->host_lock, flags);
+       phba->work_hba_events &= ~WORKER_MBOX_TMO;
+       if (psli->mbox_active) {
+               pmb = psli->mbox_active;
+               pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+               if (pmb->mbox_cmpl) {
+                       spin_unlock_irqrestore(phba->host->host_lock, flags);
+                       pmb->mbox_cmpl(phba,pmb);
+                       spin_lock_irqsave(phba->host->host_lock, flags);
+               }
+       }
+       psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       psli->mbox_active = NULL;
+
+       /* Return any pending mbox cmds */
+       while ((pmb = lpfc_mbox_get(phba)) != NULL) {
+               pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+               if (pmb->mbox_cmpl) {
+                       spin_unlock_irqrestore(phba->host->host_lock, flags);
+                       pmb->mbox_cmpl(phba,pmb);
+                       spin_lock_irqsave(phba->host->host_lock, flags);
+               }
+       }
+
+       INIT_LIST_HEAD(&psli->mboxq);
+
+       spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+       /*
+        * Provided the hba is not in an error state, reset it.  It is not
+        * capable of IO anymore.
+        */
+       if (phba->hba_state != LPFC_HBA_ERROR) {
+               phba->hba_state = LPFC_INIT_START;
+               lpfc_sli_brdreset(phba, 1);
+       }
+
+       return 1;
+}
+
+void
+lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+       uint32_t *src = srcp;
+       uint32_t *dest = destp;
+       uint32_t ldata;
+       int i;
+
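+       /* cnt is a byte count; copy one 32-bit word at a time, byte-swapping
+        * each word between little-endian SLIM order and host order as needed.
+        */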
+       for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
+               ldata = *src;
+               ldata = le32_to_cpu(ldata);
+               *dest = ldata;
+               src++;
+               dest++;
+       }
+}
+
+int
+lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+                        struct lpfc_dmabuf * mp)
+{
+       /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
+          later */
+       list_add_tail(&mp->list, &pring->postbufq);
+
+       pring->postbufq_cnt++;
+       return 0;
+}
+
+
+struct lpfc_dmabuf *
+lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                        dma_addr_t phys)
+{
+       struct lpfc_dmabuf *mp, *next_mp;
+       struct list_head *slp = &pring->postbufq;
+
+       /* Search postbufq, from the beginning, looking for a match on phys */
+       list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+               if (mp->phys == phys) {
+                       list_del_init(&mp->list);
+                       pring->postbufq_cnt--;
+                       return mp;
+               }
+       }
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "%d:0410 Cannot find virtual addr for mapped buf on "
+                       "ring %d Data x%llx x%p x%p x%x\n",
+                       phba->brd_no, pring->ringno, (unsigned long long)phys,
+                       slp->next, slp->prev, pring->postbufq_cnt);
+       return NULL;
+}
+
+static void
+lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                          struct lpfc_iocbq * rspiocb)
+{
+       struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+       /* Free the resources associated with the ELS_REQUEST64 IOCB the driver
+        * just aborted.
+        * In this case, context2  = cmd,  context2->next = rsp, context3 = bpl
+        */
+       if (cmdiocb->context2) {
+               buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+               /* Free the response IOCB before completing the abort
+                  command.  */
+               buf_ptr = NULL;
+               list_remove_head((&buf_ptr1->list), buf_ptr,
+                                struct lpfc_dmabuf, list);
+               if (buf_ptr) {
+                       lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+                       kfree(buf_ptr);
+               }
+               lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+               kfree(buf_ptr1);
+       }
+
+       if (cmdiocb->context3) {
+               buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
+               lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+               kfree(buf_ptr);
+       }
+
+       list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+       return;
+}
+
+int
+lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
+                            struct lpfc_sli_ring * pring,
+                            struct lpfc_iocbq * cmdiocb)
+{
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       struct lpfc_iocbq *abtsiocbp = NULL;
+       IOCB_t *icmd = NULL;
+       IOCB_t *iabt = NULL;
+
+       /* issue ABTS for this IOCB based on iotag */
+       list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list);
+       if (abtsiocbp == NULL)
+               return 0;
+       memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
+
+       iabt = &abtsiocbp->iocb;
+       icmd = &cmdiocb->iocb;
+       switch (icmd->ulpCommand) {
+       case CMD_ELS_REQUEST64_CR:
+               /* Even though we abort the ELS command, the firmware may access
+                * the BPL or other resources before it processes our
+                * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
+                * resources till the actual abort request completes.
+                */
+               abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
+               abtsiocbp->context2 = cmdiocb->context2;
+               abtsiocbp->context3 = cmdiocb->context3;
+               cmdiocb->context2 = NULL;
+               cmdiocb->context3 = NULL;
+               abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
+               break;
+       default:
+               list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
+               return 0;
+       }
+
+       iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
+       iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
+
+       iabt->ulpLe = 1;
+       iabt->ulpClass = CLASS3;
+       iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
+
+       if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
+               list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
+               return 0;
+       }
+
+       return 1;
+}
+
+static int
+lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id,
+                            uint64_t lun_id, struct lpfc_iocbq *iocb,
+                            uint32_t ctx, lpfc_ctx_cmd ctx_cmd)
+{
+       int rc = 1;
+
+       if (lpfc_cmd == NULL)
+               return rc;
+
+       switch (ctx_cmd) {
+       case LPFC_CTX_LUN:
+               if ((lpfc_cmd->pCmd->device->id == tgt_id) &&
+                   (lpfc_cmd->pCmd->device->lun == lun_id))
+                       rc = 0;
+               break;
+       case LPFC_CTX_TGT:
+               if (lpfc_cmd->pCmd->device->id == tgt_id)
+                       rc = 0;
+               break;
+       case LPFC_CTX_CTX:
+               if (iocb->iocb.ulpContext == ctx)
+                       rc = 0;
+               break;
+       case LPFC_CTX_HOST:
+               rc = 0;
+               break;
+       default:
+               printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
+                       __FUNCTION__, ctx_cmd);
+               break;
+       }
+
+       return rc;
+}
+
+int
+lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+               uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
+{
+       struct lpfc_iocbq *iocb, *next_iocb;
+       IOCB_t *cmd = NULL;
+       struct lpfc_scsi_buf *lpfc_cmd;
+       int sum = 0, ret_val = 0;
+
+       /* Count matching FCP commands on the txcmplq */
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+               cmd = &iocb->iocb;
+
+               /* Must be a FCP command */
+               if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+                   (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+                   (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+                       continue;
+               }
+
+               /* context1 MUST be a struct lpfc_scsi_buf */
+               lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+               ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
+                                                    NULL, 0, ctx_cmd);
+               if (ret_val != 0)
+                       continue;
+               sum++;
+       }
+       return sum;
+}
+
+int
+lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                   uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
+                   lpfc_ctx_cmd abort_cmd)
+{
+       struct lpfc_iocbq *iocb, *next_iocb;
+       struct lpfc_iocbq *abtsiocb = NULL;
+       struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+       IOCB_t *cmd = NULL;
+       struct lpfc_scsi_buf *lpfc_cmd;
+       int errcnt = 0, ret_val = 0;
+
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+               cmd = &iocb->iocb;
+
+               /* Must be a FCP command */
+               if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+                   (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+                   (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+                       continue;
+               }
+
+               /* context1 MUST be a struct lpfc_scsi_buf */
+               lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+               ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
+                                                    iocb, ctx, abort_cmd);
+               if (ret_val != 0)
+                       continue;
+
+               /* issue ABTS for this IOCB based on iotag */
+               list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq,
+                                list);
+               if (abtsiocb == NULL) {
+                       errcnt++;
+                       continue;
+               }
+               memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
+
+               abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+               abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
+               abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+               abtsiocb->iocb.ulpLe = 1;
+               abtsiocb->iocb.ulpClass = cmd->ulpClass;
+
+               if (phba->hba_state >= LPFC_LINK_UP)
+                       abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+               else
+                       abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+               ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
+               if (ret_val == IOCB_ERROR) {
+                       list_add_tail(&abtsiocb->list, lpfc_iocb_list);
+                       errcnt++;
+                       continue;
+               }
+       }
+
+       return errcnt;
+}
+
+void
+lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
+                                struct lpfc_iocbq * queue1,
+                                struct lpfc_iocbq * queue2)
+{
+       if (queue1->context2 && queue2)
+               memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
+
+       /* The waiter is looking for LPFC_IO_HIPRI bit to be set
+          as a signal to wake up */
+       queue1->iocb_flag |= LPFC_IO_HIPRI;
+       return;
+}
+
+int
+lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
+                                      struct lpfc_sli_ring * pring,
+                                      struct lpfc_iocbq * piocb,
+                                      uint32_t flag,
+                                      struct lpfc_iocbq * prspiocbq,
+                                      uint32_t timeout)
+{
+       int j, delay_time,  retval = IOCB_ERROR;
+
+       /* The caller must leave the hipri_wait_queue context empty. */
+       if (piocb->context_un.hipri_wait_queue != 0) {
+               return IOCB_ERROR;
+       }
+
+       /*
+        * If the caller has provided a response iocbq buffer, context2 must
+        * be NULL or it is an error.
+        */
+       if (prspiocbq && piocb->context2) {
+               return IOCB_ERROR;
+       }
+
+       piocb->context2 = prspiocbq;
+
+       /* Setup callback routine and issue the command. */
+       piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
+       retval = lpfc_sli_issue_iocb(phba, pring, piocb,
+                                       flag | SLI_IOCB_HIGH_PRIORITY);
+       if (retval != IOCB_SUCCESS) {
+               piocb->context2 = NULL;
+               return IOCB_ERROR;
+       }
+
+       /*
+        * This high-priority iocb was sent out-of-band.  Poll for its
+        * completion rather than wait for a signal.  Note that the host_lock
+        * is held by the midlayer and must be released here to allow the
+        * interrupt handlers to complete the IO and signal this routine via
+        * the iocb_flag.
+        * Also, the delay_time is computed to be one second longer than
+        * the scsi command timeout to give the FW time to abort on
+        * timeout rather than the driver just giving up.  Typically,
+        * the midlayer does not specify a time for this command so the
+        * driver is free to enforce its own timeout.
+        */
+
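+       /* 64 sleeps of ((timeout + 1) * 1000) >> 6 ms each give a total poll
+        * window of roughly (timeout + 1) seconds.
+        */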
+       delay_time = ((timeout + 1) * 1000) >> 6;
+       retval = IOCB_ERROR;
+       spin_unlock_irq(phba->host->host_lock);
+       for (j = 0; j < 64; j++) {
+               msleep(delay_time);
+               if (piocb->iocb_flag & LPFC_IO_HIPRI) {
+                       piocb->iocb_flag &= ~LPFC_IO_HIPRI;
+                       retval = IOCB_SUCCESS;
+                       break;
+               }
+       }
+
+       spin_lock_irq(phba->host->host_lock);
+       piocb->context2 = NULL;
+       return retval;
+}
+
+int
+lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+                        uint32_t timeout)
+{
+       DECLARE_WAIT_QUEUE_HEAD(done_q);
+       DECLARE_WAITQUEUE(wq_entry, current);
+       uint32_t timeleft = 0;
+       int retval;
+
+       /* The caller must leave context1 empty. */
+       if (pmboxq->context1 != 0) {
+               return (MBX_NOT_FINISHED);
+       }
+
+       /* setup wake call as IOCB callback */
+       pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
+       /* setup context field to pass wait_queue pointer to wake function  */
+       pmboxq->context1 = &done_q;
+
+       /* set the task state and join the wait queue before issuing, so an
+        * immediate completion cannot be missed
+        */
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&done_q, &wq_entry);
+
+       /* now issue the command */
+       retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+       if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
+               timeleft = schedule_timeout(timeout * HZ);
+               pmboxq->context1 = NULL;
+               /* if schedule_timeout returns 0, we timed out and were not
+                  woken up */
+               if (timeleft == 0) {
+                       retval = MBX_TIMEOUT;
+               } else {
+                       retval = MBX_SUCCESS;
+               }
+       }
+
+
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(&done_q, &wq_entry);
+       return retval;
+}
+
+irqreturn_t
+lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+       struct lpfc_hba *phba;
+       uint32_t ha_copy;
+       uint32_t work_ha_copy;
+       unsigned long status;
+       int i;
+       uint32_t control;
+
+       /*
+        * Get the driver's phba structure from the dev_id and
+        * assume the HBA is not interrupting.
+        */
+       phba = (struct lpfc_hba *) dev_id;
+
+       if (unlikely(!phba))
+               return IRQ_NONE;
+
+       phba->sli.slistat.sli_intr++;
+
+       /*
+        * Call the HBA to see if it is interrupting.  If not, don't claim
+        * the interrupt
+        */
+
+       /* Ignore all interrupts during initialization. */
+       if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+               return IRQ_NONE;
+
+       /*
+        * Read host attention register to determine interrupt source
+        * Clear Attention Sources, except Error Attention (to
+        * preserve status) and Link Attention
+        */
+       spin_lock(phba->host->host_lock);
+       ha_copy = readl(phba->HAregaddr);
+       writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+       readl(phba->HAregaddr); /* flush */
+       spin_unlock(phba->host->host_lock);
+
+       if (unlikely(!ha_copy))
+               return IRQ_NONE;
+
+       work_ha_copy = ha_copy & phba->work_ha_mask;
+
+       if (unlikely(work_ha_copy)) {
+               if (work_ha_copy & HA_LATT) {
+                       if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
+                               /*
+                                * Turn off Link Attention interrupts
+                                * until CLEAR_LA done
+                                */
+                               spin_lock(phba->host->host_lock);
+                               phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
+                               control = readl(phba->HCregaddr);
+                               control &= ~HC_LAINT_ENA;
+                               writel(control, phba->HCregaddr);
+                               readl(phba->HCregaddr); /* flush */
+                               spin_unlock(phba->host->host_lock);
+                       }
+                       else
+                               work_ha_copy &= ~HA_LATT;
+               }
+
+               if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
+                       for (i = 0; i < phba->sli.num_rings; i++) {
+                               if (work_ha_copy & (HA_RXATT << (4*i))) {
+                                       /*
+                                        * Turn off Slow Rings interrupts
+                                        */
+                                       spin_lock(phba->host->host_lock);
+                                       control = readl(phba->HCregaddr);
+                                       control &= ~(HC_R0INT_ENA << i);
+                                       writel(control, phba->HCregaddr);
+                                       readl(phba->HCregaddr); /* flush */
+                                       spin_unlock(phba->host->host_lock);
+                               }
+                       }
+               }
+
+               if (work_ha_copy & HA_ERATT) {
+                       phba->hba_state = LPFC_HBA_ERROR;
+                       /*
+                        * There was a link/board error.  Read the
+                        * status register to retrieve the error event
+                        * and process it.
+                        */
+                       phba->sli.slistat.err_attn_event++;
+                       /* Save status info */
+                       phba->work_hs = readl(phba->HSregaddr);
+                       phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+                       phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+
+                       /* Clear Chip error bit */
+                       writel(HA_ERATT, phba->HAregaddr);
+                       readl(phba->HAregaddr); /* flush */
+
+                       /*
+                        * Resetting the HBA is the only reliable way
+                        * to shut down interrupts when there is an
+                        * ERROR.
+                        */
+                       lpfc_sli_send_reset(phba, phba->hba_state);
+               }
+
+               spin_lock(phba->host->host_lock);
+               phba->work_ha |= work_ha_copy;
+               if (phba->work_wait)
+                       wake_up(phba->work_wait);
+               spin_unlock(phba->host->host_lock);
+       }
+
+       ha_copy &= ~(phba->work_ha_mask);
+
+       /*
+        * Process all events on FCP ring.  Take the optimized path for
+        * FCP IO.  Any other IO is slow path and is handled by
+        * the worker thread.
+        */
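+       /* Each ring owns a 4-bit attention field in ha_copy; isolate the FCP
+        * ring's field so the HA_RXATT test below applies to that ring.
+        */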
+       status = (ha_copy & (HA_RXMASK  << (4*LPFC_FCP_RING)));
+       status >>= (4*LPFC_FCP_RING);
+       if (status & HA_RXATT)
+               lpfc_sli_handle_fast_ring_event(phba,
+                                               &phba->sli.ring[LPFC_FCP_RING],
+                                               status);
+       return IRQ_HANDLED;
+
+} /* lpfc_intr_handler */
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
new file mode 100644 (file)
index 0000000..abd9a8c
--- /dev/null
@@ -0,0 +1,216 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_sli.h 1.42 2005/03/21 02:01:28EST sf_support Exp  $
+ */
+
+/* forward declaration of struct lpfc_hba, used by the structures below */
+struct lpfc_hba;
+
+/* Define the context types that SLI handles for abort and sums. */
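+/* LPFC_CTX_LUN matches a single LUN, LPFC_CTX_TGT a whole target,
+ * LPFC_CTX_CTX a specific ulpContext value, and LPFC_CTX_HOST every
+ * command on the host.
+ */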
+typedef enum _lpfc_ctx_cmd {
+       LPFC_CTX_LUN,
+       LPFC_CTX_TGT,
+       LPFC_CTX_CTX,
+       LPFC_CTX_HOST
+} lpfc_ctx_cmd;
+
+/* This structure is used to handle IOCB requests / responses */
+struct lpfc_iocbq {
+       /* lpfc_iocbqs are used in doubly linked lists */
+       struct list_head list;
+       IOCB_t iocb;            /* IOCB cmd */
+       uint8_t retry;          /* retry counter for IOCB cmd - if needed */
+       uint8_t iocb_flag;
+#define LPFC_IO_POLL   1       /* Polling mode iocb */
+#define LPFC_IO_LIBDFC 2       /* libdfc iocb */
+#define LPFC_IO_WAIT   4
+#define LPFC_IO_HIPRI  8       /* High Priority Queue signal flag */
+
+       uint8_t abort_count;
+       uint8_t rsvd2;
+       uint32_t drvrTimeout;   /* driver timeout in seconds */
+       void *context1;         /* caller context information */
+       void *context2;         /* caller context information */
+       void *context3;         /* caller context information */
+       union {
+               wait_queue_head_t *hipri_wait_queue; /* High Priority Queue wait
+                                                       queue */
+               struct lpfc_iocbq  *rsp_iocb;
+               struct lpfcMboxq   *mbox;
+       } context_un;
+
+       void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+                          struct lpfc_iocbq *);
+
+};
+
+#define SLI_IOCB_RET_IOCB      1       /* Return IOCB if cmd ring full */
+#define SLI_IOCB_HIGH_PRIORITY 2       /* High priority command */
+
+#define IOCB_SUCCESS        0
+#define IOCB_BUSY           1
+#define IOCB_ERROR          2
+#define IOCB_TIMEDOUT       3
+
+typedef struct lpfcMboxq {
+       /* MBOXQs are used in single linked lists */
+       struct list_head list;  /* ptr to next mailbox command */
+       MAILBOX_t mb;           /* Mailbox cmd */
+       void *context1;         /* caller context information */
+       void *context2;         /* caller context information */
+
+       void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
+
+} LPFC_MBOXQ_t;
+
+#define MBX_POLL        1      /* poll mailbox till command done, then
+                                  return */
+#define MBX_NOWAIT      2      /* issue command then return immediately */
+#define MBX_STOP_IOCB   4      /* Stop iocb processing till mbox cmds
+                                  complete */
+
+#define LPFC_MAX_RING_MASK  4  /* max num of rctl/type masks allowed per
+                                  ring */
+#define LPFC_MAX_RING       4  /* max num of SLI rings used by driver */
+
+struct lpfc_sli_ring;
+
+struct lpfc_sli_ring_mask {
+       uint8_t profile;        /* profile associated with ring */
+       uint8_t rctl;   /* rctl / type pair configured for ring */
+       uint8_t type;   /* rctl / type pair configured for ring */
+       uint8_t rsvd;
+       /* rcv'd unsol event */
+       void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
+                                        struct lpfc_sli_ring *,
+                                        struct lpfc_iocbq *);
+};
+
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_ring_stat {
+       uint64_t iocb_event;     /* IOCB event counters */
+       uint64_t iocb_cmd;       /* IOCB cmd issued */
+       uint64_t iocb_rsp;       /* IOCB rsp received */
+       uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
+       uint64_t iocb_cmd_full;  /* IOCB cmd ring full */
+       uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
+       uint64_t iocb_rsp_full;  /* IOCB rsp ring full */
+};
+
+/* Structure used to hold SLI ring information */
+struct lpfc_sli_ring {
+       uint16_t flag;          /* ring flags */
+#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */
+#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */
+#define LPFC_STOP_IOCB_MBX       0x010 /* Stop processing IOCB cmds mbox */
+#define LPFC_STOP_IOCB_EVENT     0x020 /* Stop processing IOCB cmds event */
+#define LPFC_STOP_IOCB_MASK      0x030 /* Stop processing IOCB cmds mask */
+       uint16_t abtsiotag;     /* tracks next iotag to use for ABTS */
+
+       uint32_t local_getidx;   /* last available cmd index (from cmdGetInx) */
+       uint32_t next_cmdidx;    /* next_cmd index */
+       uint8_t rsvd;
+       uint8_t ringno;         /* ring number */
+       uint8_t rspidx;         /* current index in response ring */
+       uint8_t cmdidx;         /* current index in command ring */
+       uint16_t numCiocb;      /* number of command iocb's per ring */
+       uint16_t numRiocb;      /* number of rsp iocb's per ring */
+
+       uint32_t fast_iotag;    /* max fastlookup based iotag           */
+       uint32_t iotag_ctr;     /* keeps track of the next iotag to use */
+       uint32_t iotag_max;     /* max iotag value to use               */
+       struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
+                                          iotag */
+       struct list_head txq;
+       uint16_t txq_cnt;       /* current length of queue */
+       uint16_t txq_max;       /* max length */
+       struct list_head txcmplq;
+       uint16_t txcmplq_cnt;   /* current length of queue */
+       uint16_t txcmplq_max;   /* max length */
+       uint32_t *cmdringaddr;  /* virtual address for cmd rings */
+       uint32_t *rspringaddr;  /* virtual address for rsp rings */
+       uint32_t missbufcnt;    /* keep track of buffers to post */
+       struct list_head postbufq;
+       uint16_t postbufq_cnt;  /* current length of queue */
+       uint16_t postbufq_max;  /* max length */
+       struct list_head iocb_continueq;
+       uint16_t iocb_continueq_cnt;    /* current length of queue */
+       uint16_t iocb_continueq_max;    /* max length */
+
+       struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
+       uint32_t num_mask;      /* number of mask entries in prt array */
+
+       struct lpfc_sli_ring_stat stats;        /* SLI statistical info */
+
+       /* cmd ring available */
+       void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
+                                       struct lpfc_sli_ring *);
+};
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_stat {
+       uint64_t mbox_stat_err;  /* Mbox cmds completed status error */
+       uint64_t mbox_cmd;       /* Mailbox commands issued */
+       uint64_t sli_intr;       /* Count of Host Attention interrupts */
+       uint32_t err_attn_event; /* Error Attn event counters */
+       uint32_t link_event;     /* Link event counters */
+       uint32_t mbox_event;     /* Mailbox event counters */
+       uint32_t mbox_busy;      /* Mailbox cmd busy */
+};
+
+/* Structure used to hold SLI information */
+struct lpfc_sli {
+       uint32_t num_rings;
+       uint32_t sli_flag;
+
+       /* Additional sli_flags */
+#define LPFC_SLI_MBOX_ACTIVE      0x100        /* HBA mailbox is currently active */
+#define LPFC_SLI2_ACTIVE          0x200        /* SLI2 overlay in firmware is active */
+#define LPFC_PROCESS_LA           0x400        /* Able to process link attention */
+
+       struct lpfc_sli_ring ring[LPFC_MAX_RING];
+       int fcp_ring;           /* ring used for FCP initiator commands */
+       int next_ring;
+
+       int ip_ring;            /* ring used for IP network drv cmds */
+
+       struct lpfc_sli_stat slistat;   /* SLI statistical info */
+       struct list_head mboxq;
+       uint16_t mboxq_cnt;     /* current length of queue */
+       uint16_t mboxq_max;     /* max length */
+       LPFC_MBOXQ_t *mbox_active;      /* active mboxq information */
+
+       struct timer_list mbox_tmo;     /* Hold clk to timeout active mbox
+                                          cmd */
+
+       uint32_t *MBhostaddr;   /* virtual address for mbox cmds */
+};
+
+/* Given a pointer to the start of the ring, and the slot number of
+ * the desired iocb entry, calc a pointer to that entry.
+ * (assume iocb entry size is 32 bytes, or 8 words)
+ */
+#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
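+/* Illustrative use: IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx) yields a
+ * pointer to the ring's current command entry.
+ */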
+
+#define LPFC_MBOX_TMO           30     /* Sec tmo for outstanding mbox
+                                          command */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
new file mode 100644 (file)
index 0000000..dfacd8d
--- /dev/null
@@ -0,0 +1,32 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Enterprise Fibre Channel Host Bus Adapters.                     *
+ * Refer to the README file included with this package for         *
+ * driver version and adapter support.                             *
+ * Copyright (C) 2004 Emulex Corporation.                          *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of the GNU General Public License     *
+ * as published by the Free Software Foundation; either version 2  *
+ * of the License, or (at your option) any later version.          *
+ *                                                                 *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
+ * GNU General Public License for more details, a copy of which    *
+ * can be found in the file COPYING included with this package.    *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_version.h 1.49 2005/04/13 15:07:19EDT sf_support Exp  $
+ */
+
+#define LPFC_DRIVER_VERSION "8.0.28"
+
+#define LPFC_DRIVER_NAME "lpfc"
+
+#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
+               LPFC_DRIVER_VERSION
+
+#define DFC_API_VERSION "0.0.0"
index d58f303127f506ea2ea21f6c4f9c8e532e8eae48..377a4666b56834ad9be2125ed97727e6f499ed57 100644 (file)
@@ -209,7 +209,7 @@ static int BuildSgList (Scsi_Cmnd *SCpnt, PADAPTER2000 padapter, PDEV2000 pdev)
        if ( SCpnt->use_sg )
                {
                sg = (struct scatterlist *)SCpnt->request_buffer;
-               zc = pci_map_sg (padapter->pdev, sg, SCpnt->use_sg, scsi_to_pci_dma_dir (SCpnt->sc_data_direction));
+               zc = pci_map_sg (padapter->pdev, sg, SCpnt->use_sg, SCpnt->sc_data_direction);
                for ( z = 0;  z < zc;  z++ )
                        {
                        pdev->scatGath[z].address = cpu_to_le32 (sg_dma_address (sg));
@@ -225,7 +225,9 @@ static int BuildSgList (Scsi_Cmnd *SCpnt, PADAPTER2000 padapter, PDEV2000 pdev)
                outl (0, padapter->mb3);
                return TRUE;
                }
-       SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer, SCpnt->request_bufflen, scsi_to_pci_dma_dir (SCpnt->sc_data_direction));
+       SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev,
+                       SCpnt->request_buffer, SCpnt->request_bufflen,
+                       SCpnt->sc_data_direction);
        outl (SCpnt->SCp.have_data_in, padapter->mb2);
        outl (SCpnt->request_bufflen, padapter->mb3);
        return TRUE;
@@ -340,11 +342,11 @@ unmapProceed:;
                        }
                }
        if ( SCpnt->SCp.have_data_in )
-               pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, SCpnt->request_bufflen, scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+               pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, SCpnt->request_bufflen, SCpnt->sc_data_direction);
        else 
                {
                if ( SCpnt->use_sg )
-                       pci_unmap_sg (padapter->pdev, (struct scatterlist *)SCpnt->request_buffer, SCpnt->use_sg, scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+                       pci_unmap_sg (padapter->pdev, (struct scatterlist *)SCpnt->request_buffer, SCpnt->use_sg, SCpnt->sc_data_direction);
                }
 
 irqProceed:;
@@ -495,7 +497,7 @@ int Pci2000_QueueCommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
                                                else
                                                        {
                                                        SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer, SCpnt->request_bufflen,
-                                                                                                         scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+                                                                                                         SCpnt->sc_data_direction);
                                                        outl (SCpnt->SCp.have_data_in, padapter->mb2);
                                                        }
                                                outl (cdb[5], padapter->mb0);
@@ -511,13 +513,13 @@ int Pci2000_QueueCommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
                                SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev,
                                                                          ((struct scatterlist *)SCpnt->request_buffer)->address,
                                                                          SCpnt->request_bufflen,
-                                                                         scsi_to_pci_dma_dir (SCpnt->sc_data_direction));
+                                                                         SCpnt->sc_data_direction);
                                }
                        else
                                {
                                SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer,
                                                                          SCpnt->request_bufflen,
-                                                                         scsi_to_pci_dma_dir (SCpnt->sc_data_direction));
+                                                                         SCpnt->sc_data_direction);
                                }
                        outl (SCpnt->SCp.have_data_in, padapter->mb2);
                        outl (SCpnt->request_bufflen, padapter->mb3);
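The hunks above pass SCpnt->sc_data_direction straight to the PCI DMA mapping calls instead of going through scsi_to_pci_dma_dir(). A toy model of why that is safe, assuming the old wrapper was a simple pass-through once sc_data_direction began holding generic DMA direction values (the enum below mirrors the usual kernel values but is reproduced here only for illustration):

    #include <stdio.h>

    /* Illustrative copy of the generic DMA direction values. */
    enum dma_data_direction {
            DMA_BIDIRECTIONAL = 0,
            DMA_TO_DEVICE     = 1,
            DMA_FROM_DEVICE   = 2,
            DMA_NONE          = 3,
    };

    /* Assumed behaviour of the old wrapper: an identity conversion. */
    #define scsi_to_pci_dma_dir(scsi_dir)   ((int)(scsi_dir))

    int main(void)
    {
            enum dma_data_direction dir = DMA_FROM_DEVICE;

            printf("wrapped=%d direct=%d\n", scsi_to_pci_dma_dir(dir), (int)dir);
            return 0;
    }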
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index f7a247defba6cd69896f4523c8fbb9d4a14de993..48fdd406c07572d5338a58e94092ea79717485c0 100644 (file)
@@ -1,7 +1,7 @@
 EXTRA_CFLAGS += -DUNIQUE_FW_NAME
 
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
-               qla_dbg.o qla_sup.o qla_rscn.o
+               qla_dbg.o qla_sup.o qla_rscn.o qla_attr.o
 
 qla2100-y := ql2100.o ql2100_fw.o
 qla2200-y := ql2200.o ql2200_fw.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
new file mode 100644 (file)
index 0000000..2240a0c
--- /dev/null
@@ -0,0 +1,338 @@
+/*
+ *                  QLOGIC LINUX SOFTWARE
+ *
+ * QLogic ISP2x00 device driver for Linux 2.6.x
+ * Copyright (C) 2003-2005 QLogic Corporation
+ * (www.qlogic.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+#include "qla_def.h"
+
+#include <linux/vmalloc.h>
+#include <scsi/scsi_transport_fc.h>
+
+/* SYSFS attributes --------------------------------------------------------- */
+
+static ssize_t
+qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
+    size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+
+       if (ha->fw_dump_reading == 0)
+               return 0;
+       if (off > ha->fw_dump_buffer_len)
+               return 0;
+       if (off + count > ha->fw_dump_buffer_len)
+               count = ha->fw_dump_buffer_len - off;
+
+       memcpy(buf, &ha->fw_dump_buffer[off], count);
+
+       return (count);
+}
+
+static ssize_t
+qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
+    size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       int reading;
+       uint32_t dump_size;
+
+       if (off != 0)
+               return (0);
+
+       reading = simple_strtol(buf, NULL, 10);
+       switch (reading) {
+       case 0:
+               if (ha->fw_dump_reading == 1) {
+                       qla_printk(KERN_INFO, ha,
+                           "Firmware dump cleared on (%ld).\n",
+                           ha->host_no);
+
+                       vfree(ha->fw_dump_buffer);
+                       free_pages((unsigned long)ha->fw_dump,
+                           ha->fw_dump_order);
+
+                       ha->fw_dump_reading = 0;
+                       ha->fw_dump_buffer = NULL;
+                       ha->fw_dump = NULL;
+               }
+               break;
+       case 1:
+               if (ha->fw_dump != NULL && !ha->fw_dump_reading) {
+                       ha->fw_dump_reading = 1;
+
+                       dump_size = FW_DUMP_SIZE_1M;
+                       if (ha->fw_memory_size < 0x20000) 
+                               dump_size = FW_DUMP_SIZE_128K;
+                       else if (ha->fw_memory_size < 0x80000) 
+                               dump_size = FW_DUMP_SIZE_512K;
+                       ha->fw_dump_buffer = (char *)vmalloc(dump_size);
+                       if (ha->fw_dump_buffer == NULL) {
+                               qla_printk(KERN_WARNING, ha,
+                                   "Unable to allocate memory for firmware "
+                                   "dump buffer (%d).\n", dump_size);
+
+                               ha->fw_dump_reading = 0;
+                               return (count);
+                       }
+                       qla_printk(KERN_INFO, ha,
+                           "Firmware dump ready for read on (%ld).\n",
+                           ha->host_no);
+                       memset(ha->fw_dump_buffer, 0, dump_size);
+                       if (IS_QLA2100(ha) || IS_QLA2200(ha))
+                               qla2100_ascii_fw_dump(ha);
+                       else
+                               qla2300_ascii_fw_dump(ha);
+                       ha->fw_dump_buffer_len = strlen(ha->fw_dump_buffer);
+               }
+               break;
+       }
+       return (count);
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+       .attr = {
+               .name = "fw_dump",
+               .mode = S_IRUSR | S_IWUSR,
+               .owner = THIS_MODULE,
+       },
+       .size = 0,
+       .read = qla2x00_sysfs_read_fw_dump,
+       .write = qla2x00_sysfs_write_fw_dump,
+};
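A hedged sketch of how a user-space tool might drive this binary attribute: write "1" to have the driver format a previously captured dump, read back the ASCII text, then write "0" to release it. The sysfs path (including the host number) is an assumption; the real location depends on the system:

    #include <stdio.h>

    /* Hypothetical path; adjust the host number for the actual adapter. */
    #define FW_DUMP_PATH "/sys/class/scsi_host/host0/device/fw_dump"

    static int write_flag(const char *flag)
    {
            FILE *f = fopen(FW_DUMP_PATH, "w");

            if (!f)
                    return -1;
            fputs(flag, f);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            char buf[4096];
            size_t n;
            FILE *f;

            if (write_flag("1"))            /* ask the driver to format the dump */
                    return 1;

            f = fopen(FW_DUMP_PATH, "r");
            if (!f)
                    return 1;
            while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                    fwrite(buf, 1, n, stdout);      /* ASCII firmware dump */
            fclose(f);

            return write_flag("0") ? 1 : 0; /* free the dump buffers */
    }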
+
+static ssize_t
+qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
+    size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       uint16_t        *witer;
+       unsigned long   flags;
+       uint16_t        cnt;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
+               return 0;
+
+       /* Read NVRAM. */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qla2x00_lock_nvram_access(ha);
+       witer = (uint16_t *)buf;
+       for (cnt = 0; cnt < count / 2; cnt++) {
+               *witer = cpu_to_le16(qla2x00_get_nvram_word(ha,
+                   cnt+ha->nvram_base));
+               witer++;
+       }
+       qla2x00_unlock_nvram_access(ha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return (count);
+}
+
+static ssize_t
+qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
+    size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       uint8_t         *iter;
+       uint16_t        *witer;
+       unsigned long   flags;
+       uint16_t        cnt;
+       uint8_t         chksum;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
+               return 0;
+
+       /* Checksum NVRAM. */
+       iter = (uint8_t *)buf;
+       chksum = 0;
+       for (cnt = 0; cnt < count - 1; cnt++)
+               chksum += *iter++;
+       chksum = ~chksum + 1;
+       *iter = chksum;
+
+       /* Write NVRAM. */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qla2x00_lock_nvram_access(ha);
+       qla2x00_release_nvram_protection(ha);
+       witer = (uint16_t *)buf;
+       for (cnt = 0; cnt < count / 2; cnt++) {
+               qla2x00_write_nvram_word(ha, cnt+ha->nvram_base,
+                   cpu_to_le16(*witer));
+               witer++;
+       }
+       qla2x00_unlock_nvram_access(ha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return (count);
+}
+
+static struct bin_attribute sysfs_nvram_attr = {
+       .attr = {
+               .name = "nvram",
+               .mode = S_IRUSR | S_IWUSR,
+               .owner = THIS_MODULE,
+       },
+       .size = sizeof(nvram_t),
+       .read = qla2x00_sysfs_read_nvram,
+       .write = qla2x00_sysfs_write_nvram,
+};
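The write path above recomputes the NVRAM checksum so that all bytes sum to zero modulo 256: it adds up every byte but the last and stores the two's complement in the final byte. A stand-alone check of that arithmetic (the 8-byte buffer is an arbitrary stand-in, not sizeof(nvram_t)):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t nv[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0x00 };
            uint8_t chksum = 0;
            size_t cnt;

            /* Sum all bytes except the last, then store the two's complement. */
            for (cnt = 0; cnt < sizeof(nv) - 1; cnt++)
                    chksum += nv[cnt];
            nv[sizeof(nv) - 1] = (uint8_t)(~chksum + 1);

            /* The complete buffer now sums to zero (mod 256). */
            chksum = 0;
            for (cnt = 0; cnt < sizeof(nv); cnt++)
                    chksum += nv[cnt];
            printf("total = 0x%02x\n", chksum);     /* prints 0x00 */
            return 0;
    }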
+
+void
+qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
+{
+       struct Scsi_Host *host = ha->host;
+
+       sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
+       sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
+}
+
+void
+qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
+{
+       struct Scsi_Host *host = ha->host;
+
+       sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
+       sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
+}
+
+/* Host attributes. */
+
+static void
+qla2x00_get_host_port_id(struct Scsi_Host *shost)
+{
+       scsi_qla_host_t *ha = to_qla_host(shost);
+
+       fc_host_port_id(shost) = ha->d_id.b.domain << 16 |
+           ha->d_id.b.area << 8 | ha->d_id.b.al_pa;
+}
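The value stored here is the 24-bit Fibre Channel address, with the domain in bits 23-16, the area in bits 15-8 and the AL_PA in bits 7-0. A small illustration of the packing with made-up component values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Arbitrary example FC address components. */
            uint8_t domain = 0x01, area = 0x02, al_pa = 0xef;

            /* Same packing as above: domain | area | AL_PA. */
            uint32_t port_id = (uint32_t)domain << 16 | area << 8 | al_pa;

            printf("port_id = 0x%06x\n", (unsigned int)port_id);   /* 0x0102ef */
            return 0;
    }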
+
+static void
+qla2x00_get_starget_node_name(struct scsi_target *starget)
+{
+       struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+       scsi_qla_host_t *ha = to_qla_host(host);
+       fc_port_t *fcport;
+       uint64_t node_name = 0;
+
+       list_for_each_entry(fcport, &ha->fcports, list) {
+               if (starget->id == fcport->os_target_id) {
+                       node_name = *(uint64_t *)fcport->node_name;
+                       break;
+               }
+       }
+
+       fc_starget_node_name(starget) = be64_to_cpu(node_name);
+}
+
+static void
+qla2x00_get_starget_port_name(struct scsi_target *starget)
+{
+       struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+       scsi_qla_host_t *ha = to_qla_host(host);
+       fc_port_t *fcport;
+       uint64_t port_name = 0;
+
+       list_for_each_entry(fcport, &ha->fcports, list) {
+               if (starget->id == fcport->os_target_id) {
+                       port_name = *(uint64_t *)fcport->port_name;
+                       break;
+               }
+       }
+
+       fc_starget_port_name(starget) = be64_to_cpu(port_name);
+}
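The node and port names are kept as 8-byte arrays and exposed as 64-bit values by reading them as a big-endian quantity. A portable stand-in for that conversion, assembling the bytes MSB first (the example WWN bytes are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    /* Equivalent of be64_to_cpu(*(uint64_t *)wwn) regardless of host endianness. */
    static uint64_t wwn_to_u64(const uint8_t *wwn)
    {
            uint64_t v = 0;
            int i;

            for (i = 0; i < 8; i++)
                    v = (v << 8) | wwn[i];
            return v;
    }

    int main(void)
    {
            uint8_t port_name[8] = { 0x21, 0x00, 0x00, 0xe0,
                                     0x8b, 0x01, 0x02, 0x03 };

            printf("wwn = 0x%016llx\n",
                   (unsigned long long)wwn_to_u64(port_name));
            return 0;
    }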
+
+static void
+qla2x00_get_starget_port_id(struct scsi_target *starget)
+{
+       struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+       scsi_qla_host_t *ha = to_qla_host(host);
+       fc_port_t *fcport;
+       uint32_t port_id = ~0U;
+
+       list_for_each_entry(fcport, &ha->fcports, list) {
+               if (starget->id == fcport->os_target_id) {
+                       port_id = fcport->d_id.b.domain << 16 |
+                           fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
+                       break;
+               }
+       }
+
+       fc_starget_port_id(starget) = port_id;
+}
+
+static void
+qla2x00_get_rport_loss_tmo(struct fc_rport *rport)
+{
+       struct Scsi_Host *host = rport_to_shost(rport);
+       scsi_qla_host_t *ha = to_qla_host(host);
+
+       rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+}
+
+static void
+qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+       struct Scsi_Host *host = rport_to_shost(rport);
+       scsi_qla_host_t *ha = to_qla_host(host);
+
+       if (timeout)
+               ha->port_down_retry_count = timeout;
+       else
+               ha->port_down_retry_count = 1;
+
+       rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+}
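A compact sketch of the policy these two callbacks implement: a zero timeout falls back to a single retry, and dev_loss_tmo always ends up as the retry count plus a five second margin (the helper and values below are illustrative, not driver code):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t set_loss_tmo(uint32_t timeout, uint32_t *retry_count)
    {
            /* Zero means "at least one retry"; add a 5 second margin on top. */
            *retry_count = timeout ? timeout : 1;
            return *retry_count + 5;
    }

    int main(void)
    {
            uint32_t retries;

            printf("tmo(30) -> %u\n", (unsigned)set_loss_tmo(30, &retries));  /* 35 */
            printf("tmo(0)  -> %u\n", (unsigned)set_loss_tmo(0, &retries));   /* 6  */
            return 0;
    }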
+
+static struct fc_function_template qla2xxx_transport_functions = {
+
+       .show_host_node_name = 1,
+       .show_host_port_name = 1,
+       .get_host_port_id = qla2x00_get_host_port_id,
+       .show_host_port_id = 1,
+
+       .dd_fcrport_size = sizeof(struct fc_port *),
+
+       .get_starget_node_name = qla2x00_get_starget_node_name,
+       .show_starget_node_name = 1,
+       .get_starget_port_name = qla2x00_get_starget_port_name,
+       .show_starget_port_name = 1,
+       .get_starget_port_id  = qla2x00_get_starget_port_id,
+       .show_starget_port_id = 1,
+
+       .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
+       .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+       .show_rport_dev_loss_tmo = 1,
+
+};
+
+struct scsi_transport_template *
+qla2x00_alloc_transport_tmpl(void)
+{
+       return (fc_attach_transport(&qla2xxx_transport_functions));
+}
+
+void
+qla2x00_init_host_attr(scsi_qla_host_t *ha)
+{
+       fc_host_node_name(ha->host) =
+           be64_to_cpu(*(uint64_t *)ha->init_cb->node_name);
+       fc_host_port_name(ha->host) =
+           be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
+}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e8ebbc56e81c67affd7a6f996490db57cf33cd0..c4cd4ac414c4f1982ede841a50f54948a478ae65 100644 (file)
@@ -1065,11 +1065,6 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
        printk("  sp flags=0x%x\n", sp->flags);
        printk("  r_start=0x%lx, u_start=0x%lx, f_start=0x%lx, state=%d\n",
            sp->r_start, sp->u_start, sp->f_start, sp->state);
-
-       printk(" e_start= 0x%lx, ext_history=%d, fo retry=%d, loopid=%x, "
-           "port path=%d\n", sp->e_start, sp->ext_history, sp->fo_retry_cnt,
-           sp->lun_queue->fclun->fcport->loop_id,
-           sp->lun_queue->fclun->fcport->cur_path);
 }
 
 #if defined(QL_DEBUG_ROUTINES)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 36ae03173a5e544e5a912f1ba562acc7841f71c0..7d47b8d9204716ad22e7c0d2a2263e45884b3c1c 100644 (file)
 /*
  * Timeout timer counts in seconds
  */
-#define PORT_RETRY_TIME                        2
+#define PORT_RETRY_TIME                        1
 #define LOOP_DOWN_TIMEOUT              60
 #define LOOP_DOWN_TIME                 255     /* 240 */
 #define        LOOP_DOWN_RESET                 (LOOP_DOWN_TIME - 30)
@@ -241,6 +241,7 @@ typedef struct srb {
        struct list_head list;
 
        struct scsi_qla_host *ha;       /* HA the SP is queued on */
+       struct fc_port *fcport;
 
        struct scsi_cmnd *cmd;          /* Linux SCSI command pkt */
 
@@ -251,11 +252,6 @@ typedef struct srb {
        /* Request state */
        uint16_t state;
 
-       /* Target/LUN queue pointers. */
-       struct os_tgt *tgt_queue;       /* ptr to visible ha's target */
-       struct os_lun *lun_queue;       /* ptr to visible ha's lun */
-       struct fc_lun *fclun;           /* FC LUN context pointer. */
-
        /* Timing counts. */
        unsigned long e_start;          /* Start of extend timeout */
        unsigned long r_start;          /* Start of request */
@@ -1602,73 +1598,6 @@ typedef struct {
        rpt_lun_lst_t list;
 } rpt_lun_cmd_rsp_t;
 
-/*
- * SCSI Target Queue structure
- */
-typedef struct os_tgt {
-       struct os_lun *olun[MAX_LUNS]; /* LUN context pointer. */
-       struct fc_port *fcport;
-       unsigned long flags;
-       uint8_t port_down_retry_count;
-       uint32_t down_timer;
-       struct scsi_qla_host *ha;
-
-       /* Persistent binding information */
-       port_id_t d_id;
-       uint8_t node_name[WWN_SIZE];
-       uint8_t port_name[WWN_SIZE];
-} os_tgt_t;
-
-/*
- * SCSI Target Queue flags
- */
-#define TQF_ONLINE             0               /* Device online to OS. */
-#define TQF_SUSPENDED          1
-#define TQF_RETRY_CMDS         2
-
-/*
- * SCSI LUN Queue structure
- */
-typedef struct os_lun {
-       struct fc_lun *fclun;           /* FC LUN context pointer. */
-       spinlock_t q_lock;              /* Lun Lock */
-
-       unsigned long q_flag;
-#define LUN_MPIO_RESET_CNTS    1       /* Lun */
-#define LUN_MPIO_BUSY          2       /* Lun is changing paths  */
-#define LUN_EXEC_DELAYED       7       /* Lun execution is delayed */
-
-       u_long q_timeout;               /* total command timeouts */
-       atomic_t q_timer;               /* suspend timer */
-       uint32_t q_count;               /* current count */
-       uint32_t q_max;                 /* maxmum count lun can be suspended */
-       uint8_t q_state;                /* lun State */
-#define LUN_STATE_READY                1       /* lun is ready for i/o */
-#define LUN_STATE_RUN          2       /* lun has a timer running */
-#define LUN_STATE_WAIT         3       /* lun is suspended */
-#define LUN_STATE_TIMEOUT      4       /* lun has timed out */
-
-       u_long io_cnt;                  /* total xfer count since boot */
-       u_long out_cnt;                 /* total outstanding IO count */
-       u_long w_cnt;                   /* total writes */
-       u_long r_cnt;                   /* total reads */
-       u_long avg_time;                /*  */
-} os_lun_t;
-
-
-/* LUN BitMask structure definition, array of 32bit words,
- * 1 bit per lun.  When bit == 1, the lun is masked.
- * Most significant bit of mask[0] is lun 0, bit 24 is lun 7.
- */
-typedef struct lun_bit_mask {
-       /* Must allocate at least enough bits to accomodate all LUNs */
-#if ((MAX_FIBRE_LUNS & 0x7) == 0)
-       uint8_t mask[MAX_FIBRE_LUNS >> 3];
-#else
-       uint8_t mask[(MAX_FIBRE_LUNS + 8) >> 3];
-#endif
-} lun_bit_mask_t;
-
 /*
  * Fibre channel port type.
  */
@@ -1686,8 +1615,6 @@ typedef struct lun_bit_mask {
  */
 typedef struct fc_port {
        struct list_head list;
-       struct list_head fcluns;
-
        struct scsi_qla_host *ha;
        struct scsi_qla_host *vis_ha;   /* only used when suspending lun */
 
@@ -1702,8 +1629,7 @@ typedef struct fc_port {
        atomic_t state;
        uint32_t flags;
 
-       os_tgt_t *tgt_queue;
-       uint16_t os_target_id;
+       unsigned int os_target_id;
 
        uint16_t iodesc_idx_sent;
 
@@ -1717,7 +1643,7 @@ typedef struct fc_port {
        uint8_t mp_byte;                /* multi-path byte (not used) */
        uint8_t cur_path;               /* current path id */
 
-       lun_bit_mask_t lun_mask;
+       struct fc_rport *rport;
 } fc_port_t;
 
 /*
@@ -1763,25 +1689,6 @@ typedef struct fc_port {
 /* No loop ID flag. */
 #define FC_NO_LOOP_ID          0x1000
 
-/*
- * Fibre channel LUN structure.
- */
-typedef struct fc_lun {
-        struct list_head list;
-
-       fc_port_t *fcport;
-       fc_port_t *o_fcport;
-       uint16_t lun;
-       atomic_t state;
-       uint8_t device_type;
-
-       uint8_t max_path_retries;
-       uint32_t flags;
-} fc_lun_t;
-
-#define        FLF_VISIBLE_LUN         BIT_0
-#define        FLF_ACTIVE_LUN          BIT_1
-
 /*
  * FC-CT interface
  *
@@ -2175,27 +2082,6 @@ typedef struct scsi_qla_host {
        uint32_t        current_outstanding_cmd; 
        srb_t           *status_srb;    /* Status continuation entry. */
 
-       /*
-        * Need to hold the list_lock with irq's disabled in order to access
-        * the following list.
-        *
-        * This list_lock is of lower priority than the host_lock.
-        */
-       spinlock_t              list_lock ____cacheline_aligned;
-                                               /* lock to guard lists which
-                                                * hold srb_t's */
-        struct list_head        retry_queue;    /* watchdog queue */
-        struct list_head        done_queue;     /* job on done queue */
-        struct list_head        failover_queue; /* failover list link. */
-       struct list_head        scsi_retry_queue;     /* SCSI retry queue */
-       struct list_head        pending_queue;  /* SCSI command pending queue */
-
-       unsigned long    done_q_cnt;
-       unsigned long    pending_in_q;
-        uint32_t       retry_q_cnt; 
-        uint32_t       scsi_retry_q_cnt; 
-        uint32_t       failover_cnt; 
-
        unsigned long   last_irq_cpu;   /* cpu where we got our last irq */
 
        uint16_t           revision;
@@ -2273,9 +2159,6 @@ typedef struct scsi_qla_host {
        struct io_descriptor    io_descriptors[MAX_IO_DESCRIPTORS];
        uint16_t                iodesc_signature;
 
-       /* OS target queue pointers. */
-       os_tgt_t                *otgt[MAX_FIBRE_DEVICES];
-
        /* RSCN queue. */
        uint32_t rscn_queue[MAX_RSCN_COUNT];
        uint8_t rscn_in_ptr;
@@ -2420,8 +2303,6 @@ typedef struct scsi_qla_host {
 #define LOOP_RDY(ha)   (!LOOP_NOT_READY(ha))
 
 #define TGT_Q(ha, t) (ha->otgt[t])
-#define LUN_Q(ha, t, l)        (TGT_Q(ha, t)->olun[l])
-#define GET_LU_Q(ha, t, l) ((TGT_Q(ha,t) != NULL)? TGT_Q(ha, t)->olun[l] : NULL)
 
 #define to_qla_host(x)         ((scsi_qla_host_t *) (x)->hostdata)
 
@@ -2479,7 +2360,6 @@ struct _qla2x00stats  {
 #include "qla_gbl.h"
 #include "qla_dbg.h"
 #include "qla_inline.h"
-#include "qla_listops.h"
 
 /*
 * String arrays
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5adf2af7ba64daa3b6f1193570c67cf4b12b09b7..e4bfe4d5bbe4b6ed206b7365fdc286a3591a7488 100644 (file)
@@ -24,6 +24,7 @@
 #define        __QLA_GBL_H
 
 #include <linux/interrupt.h>
+#include <scsi/scsi_transport.h>
 
 extern void qla2x00_remove_one(struct pci_dev *);
 extern int qla2x00_probe_one(struct pci_dev *, struct qla_board_info *);
@@ -44,10 +45,10 @@ extern void qla2x00_restart_queues(scsi_qla_host_t *, uint8_t);
 
 extern void qla2x00_rescan_fcports(scsi_qla_host_t *);
 
-extern void qla2x00_tgt_free(scsi_qla_host_t *ha, uint16_t t);
-
 extern int qla2x00_abort_isp(scsi_qla_host_t *);
 
+extern void qla2x00_reg_remote_port(scsi_qla_host_t *, fc_port_t *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -74,25 +75,15 @@ extern int ql2xsuspendcount;
 #if defined(MODULE)
 extern char *ql2xopts;
 #endif
+extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
 
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
 extern void qla2x00_cmd_timeout(srb_t *);
 
-extern int __qla2x00_suspend_lun(scsi_qla_host_t *, os_lun_t *, int, int, int);
-
-extern void qla2x00_done(scsi_qla_host_t *);
-extern void qla2x00_next(scsi_qla_host_t *);
-extern void qla2x00_flush_failover_q(scsi_qla_host_t *, os_lun_t *);
-extern void qla2x00_reset_lun_fo_counts(scsi_qla_host_t *, os_lun_t *);
-
-extern void qla2x00_extend_timeout(struct scsi_cmnd *, int);
-
 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
 extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
 
-extern void qla2x00_abort_queues(scsi_qla_host_t *, uint8_t);
-
 extern void qla2x00_blink_led(scsi_qla_host_t *);
 
 extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
@@ -150,7 +141,7 @@ qla2x00_abort_target(fc_port_t *fcport);
 #endif
 
 extern int
-qla2x00_target_reset(scsi_qla_host_t *, uint16_t, uint16_t);
+qla2x00_target_reset(scsi_qla_host_t *, struct fc_port *);
 
 extern int
 qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -254,4 +245,13 @@ extern void qla2x00_cancel_io_descriptors(scsi_qla_host_t *);
 #define qla2x00_alloc_ioctl_mem(ha)            (0)
 #define qla2x00_free_ioctl_mem(ha)             do { } while (0)
 
+/*
+ * Global Function Prototypes in qla_attr.c source file.
+ */
+extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
+extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+extern struct scsi_transport_template *qla2x00_alloc_transport_tmpl(void);
+extern void qla2x00_init_host_attr(scsi_qla_host_t *);
+extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
+extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1ab5d92c3868eb349285faf6aa7e3ceca71ae124..0387005fcb6df08dc1f4b3c1c9d924eb7cfdfb1e 100644 (file)
@@ -19,6 +19,7 @@
 #include "qla_def.h"
 
 #include <linux/delay.h>
+#include <scsi/scsi_transport_fc.h>
 
 #include "qla_devtbl.h"
 
@@ -44,34 +45,17 @@ static int qla2x00_init_rings(scsi_qla_host_t *);
 static int qla2x00_fw_ready(scsi_qla_host_t *);
 static int qla2x00_configure_hba(scsi_qla_host_t *);
 static int qla2x00_nvram_config(scsi_qla_host_t *);
-static void qla2x00_init_tgt_map(scsi_qla_host_t *);
 static int qla2x00_configure_loop(scsi_qla_host_t *);
 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
 static void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
-static void qla2x00_lun_discovery(scsi_qla_host_t *, fc_port_t *);
-static int qla2x00_rpt_lun_discovery(scsi_qla_host_t *, fc_port_t *,
-    inq_cmd_rsp_t *, dma_addr_t);
-static int qla2x00_report_lun(scsi_qla_host_t *, fc_port_t *);
-static fc_lun_t *qla2x00_cfg_lun(scsi_qla_host_t *, fc_port_t *, uint16_t,
-    inq_cmd_rsp_t *, dma_addr_t);
-static fc_lun_t * qla2x00_add_lun(fc_port_t *, uint16_t);
-static int qla2x00_inquiry(scsi_qla_host_t *, fc_port_t *, uint16_t,
-    inq_cmd_rsp_t *, dma_addr_t);
 static int qla2x00_configure_fabric(scsi_qla_host_t *);
 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
 static int qla2x00_device_resync(scsi_qla_host_t *);
 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
     uint16_t *);
-static void qla2x00_config_os(scsi_qla_host_t *ha);
-static uint16_t qla2x00_fcport_bind(scsi_qla_host_t *ha, fc_port_t *fcport);
-static os_lun_t * qla2x00_fclun_bind(scsi_qla_host_t *, fc_port_t *,
-    fc_lun_t *);
-static void qla2x00_lun_free(scsi_qla_host_t *, uint16_t, uint16_t);
 
 static int qla2x00_restart_isp(scsi_qla_host_t *);
 static void qla2x00_reset_adapter(scsi_qla_host_t *);
-static os_tgt_t *qla2x00_tgt_alloc(scsi_qla_host_t *, uint16_t);
-static os_lun_t *qla2x00_lun_alloc(scsi_qla_host_t *, uint16_t, uint16_t);
 
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
@@ -119,9 +103,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 
        qla2x00_reset_chip(ha);
 
-       /* Initialize target map database. */
-       qla2x00_init_tgt_map(ha);
-
        qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
        qla2x00_nvram_config(ha);
 
@@ -1529,25 +1510,6 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
        return (rval);
 }
 
-/*
-* qla2x00_init_tgt_map
-*      Initializes target map.
-*
-* Input:
-*      ha = adapter block pointer.
-*
-* Output:
-*      TGT_Q initialized
-*/
-static void
-qla2x00_init_tgt_map(scsi_qla_host_t *ha)
-{
-       uint32_t t;
-
-       for (t = 0; t < MAX_TARGETS; t++)
-               TGT_Q(ha, t) = (os_tgt_t *)NULL;
-}
-
 /**
  * qla2x00_alloc_fcport() - Allocate a generic fcport.
  * @ha: HA context
@@ -1572,7 +1534,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
        fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
        atomic_set(&fcport->state, FCS_UNCONFIGURED);
        fcport->flags = FCF_RLC_SUPPORT;
-       INIT_LIST_HEAD(&fcport->fcluns);
 
        return (fcport);
 }
@@ -1662,7 +1623,6 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
                    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
                        rval = QLA_FUNCTION_FAILED;
                } else {
-                       qla2x00_config_os(ha);
                        atomic_set(&ha->loop_state, LOOP_READY);
 
                        DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no));
@@ -1907,8 +1867,11 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
        if (fcport->flags & FCF_TAPE_PRESENT) {
                spin_lock_irqsave(&ha->hardware_lock, flags);
                for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+                       fc_port_t *sfcp;
+
                        if ((sp = ha->outstanding_cmds[index]) != 0) {
-                               if (sp->fclun->fcport == fcport) {
+                               sfcp = sp->fcport;
+                               if (sfcp == fcport) {
                                        atomic_set(&fcport->state, FCS_ONLINE);
                                        spin_unlock_irqrestore(
                                            &ha->hardware_lock, flags);
@@ -1919,423 +1882,48 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
        }
 
-       /* Do LUN discovery. */
        if (fcport->port_type == FCT_INITIATOR ||
-           fcport->port_type == FCT_BROADCAST) {
+           fcport->port_type == FCT_BROADCAST)
                fcport->device_type = TYPE_PROCESSOR;
-       } else {
-               qla2x00_lun_discovery(ha, fcport);
-       }
-       atomic_set(&fcport->state, FCS_ONLINE);
-}
 
-/*
- * qla2x00_lun_discovery
- *     Issue SCSI inquiry command for LUN discovery.
- *
- * Input:
- *     ha:             adapter state pointer.
- *     fcport:         FC port structure pointer.
- *
- * Context:
- *     Kernel context.
- */
-static void
-qla2x00_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport)
-{
-       inq_cmd_rsp_t   *inq;
-       dma_addr_t      inq_dma;
-       uint16_t        lun;
-
-       inq = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &inq_dma);
-       if (inq == NULL) {
-               qla_printk(KERN_WARNING, ha,
-                   "Memory Allocation failed - INQ\n");
-               return;
-       }
-
-       /* Always add a fc_lun_t structure for lun 0 -- mid-layer requirement */
-       qla2x00_add_lun(fcport, 0);
-
-       /* If report LUN works, exit. */
-       if (qla2x00_rpt_lun_discovery(ha, fcport, inq, inq_dma) !=
-           QLA_SUCCESS) {
-               for (lun = 0; lun < ha->max_probe_luns; lun++) {
-                       /* Configure LUN. */
-                       qla2x00_cfg_lun(ha, fcport, lun, inq, inq_dma);
-               }
-       }
-
-       dma_pool_free(ha->s_dma_pool, inq, inq_dma);
-}
-
-/*
- * qla2x00_rpt_lun_discovery
- *     Issue SCSI report LUN command for LUN discovery.
- *
- * Input:
- *     ha:             adapter state pointer.
- *     fcport:         FC port structure pointer.
- *
- * Returns:
- *     qla2x00 local function return status code.
- *
- * Context:
- *     Kernel context.
- */
-static int
-qla2x00_rpt_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport,
-    inq_cmd_rsp_t *inq, dma_addr_t inq_dma)
-{
-       int                     rval;
-       uint32_t                len, cnt;
-       uint16_t                lun;
-
-       /* Assume a failed status */
-       rval = QLA_FUNCTION_FAILED;
-
-       /* No point in continuing if the device doesn't support RLC */
-       if ((fcport->flags & FCF_RLC_SUPPORT) == 0)
-               return (rval);
-
-       rval = qla2x00_report_lun(ha, fcport);
-       if (rval != QLA_SUCCESS)
-               return (rval);
-
-       /* Configure LUN list. */
-       len = be32_to_cpu(ha->rlc_rsp->list.hdr.len);
-       len /= 8;
-       for (cnt = 0; cnt < len; cnt++) {
-               lun = CHAR_TO_SHORT(ha->rlc_rsp->list.lst[cnt].lsb,
-                   ha->rlc_rsp->list.lst[cnt].msb.b);
-
-               DEBUG3(printk("scsi(%ld): RLC lun = (%d)\n", ha->host_no, lun));
-
-               /* We only support 0 through MAX_LUNS-1 range */
-               if (lun < MAX_LUNS) {
-                       qla2x00_cfg_lun(ha, fcport, lun, inq, inq_dma);
-               }
-       }
        atomic_set(&fcport->state, FCS_ONLINE);
 
-       return (rval);
-}
-
-/*
- * qla2x00_report_lun
- *     Issue SCSI report LUN command.
- *
- * Input:
- *     ha:             adapter state pointer.
- *     fcport:         FC port structure pointer.
- *
- * Returns:
- *     qla2x00 local function return status code.
- *
- * Context:
- *     Kernel context.
- */
-static int
-qla2x00_report_lun(scsi_qla_host_t *ha, fc_port_t *fcport)
-{
-       int rval;
-       uint16_t retries;
-       uint16_t comp_status;
-       uint16_t scsi_status;
-       rpt_lun_cmd_rsp_t *rlc;
-       dma_addr_t rlc_dma;
-
-       rval = QLA_FUNCTION_FAILED;
-       rlc = ha->rlc_rsp;
-       rlc_dma = ha->rlc_rsp_dma;
-
-       for (retries = 3; retries; retries--) {
-               memset(rlc, 0, sizeof(rpt_lun_cmd_rsp_t));
-               rlc->p.cmd.entry_type = COMMAND_A64_TYPE;
-               rlc->p.cmd.entry_count = 1;
-               SET_TARGET_ID(ha, rlc->p.cmd.target, fcport->loop_id);
-               rlc->p.cmd.control_flags =
-                   __constant_cpu_to_le16(CF_READ | CF_SIMPLE_TAG);
-               rlc->p.cmd.scsi_cdb[0] = REPORT_LUNS;
-               rlc->p.cmd.scsi_cdb[8] = MSB(sizeof(rpt_lun_lst_t));
-               rlc->p.cmd.scsi_cdb[9] = LSB(sizeof(rpt_lun_lst_t));
-               rlc->p.cmd.dseg_count = __constant_cpu_to_le16(1);
-               rlc->p.cmd.timeout = __constant_cpu_to_le16(10);
-               rlc->p.cmd.byte_count =
-                   __constant_cpu_to_le32(sizeof(rpt_lun_lst_t));
-               rlc->p.cmd.dseg_0_address[0] = cpu_to_le32(
-                   LSD(rlc_dma + sizeof(sts_entry_t)));
-               rlc->p.cmd.dseg_0_address[1] = cpu_to_le32(
-                   MSD(rlc_dma + sizeof(sts_entry_t)));
-               rlc->p.cmd.dseg_0_length =
-                   __constant_cpu_to_le32(sizeof(rpt_lun_lst_t));
-
-               rval = qla2x00_issue_iocb(ha, rlc, rlc_dma,
-                   sizeof(rpt_lun_cmd_rsp_t));
-
-               comp_status = le16_to_cpu(rlc->p.rsp.comp_status);
-               scsi_status = le16_to_cpu(rlc->p.rsp.scsi_status);
-
-               if (rval != QLA_SUCCESS || comp_status != CS_COMPLETE ||
-                   scsi_status & SS_CHECK_CONDITION) {
-
-                       /* Device underrun, treat as OK. */
-                       if (rval == QLA_SUCCESS &&
-                           comp_status == CS_DATA_UNDERRUN &&
-                           scsi_status & SS_RESIDUAL_UNDER) {
-
-                               rval = QLA_SUCCESS;
-                               break;
-                       }
-
-                       DEBUG(printk("scsi(%ld): RLC failed to issue iocb! "
-                           "fcport=[%04x/%p] rval=%x cs=%x ss=%x\n",
-                           ha->host_no, fcport->loop_id, fcport, rval,
-                           comp_status, scsi_status));
-
-                       rval = QLA_FUNCTION_FAILED;
-                       if (scsi_status & SS_CHECK_CONDITION) {
-                               DEBUG2(printk("scsi(%ld): RLC "
-                                   "SS_CHECK_CONDITION Sense Data "
-                                   "%02x %02x %02x %02x %02x %02x %02x %02x\n",
-                                   ha->host_no,
-                                   rlc->p.rsp.req_sense_data[0],
-                                   rlc->p.rsp.req_sense_data[1],
-                                   rlc->p.rsp.req_sense_data[2],
-                                   rlc->p.rsp.req_sense_data[3],
-                                   rlc->p.rsp.req_sense_data[4],
-                                   rlc->p.rsp.req_sense_data[5],
-                                   rlc->p.rsp.req_sense_data[6],
-                                   rlc->p.rsp.req_sense_data[7]));
-                               if (rlc->p.rsp.req_sense_data[2] ==
-                                   ILLEGAL_REQUEST) {
-                                       fcport->flags &= ~(FCF_RLC_SUPPORT);
-                                       break;
-                               }
-                       }
-               } else {
-                       break;
-               }
-       }
-
-       return (rval);
+       if (ha->flags.init_done)
+               qla2x00_reg_remote_port(ha, fcport);
 }
 
-/*
- * qla2x00_cfg_lun
- *     Configures LUN into fcport LUN list.
- *
- * Input:
- *     fcport:         FC port structure pointer.
- *     lun:            LUN number.
- *
- * Context:
- *     Kernel context.
- */
-static fc_lun_t *
-qla2x00_cfg_lun(scsi_qla_host_t *ha, fc_port_t *fcport, uint16_t lun,
-    inq_cmd_rsp_t *inq, dma_addr_t inq_dma) 
-{
-       fc_lun_t *fclun;
-       uint8_t   device_type;
-
-       /* Bypass LUNs that failed. */
-       if (qla2x00_inquiry(ha, fcport, lun, inq, inq_dma) != QLA_SUCCESS) {
-               DEBUG2(printk("scsi(%ld): Failed inquiry - loop id=0x%04x "
-                   "lun=%d\n", ha->host_no, fcport->loop_id, lun));
-
-               return (NULL);
-       }
-       device_type = (inq->inq[0] & 0x1f);
-       switch (device_type) {
-       case TYPE_DISK:
-       case TYPE_PROCESSOR:
-       case TYPE_WORM:
-       case TYPE_ROM:
-       case TYPE_SCANNER:
-       case TYPE_MOD:
-       case TYPE_MEDIUM_CHANGER:
-       case TYPE_ENCLOSURE:
-       case 0x20:
-       case 0x0C:
-               break;
-       case TYPE_TAPE:
-               fcport->flags |= FCF_TAPE_PRESENT;
-               break;
-       default:
-               DEBUG2(printk("scsi(%ld): Unsupported lun type -- "
-                   "loop id=0x%04x lun=%d type=%x\n",
-                   ha->host_no, fcport->loop_id, lun, device_type));
-               return (NULL);
-       }
-
-       fcport->device_type = device_type;
-       fclun = qla2x00_add_lun(fcport, lun);
-
-       if (fclun != NULL) {
-               atomic_set(&fcport->state, FCS_ONLINE);
-       }
-
-       return (fclun);
-}
-
-/*
- * qla2x00_add_lun
- *     Adds LUN to database
- *
- * Input:
- *     fcport:         FC port structure pointer.
- *     lun:            LUN number.
- *
- * Context:
- *     Kernel context.
- */
-static fc_lun_t *
-qla2x00_add_lun(fc_port_t *fcport, uint16_t lun)
+void
+qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
 {
-       int             found;
-       fc_lun_t        *fclun;
+       struct fc_rport_identifiers rport_ids;
+       struct fc_rport *rport;
 
-       if (fcport == NULL) {
-               DEBUG(printk("scsi: Unable to add lun to NULL port\n"));
-               return (NULL);
-       }
-
-       /* Allocate LUN if not already allocated. */
-       found = 0;
-       list_for_each_entry(fclun, &fcport->fcluns, list) {
-               if (fclun->lun == lun) {
-                       found++;
-                       break;
-               }
-       }
-       if (found)
-               return (NULL);
-
-       fclun = kmalloc(sizeof(fc_lun_t), GFP_ATOMIC);
-       if (fclun == NULL) {
-               printk(KERN_WARNING
-                   "%s(): Memory Allocation failed - FCLUN\n",
-                   __func__);
-               return (NULL);
+       if (fcport->rport) {
+               fc_remote_port_unblock(fcport->rport);
+               return;
        }
 
-       /* Setup LUN structure. */
-       memset(fclun, 0, sizeof(fc_lun_t));
-       fclun->lun = lun;
-       fclun->fcport = fcport;
-       fclun->o_fcport = fcport;
-       fclun->device_type = fcport->device_type;
-       atomic_set(&fcport->state, FCS_UNCONFIGURED);
-
-       list_add_tail(&fclun->list, &fcport->fcluns);
-
-       return (fclun);
-}
-
-/*
- * qla2x00_inquiry
- *     Issue SCSI inquiry command.
- *
- * Input:
- *     ha = adapter block pointer.
- *     fcport = FC port structure pointer.
- *
- * Return:
- *     0  - Success
- *  BIT_0 - error
- *
- * Context:
- *     Kernel context.
- */
-static int
-qla2x00_inquiry(scsi_qla_host_t *ha,
-    fc_port_t *fcport, uint16_t lun, inq_cmd_rsp_t *inq, dma_addr_t inq_dma)
-{
-       int rval;
-       uint16_t retries;
-       uint16_t comp_status;
-       uint16_t scsi_status;
-
-       rval = QLA_FUNCTION_FAILED;
+       rport_ids.node_name = be64_to_cpu(*(uint64_t *)fcport->node_name);
+       rport_ids.port_name = be64_to_cpu(*(uint64_t *)fcport->port_name);
+       rport_ids.port_id = fcport->d_id.b.domain << 16 |
+           fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
+       rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+       if (fcport->port_type == FCT_INITIATOR)
+               rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+       if (fcport->port_type == FCT_TARGET)
+               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
 
-       for (retries = 3; retries; retries--) {
-               memset(inq, 0, sizeof(inq_cmd_rsp_t));
-               inq->p.cmd.entry_type = COMMAND_A64_TYPE;
-               inq->p.cmd.entry_count = 1;
-               inq->p.cmd.lun = cpu_to_le16(lun);
-               SET_TARGET_ID(ha, inq->p.cmd.target, fcport->loop_id);
-               inq->p.cmd.control_flags =
-                   __constant_cpu_to_le16(CF_READ | CF_SIMPLE_TAG);
-               inq->p.cmd.scsi_cdb[0] = INQUIRY;
-               inq->p.cmd.scsi_cdb[4] = INQ_DATA_SIZE;
-               inq->p.cmd.dseg_count = __constant_cpu_to_le16(1);
-               inq->p.cmd.timeout = __constant_cpu_to_le16(10);
-               inq->p.cmd.byte_count =
-                   __constant_cpu_to_le32(INQ_DATA_SIZE);
-               inq->p.cmd.dseg_0_address[0] = cpu_to_le32(
-                   LSD(inq_dma + sizeof(sts_entry_t)));
-               inq->p.cmd.dseg_0_address[1] = cpu_to_le32(
-                   MSD(inq_dma + sizeof(sts_entry_t)));
-               inq->p.cmd.dseg_0_length =
-                   __constant_cpu_to_le32(INQ_DATA_SIZE);
-
-               DEBUG5(printk("scsi(%ld): Lun Inquiry - fcport=[%04x/%p],"
-                   " lun (%d)\n",
-                   ha->host_no, fcport->loop_id, fcport, lun));
-
-               rval = qla2x00_issue_iocb(ha, inq, inq_dma,
-                   sizeof(inq_cmd_rsp_t));
-
-               comp_status = le16_to_cpu(inq->p.rsp.comp_status);
-               scsi_status = le16_to_cpu(inq->p.rsp.scsi_status);
-
-               DEBUG5(printk("scsi(%ld): lun (%d) inquiry - "
-                   "inq[0]= 0x%x, comp status 0x%x, scsi status 0x%x, "
-                   "rval=%d\n",
-                   ha->host_no, lun, inq->inq[0], comp_status, scsi_status,
-                   rval));
-
-               if (rval != QLA_SUCCESS || comp_status != CS_COMPLETE ||
-                   scsi_status & SS_CHECK_CONDITION) {
-
-                       DEBUG(printk("scsi(%ld): INQ failed to issue iocb! "
-                           "fcport=[%04x/%p] rval=%x cs=%x ss=%x\n",
-                           ha->host_no, fcport->loop_id, fcport, rval,
-                           comp_status, scsi_status));
-
-                       if (rval == QLA_SUCCESS)
-                               rval = QLA_FUNCTION_FAILED;
-
-                       if (scsi_status & SS_CHECK_CONDITION) {
-                               DEBUG2(printk("scsi(%ld): INQ "
-                                   "SS_CHECK_CONDITION Sense Data "
-                                   "%02x %02x %02x %02x %02x %02x %02x %02x\n",
-                                   ha->host_no,
-                                   inq->p.rsp.req_sense_data[0],
-                                   inq->p.rsp.req_sense_data[1],
-                                   inq->p.rsp.req_sense_data[2],
-                                   inq->p.rsp.req_sense_data[3],
-                                   inq->p.rsp.req_sense_data[4],
-                                   inq->p.rsp.req_sense_data[5],
-                                   inq->p.rsp.req_sense_data[6],
-                                   inq->p.rsp.req_sense_data[7]));
-                       }
+       fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+       if (!rport)
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate fc remote port!\n");
 
-                       /* Device underrun drop LUN. */
-                       if (comp_status == CS_DATA_UNDERRUN &&
-                           scsi_status & SS_RESIDUAL_UNDER) {
-                               break;
-                       }
-               } else {
-                       break;
-               }
-       }
+       if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
+               fcport->os_target_id = rport->scsi_target_id;
 
-       return (rval);
+       rport->dd_data = fcport;
 }
 
-
 /*
  * qla2x00_configure_fabric
  *      Setup SNS devices with loop ID's.
@@ -2486,12 +2074,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
                                break;
                        }
 
-                       /* Login and update database */
-                       qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
-
                        /* Remove device from the new list and add it to DB */
                        list_del(&fcport->list);
                        list_add_tail(&fcport->list, &ha->fcports);
+
+                       /* Login and update database */
+                       qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
                }
        } while (0);
 
@@ -2895,8 +2483,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
                        if (atomic_read(&fcport->state) == FCS_ONLINE) {
                                if (format != 3 ||
                                    fcport->port_type != FCT_INITIATOR) {
-                                       atomic_set(&fcport->state,
-                                           FCS_DEVICE_LOST);
+                                       qla2x00_mark_device_lost(ha, fcport, 0);
                                }
                        }
                        fcport->flags &= ~FCF_FARP_DONE;
@@ -3146,7 +2733,6 @@ qla2x00_loop_resync(scsi_qla_host_t *ha)
                                wait_time &&
                                (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
                }
-               qla2x00_restart_queues(ha, 1);
        }
 
        if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
@@ -3160,87 +2746,6 @@ qla2x00_loop_resync(scsi_qla_host_t *ha)
        return (rval);
 }
 
-/*
- *  qla2x00_restart_queues
- *     Restart device queues.
- *
- * Input:
- *     ha = adapter block pointer.
- *
- * Context:
- *     Kernel/Interrupt context.
- */
-void
-qla2x00_restart_queues(scsi_qla_host_t *ha, uint8_t flush) 
-{
-       srb_t           *sp;
-       int             retry_q_cnt = 0;
-       int             pending_q_cnt = 0;
-       struct list_head *list, *temp;
-       unsigned long flags = 0;
-
-       clear_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
-
-       /* start pending queue */
-       pending_q_cnt = ha->qthreads;
-       if (flush) {
-               spin_lock_irqsave(&ha->list_lock,flags);
-               list_for_each_safe(list, temp, &ha->pending_queue) {
-                       sp = list_entry(list, srb_t, list);
-
-                       if ((sp->flags & SRB_TAPE))
-                               continue;
-                        
-                       /* 
-                        * When time expire return request back to OS as BUSY 
-                        */
-                       __del_from_pending_queue(ha, sp);
-                       sp->cmd->result = DID_BUS_BUSY << 16;
-                       sp->cmd->host_scribble = (unsigned char *)NULL;
-                       __add_to_done_queue(ha, sp);
-               }
-               spin_unlock_irqrestore(&ha->list_lock, flags);
-       } else {
-               if (!list_empty(&ha->pending_queue))
-                       qla2x00_next(ha);
-       }
-
-       /*
-        * Clear out our retry queue
-        */
-       if (flush) {
-               spin_lock_irqsave(&ha->list_lock, flags);
-               retry_q_cnt = ha->retry_q_cnt;
-               list_for_each_safe(list, temp, &ha->retry_queue) {
-                       sp = list_entry(list, srb_t, list);
-                       /* when time expire return request back to OS as BUSY */
-                       __del_from_retry_queue(ha, sp);
-                       sp->cmd->result = DID_BUS_BUSY << 16;
-                       sp->cmd->host_scribble = (unsigned char *)NULL;
-                       __add_to_done_queue(ha, sp);
-               }
-               spin_unlock_irqrestore(&ha->list_lock, flags);
-
-               DEBUG2(printk("%s(%ld): callback %d commands.\n",
-                               __func__,
-                               ha->host_no,
-                               retry_q_cnt);)
-       }
-
-       DEBUG2(printk("%s(%ld): active=%ld, retry=%d, pending=%d, "
-                       "done=%ld, scsi retry=%d commands.\n",
-                       __func__,
-                       ha->host_no,
-                       ha->actthreads,
-                       ha->retry_q_cnt,
-                       pending_q_cnt,
-                       ha->done_q_cnt,
-                       ha->scsi_retry_q_cnt);)
-
-       if (!list_empty(&ha->done_queue))
-               qla2x00_done(ha);
-}
-
 void
 qla2x00_rescan_fcports(scsi_qla_host_t *ha)
 {
@@ -3258,396 +2763,6 @@ qla2x00_rescan_fcports(scsi_qla_host_t *ha)
                rescan_done = 1;
        }
        qla2x00_probe_for_all_luns(ha); 
-
-       /* Update OS target and lun structures if necessary. */
-       if (rescan_done) {
-               qla2x00_config_os(ha);
-       }
-}
-
-
-/*
- * qla2x00_config_os
- *     Setup OS target and LUN structures.
- *
- * Input:
- *     ha = adapter state pointer.
- *
- * Context:
- *     Kernel context.
- */
-static void
-qla2x00_config_os(scsi_qla_host_t *ha) 
-{
-       fc_port_t       *fcport;
-       fc_lun_t        *fclun;
-       os_tgt_t        *tq;
-       uint16_t        tgt;
-
-
-       for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
-               if ((tq = TGT_Q(ha, tgt)) == NULL)
-                       continue;
-
-               clear_bit(TQF_ONLINE, &tq->flags);
-       }
-
-       list_for_each_entry(fcport, &ha->fcports, list) {
-               if (atomic_read(&fcport->state) != FCS_ONLINE ||
-                   fcport->port_type == FCT_INITIATOR ||
-                   fcport->port_type == FCT_BROADCAST) {
-                       fcport->os_target_id = MAX_TARGETS;
-                       continue;
-               }
-
-               if (fcport->flags & FCF_FO_MASKED) {
-                       continue;
-               }
-
-               /* Bind FC port to OS target number. */
-               if (qla2x00_fcport_bind(ha, fcport) == MAX_TARGETS) {
-                       continue;
-               }
-
-               /* Bind FC LUN to OS LUN number. */
-               list_for_each_entry(fclun, &fcport->fcluns, list) {
-                       qla2x00_fclun_bind(ha, fcport, fclun);
-               }
-       }
-}
-
-/*
- * qla2x00_fcport_bind
- *     Locates a target number for FC port.
- *
- * Input:
- *     ha = adapter state pointer.
- *     fcport = FC port structure pointer.
- *
- * Returns:
- *     target number
- *
- * Context:
- *     Kernel context.
- */
-static uint16_t
-qla2x00_fcport_bind(scsi_qla_host_t *ha, fc_port_t *fcport) 
-{
-       int             found;
-       uint16_t        tgt;
-       os_tgt_t        *tq;
-
-       /* Check for persistent binding. */
-       for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
-               if ((tq = TGT_Q(ha, tgt)) == NULL)
-                       continue;
-
-               found = 0;
-               switch (ha->binding_type) {
-               case BIND_BY_PORT_ID:
-                       if (fcport->d_id.b24 == tq->d_id.b24) {
-                               memcpy(tq->node_name, fcport->node_name,
-                                   WWN_SIZE);
-                               memcpy(tq->port_name, fcport->port_name,
-                                   WWN_SIZE);
-                               found++;
-                       }
-                       break;
-               case BIND_BY_PORT_NAME:    
-                       if (memcmp(fcport->port_name, tq->port_name,
-                           WWN_SIZE) == 0) {
-                               /*
-                                * In case of persistent binding, update the
-                                * WWNN.
-                                */
-                               memcpy(tq->node_name, fcport->node_name,
-                                   WWN_SIZE);
-                               found++;
-                       }
-                       break;
-               }
-               if (found)
-                   break;      
-       }
-
-       /* TODO: honor the ConfigRequired flag */
-       if (tgt == MAX_TARGETS) {
-               /* Check if targetID 0 available. */
-               tgt = 0;
-
-               if (TGT_Q(ha, tgt) != NULL) {
-                       /* Locate first free target for device. */
-                       for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
-                               if (TGT_Q(ha, tgt) == NULL) {
-                                       break;
-                               }
-                       }
-               }
-               if (tgt != MAX_TARGETS) {
-                       if ((tq = qla2x00_tgt_alloc(ha, tgt)) != NULL) {
-                               memcpy(tq->node_name, fcport->node_name,
-                                   WWN_SIZE);
-                               memcpy(tq->port_name, fcport->port_name,
-                                   WWN_SIZE);
-                               tq->d_id.b24 = fcport->d_id.b24;
-                       }
-               }
-       }
-
-       /* Reset target numbers incase it changed. */
-       fcport->os_target_id = tgt;
-       if (tgt != MAX_TARGETS && tq != NULL) {
-               DEBUG2(printk("scsi(%ld): Assigning target ID=%02d @ %p to "
-                   "loop id=0x%04x, port state=0x%x, port down retry=%d\n",
-                   ha->host_no, tgt, tq, fcport->loop_id,
-                   atomic_read(&fcport->state),
-                   atomic_read(&fcport->port_down_timer)));
-
-               fcport->tgt_queue = tq;
-               fcport->flags |= FCF_PERSISTENT_BOUND;
-               tq->fcport = fcport;
-               set_bit(TQF_ONLINE, &tq->flags);
-               tq->port_down_retry_count = ha->port_down_retry_count;
-       }
-
-       if (tgt == MAX_TARGETS) {
-               qla_printk(KERN_WARNING, ha,
-                   "Unable to bind fcport, loop_id=%x\n", fcport->loop_id);
-       }
-
-       return (tgt);
-}
-
-/*
- * qla2x00_fclun_bind
- *     Binds all FC device LUNS to OS LUNS.
- *
- * Input:
- *     ha:             adapter state pointer.
- *     fcport:         FC port structure pointer.
- *
- * Returns:
- *     target number
- *
- * Context:
- *     Kernel context.
- */
-static os_lun_t *
-qla2x00_fclun_bind(scsi_qla_host_t *ha, fc_port_t *fcport, fc_lun_t *fclun)
-{
-       os_lun_t        *lq;
-       uint16_t        tgt;
-       uint16_t        lun;
-
-       tgt = fcport->os_target_id;
-       lun = fclun->lun;
-
-       /* Allocate LUNs */
-       if (lun >= MAX_LUNS) {
-               DEBUG2(printk("scsi(%ld): Unable to bind lun, invalid "
-                   "lun=(%x).\n", ha->host_no, lun));
-               return (NULL);
-       }
-
-       /* Always alloc LUN 0 so kernel will scan past LUN 0. */
-       if (lun != 0 && (EXT_IS_LUN_BIT_SET(&(fcport->lun_mask), lun))) {
-               return (NULL);
-       }
-
-       if ((lq = qla2x00_lun_alloc(ha, tgt, lun)) == NULL) {
-               qla_printk(KERN_WARNING, ha,
-                   "Unable to bind fclun, loop_id=%x lun=%x\n",
-                   fcport->loop_id, lun);
-               return (NULL);
-       }
-
-       lq->fclun = fclun;
-
-       return (lq);
-}
-
-/*
- * qla2x00_tgt_alloc
- *     Allocate and pre-initialize target queue.
- *
- * Input:
- *     ha = adapter block pointer.
- *     t = SCSI target number.
- *
- * Returns:
- *     NULL = failure
- *
- * Context:
- *     Kernel context.
- */
-static os_tgt_t *
-qla2x00_tgt_alloc(scsi_qla_host_t *ha, uint16_t tgt) 
-{
-       os_tgt_t        *tq;
-
-       /*
-        * If SCSI addressing OK, allocate TGT queue and lock.
-        */
-       if (tgt >= MAX_TARGETS) {
-               DEBUG2(printk("scsi(%ld): Unable to allocate target, invalid "
-                   "target number %d.\n", ha->host_no, tgt));
-               return (NULL);
-       }
-
-       tq = TGT_Q(ha, tgt);
-       if (tq == NULL) {
-               tq = kmalloc(sizeof(os_tgt_t), GFP_ATOMIC);
-               if (tq != NULL) {
-                       DEBUG2(printk("scsi(%ld): Alloc Target %d @ %p\n",
-                           ha->host_no, tgt, tq));
-
-                       memset(tq, 0, sizeof(os_tgt_t));
-                       tq->ha = ha;
-
-                       TGT_Q(ha, tgt) = tq;
-               }
-       }
-       if (tq != NULL) {
-               tq->port_down_retry_count = ha->port_down_retry_count;
-       } else {
-               qla_printk(KERN_WARNING, ha,
-                   "Unable to allocate target.\n");
-               ha->mem_err++;
-       }
-
-       return (tq);
-}
-
-/*
- * qla2x00_tgt_free
- *     Frees target and LUN queues.
- *
- * Input:
- *     ha = adapter block pointer.
- *     t = SCSI target number.
- *
- * Context:
- *     Kernel context.
- */
-void
-qla2x00_tgt_free(scsi_qla_host_t *ha, uint16_t tgt) 
-{
-       os_tgt_t        *tq;
-       uint16_t        lun;
-
-       /*
-        * If SCSI addressing OK, allocate TGT queue and lock.
-        */
-       if (tgt >= MAX_TARGETS) {
-               DEBUG2(printk("scsi(%ld): Unable to de-allocate target, "
-                   "invalid target number %d.\n", ha->host_no, tgt));
-
-               return;
-       }
-
-       tq = TGT_Q(ha, tgt);
-       if (tq != NULL) {
-               TGT_Q(ha, tgt) = NULL;
-
-               /* Free LUN structures. */
-               for (lun = 0; lun < MAX_LUNS; lun++)
-                       qla2x00_lun_free(ha, tgt, lun);
-
-               kfree(tq);
-       }
-
-       return;
-}
-
-/*
- * qla2x00_lun_alloc
- *     Allocate and initialize LUN queue.
- *
- * Input:
- *     ha = adapter block pointer.
- *     t = SCSI target number.
- *     l = LUN number.
- *
- * Returns:
- *     NULL = failure
- *
- * Context:
- *     Kernel context.
- */
-static os_lun_t *
-qla2x00_lun_alloc(scsi_qla_host_t *ha, uint16_t tgt, uint16_t lun) 
-{
-       os_lun_t        *lq;
-
-       /*
-        * If SCSI addressing OK, allocate LUN queue.
-        */
-       if (tgt >= MAX_TARGETS || lun >= MAX_LUNS || TGT_Q(ha, tgt) == NULL) {
-               DEBUG2(printk("scsi(%ld): Unable to allocate lun, invalid "
-                   "parameter.\n", ha->host_no));
-
-               return (NULL);
-       }
-
-       lq = LUN_Q(ha, tgt, lun);
-       if (lq == NULL) {
-               lq = kmalloc(sizeof(os_lun_t), GFP_ATOMIC);
-               if (lq != NULL) {
-                       DEBUG2(printk("scsi(%ld): Alloc Lun %d @ tgt %d.\n",
-                           ha->host_no, lun, tgt));
-
-                       memset(lq, 0, sizeof(os_lun_t));
-                       LUN_Q(ha, tgt, lun) = lq;
-
-                       /*
-                        * The following lun queue initialization code
-                        * must be duplicated in alloc_ioctl_mem function
-                        * for ioctl_lq.
-                        */
-                       lq->q_state = LUN_STATE_READY;
-                       spin_lock_init(&lq->q_lock);
-               }
-       }
-
-       if (lq == NULL) {
-               qla_printk(KERN_WARNING, ha, "Unable to allocate lun.\n");
-       }
-
-       return (lq);
-}
-
-/*
- * qla2x00_lun_free
- *     Frees LUN queue.
- *
- * Input:
- *     ha = adapter block pointer.
- *     t = SCSI target number.
- *
- * Context:
- *     Kernel context.
- */
-static void
-qla2x00_lun_free(scsi_qla_host_t *ha, uint16_t tgt, uint16_t lun) 
-{
-       os_lun_t        *lq;
-
-       /*
-        * If SCSI addressing OK, allocate TGT queue and lock.
-        */
-       if (tgt >= MAX_TARGETS || lun >= MAX_LUNS) {
-               DEBUG2(printk("scsi(%ld): Unable to deallocate lun, invalid "
-                   "parameter.\n", ha->host_no));
-
-               return;
-       }
-
-       if (TGT_Q(ha, tgt) != NULL && (lq = LUN_Q(ha, tgt, lun)) != NULL) {
-               LUN_Q(ha, tgt, lun) = NULL;
-               kfree(lq);
-       }
-
-       return;
 }
 
 /*
@@ -3697,26 +2812,10 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
                                ha->outstanding_cmds[cnt] = NULL;
                                if (ha->actthreads)
                                        ha->actthreads--;
-                               sp->lun_queue->out_cnt--;
-
-                               /*
-                                * Set the cmd host_byte status depending on
-                                * whether the scsi_error_handler is
-                                * active or not.
-                                */
-                               if (sp->flags & SRB_TAPE) {
-                                       sp->cmd->result = DID_NO_CONNECT << 16;
-                               } else {
-                                       if (ha->host->eh_active != EH_ACTIVE)
-                                               sp->cmd->result =
-                                                   DID_BUS_BUSY << 16;
-                                       else
-                                               sp->cmd->result =
-                                                   DID_RESET << 16;
-                               }
                                sp->flags = 0;
+                               sp->cmd->result = DID_RESET << 16;
                                sp->cmd->host_scribble = (unsigned char *)NULL;
-                               add_to_done_queue(ha, sp);
+                               qla2x00_sp_compl(ha, sp);
                        }
                }
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3739,11 +2838,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
                        /* Enable ISP interrupts. */
                        qla2x00_enable_intrs(ha);
 
-                       /* v2.19.5b6 Return all commands */
-                       qla2x00_abort_queues(ha, 1);
-
-                       /* Restart queues that may have been stopped. */
-                       qla2x00_restart_queues(ha, 1);
                        ha->isp_abort_cnt = 0; 
                        clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
                } else {        /* failed the ISP abort */
@@ -3758,7 +2852,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
                                         * completely.
                                         */
                                        qla2x00_reset_adapter(ha);
-                                       qla2x00_abort_queues(ha, 0);
                                        ha->flags.online = 0;
                                        clear_bit(ISP_ABORT_RETRY,
                                            &ha->dpc_flags);
index 07c11330f9a3e1ca94c45ab8ad44339d066df477..6a05d1b8d48a13e8b2afd35bdd17359a714bbbc0 100644 (file)
@@ -187,23 +187,6 @@ qla2x00_is_wwn_zero(uint8_t *wwn)
                return (0);
 }
 
-static __inline__ uint8_t
-qla2x00_suspend_lun(scsi_qla_host_t *, os_lun_t *, int, int);
-static __inline__ uint8_t
-qla2x00_delay_lun(scsi_qla_host_t *, os_lun_t *, int);
-
-static __inline__ uint8_t
-qla2x00_suspend_lun(scsi_qla_host_t *ha, os_lun_t *lq, int time, int count)
-{
-       return (__qla2x00_suspend_lun(ha, lq, time, count, 0));
-}
-
-static __inline__ uint8_t
-qla2x00_delay_lun(scsi_qla_host_t *ha, os_lun_t *lq, int time)
-{
-       return (__qla2x00_suspend_lun(ha, lq, time, 1, 1));
-}
-
 static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
 /*
  * This routine will wait for fabric devices for
index ec066074c72297baa28e1c81804269a09149e06c..af964bb3d87052c3f61807344f79135339471b9d 100644 (file)
@@ -216,18 +216,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
                        cur_seg++;
                }
        } else {
-               dma_addr_t      req_dma;
-               struct page     *page;
-               unsigned long   offset;
-
-               page = virt_to_page(cmd->request_buffer);
-               offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
-               req_dma = pci_map_page(ha->pdev, page, offset,
-                   cmd->request_bufflen, cmd->sc_data_direction);
-
-               sp->dma_handle = req_dma;
-
-               *cur_dsd++ = cpu_to_le32(req_dma);
+               *cur_dsd++ = cpu_to_le32(sp->dma_handle);
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
 }
@@ -299,19 +288,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
                        cur_seg++;
                }
        } else {
-               dma_addr_t      req_dma;
-               struct page     *page;
-               unsigned long   offset;
-
-               page = virt_to_page(cmd->request_buffer);
-               offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
-               req_dma = pci_map_page(ha->pdev, page, offset,
-                   cmd->request_bufflen, cmd->sc_data_direction);
-
-               sp->dma_handle = req_dma;
-
-               *cur_dsd++ = cpu_to_le32(LSD(req_dma));
-               *cur_dsd++ = cpu_to_le32(MSD(req_dma));
+               *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
+               *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
 }
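
The 32- and 64-bit IOCB builders above now take the bus address from sp->dma_handle instead of calling pci_map_page() on cmd->request_buffer themselves; the mapping itself moves into qla2x00_start_scsi() in the next hunk. A minimal sketch of how a 64-bit DMA address is split across two 32-bit descriptor words, in the spirit of the LSD()/MSD() macros used above (the three-word descriptor layout here is a stand-in, not the driver's IOCB format):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch only: split a 64-bit DMA address into the low and high 32-bit
 * words of a data segment descriptor, followed by the segment length. */
static inline void sketch_fill_dsd64(__le32 *dsd, dma_addr_t addr, u32 len)
{
        dsd[0] = cpu_to_le32((u32)(addr & 0xffffffff));   /* LSD(addr) */
        dsd[1] = cpu_to_le32((u32)((u64)addr >> 32));     /* MSD(addr) */
        dsd[2] = cpu_to_le32(len);
}
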
@@ -328,13 +306,11 @@ qla2x00_start_scsi(srb_t *sp)
        int             ret;
        unsigned long   flags;
        scsi_qla_host_t *ha;
-       fc_lun_t        *fclun;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
-       uint32_t        timeout;
        struct scatterlist *sg;
        uint16_t        cnt;
        uint16_t        req_cnt;
@@ -344,10 +320,11 @@ qla2x00_start_scsi(srb_t *sp)
 
        /* Setup device pointers. */
        ret = 0;
-       fclun = sp->lun_queue->fclun;
-       ha = fclun->fcport->ha;
+       ha = sp->ha;
        reg = ha->iobase;
        cmd = sp->cmd;
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
 
        /* Send marker if required */
        if (ha->marker_needed != 0) {
@@ -372,8 +349,27 @@ qla2x00_start_scsi(srb_t *sp)
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;
 
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (cmd->use_sg) {
+               sg = (struct scatterlist *) cmd->request_buffer;
+               tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
+                   cmd->sc_data_direction);
+               if (tot_dsds == 0)
+                       goto queuing_error;
+       } else if (cmd->request_bufflen) {
+               dma_addr_t      req_dma;
+
+               req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
+                   cmd->request_bufflen, cmd->sc_data_direction);
+               if (dma_mapping_error(req_dma))
+                       goto queuing_error;
+
+               sp->dma_handle = req_dma;
+               tot_dsds = 1;
+       }
+
        /* Calculate the number of request entries needed. */
-       req_cnt = (ha->calc_request_entries)(cmd->request->nr_hw_segments);
+       req_cnt = (ha->calc_request_entries)(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (ha->req_ring_index < cnt)
@@ -385,19 +381,6 @@ qla2x00_start_scsi(srb_t *sp)
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;
 
-       /* Finally, we have enough space, now perform mappings. */
-       tot_dsds = 0;
-       if (cmd->use_sg) {
-               sg = (struct scatterlist *) cmd->request_buffer;
-               tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-                   cmd->sc_data_direction);
-               if (tot_dsds == 0)
-                       goto queuing_error;
-       } else if (cmd->request_bufflen) {
-           tot_dsds++;
-       }
-       req_cnt = (ha->calc_request_entries)(tot_dsds);
-
        /* Build command packet */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
@@ -412,11 +395,9 @@ qla2x00_start_scsi(srb_t *sp)
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
 
-       /* Set target ID */
-       SET_TARGET_ID(ha, cmd_pkt->target, fclun->fcport->loop_id);
-
-       /* Set LUN number*/
-       cmd_pkt->lun = cpu_to_le16(fclun->lun);
+       /* Set target ID and LUN number*/
+       SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
+       cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
 
        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
@@ -433,18 +414,6 @@ qla2x00_start_scsi(srb_t *sp)
                }
        }
 
-       /*
-        * Allocate at least 5 (+ QLA_CMD_TIMER_DELTA) seconds for RISC timeout.
-        */
-       timeout = (uint32_t)(cmd->timeout_per_command / HZ);
-       if (timeout > 65535)
-               cmd_pkt->timeout = __constant_cpu_to_le16(0);
-       else if (timeout > 25)
-               cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout -
-                   (5 + QLA_CMD_TIMER_DELTA));
-       else
-               cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout);
-
        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
@@ -466,7 +435,6 @@ qla2x00_start_scsi(srb_t *sp)
 
        ha->actthreads++;
        ha->total_ios++;
-       sp->lun_queue->out_cnt++;
        sp->flags |= SRB_DMA_VALID;
        sp->state = SRB_ACTIVE_STATE;
        sp->u_start = jiffies;
@@ -479,6 +447,14 @@ qla2x00_start_scsi(srb_t *sp)
        return (QLA_SUCCESS);
 
 queuing_error:
+       if (cmd->use_sg && tot_dsds) {
+               sg = (struct scatterlist *) cmd->request_buffer;
+               pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
+                   cmd->sc_data_direction);
+       } else if (tot_dsds) {
+               pci_unmap_single(ha->pdev, sp->dma_handle,
+                   cmd->request_bufflen, cmd->sc_data_direction);
+       }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        return (QLA_FUNCTION_FAILED);
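
The reworked qla2x00_start_scsi() above maps the scatter/gather list, or the single buffer, before checking for request-ring space, so req_cnt is derived from the number of segments that were actually mapped rather than from cmd->request->nr_hw_segments, and the queuing_error path unmaps whatever had been mapped. A minimal sketch of that map-first pattern using the same 2.6-era PCI DMA calls (the helper name and return convention are illustrative only, not the driver's):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>

/* Sketch only: map the command's data up front so the descriptor count
 * is exact; on failure nothing is left mapped, mirroring the
 * queuing_error unwind above. */
static int sketch_map_cmd_data(struct pci_dev *pdev, struct scsi_cmnd *cmd,
                               dma_addr_t *handle, uint16_t *tot_dsds)
{
        *tot_dsds = 0;

        if (cmd->use_sg) {
                struct scatterlist *sg = cmd->request_buffer;
                int cnt = pci_map_sg(pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);

                if (cnt == 0)
                        return -ENOMEM;
                *tot_dsds = cnt;
        } else if (cmd->request_bufflen) {
                dma_addr_t dma = pci_map_single(pdev, cmd->request_buffer,
                    cmd->request_bufflen, cmd->sc_data_direction);

                if (dma_mapping_error(dma))
                        return -ENOMEM;
                *handle = dma;
                *tot_dsds = 1;
        }
        return 0;
}
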
index 603d4c683c6ce337c175903fcea2c4d479e148d9..6792cfae56e29cb48a28fcffd4f7a98c03b4fc26 100644 (file)
@@ -27,8 +27,6 @@ static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
 static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);
 
-static int qla2x00_check_sense(struct scsi_cmnd *cp, os_lun_t *);
-
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
  * @irq:
@@ -93,7 +91,6 @@ qla2100_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       qla2x00_next(ha);
        ha->last_irq_cpu = _smp_processor_id();
        ha->total_isr_cnt++;
 
@@ -107,9 +104,6 @@ qla2100_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
                spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
        }
 
-       if (!list_empty(&ha->done_queue))
-               qla2x00_done(ha);
-
        return (IRQ_HANDLED);
 }
 
@@ -206,7 +200,6 @@ qla2300_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       qla2x00_next(ha);
        ha->last_irq_cpu = _smp_processor_id();
        ha->total_isr_cnt++;
 
@@ -220,9 +213,6 @@ qla2300_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
                spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
        }
 
-       if (!list_empty(&ha->done_queue))
-               qla2x00_done(ha);
-
        return (IRQ_HANDLED);
 }
 
@@ -707,14 +697,13 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
 
                if (ha->actthreads)
                        ha->actthreads--;
-               sp->lun_queue->out_cnt--;
                CMD_COMPL_STATUS(sp->cmd) = 0L;
                CMD_SCSI_STATUS(sp->cmd) = 0L;
 
                /* Save ISP completion status */
                sp->cmd->result = DID_OK << 16;
                sp->fo_retry_cnt = 0;
-               add_to_done_queue(ha, sp);
+               qla2x00_sp_compl(ha, sp);
        } else {
                DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
                    ha->host_no));
@@ -828,11 +817,8 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
 static void
 qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
 {
-       int             ret;
        unsigned        b, t, l;
        srb_t           *sp;
-       os_lun_t        *lq;
-       os_tgt_t        *tq;
        fc_port_t       *fcport;
        struct scsi_cmnd *cp;
        uint16_t        comp_status;
@@ -882,21 +868,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
        if (ha->actthreads)
                ha->actthreads--;
 
-       if (sp->lun_queue == NULL) {
-               DEBUG2(printk("scsi(%ld): Status Entry invalid lun pointer.\n",
-                   ha->host_no));
-               qla_printk(KERN_WARNING, ha,
-                   "Status Entry invalid lun pointer.\n");
-
-               set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-               if (ha->dpc_wait && !ha->dpc_active) 
-                       up(ha->dpc_wait);
-
-               return;
-       }
-
-       sp->lun_queue->out_cnt--;
-
        comp_status = le16_to_cpu(pkt->comp_status);
        /* Mask of reserved bits 12-15, before we examine the scsi status */
        scsi_status = le16_to_cpu(pkt->scsi_status) & SS_MASK;
@@ -911,26 +882,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
        t = cp->device->id;
        l = cp->device->lun,
 
-       tq = sp->tgt_queue;
-       lq = sp->lun_queue;
-
-       /*
-        * If loop is in transient state Report DID_BUS_BUSY
-        */
-       if ((comp_status != CS_COMPLETE || scsi_status != 0)) {
-               if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
-                   (atomic_read(&ha->loop_down_timer) ||
-                       atomic_read(&ha->loop_state) != LOOP_READY)) {
-
-                       DEBUG2(printk("scsi(%ld:%d:%d:%d): Loop Not Ready - "
-                           "pid=%lx.\n",
-                           ha->host_no, b, t, l, cp->serial_number));
-
-                       qla2x00_extend_timeout(cp, EXTEND_CMD_TIMEOUT);
-                       add_to_retry_queue(ha, sp);
-                       return;
-               }
-       }
+       fcport = sp->fcport;
 
        /* Check for any FCP transport errors. */
        if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
@@ -945,7 +897,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                            pkt->rsp_info[6], pkt->rsp_info[7]));
 
                        cp->result = DID_BUS_BUSY << 16;
-                       add_to_done_queue(ha, sp);
+                       qla2x00_sp_compl(ha, sp);
                        return;
                }
        }
@@ -964,11 +916,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                        cp->resid = resid;
                        CMD_RESID_LEN(cp) = resid;
                }
-               if (lscsi_status == SS_BUSY_CONDITION) {
-                       cp->result = DID_BUS_BUSY << 16 | lscsi_status;
-                       break;
-               }
-
                cp->result = DID_OK << 16 | lscsi_status;
 
                if (lscsi_status != SS_CHECK_CONDITION)
@@ -1002,14 +949,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                if (sp->request_sense_length != 0)
                        ha->status_srb = sp;
 
-               if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
-                   qla2x00_check_sense(cp, lq) == QLA_SUCCESS) {
-                       /* Throw away status_cont if any */
-                       ha->status_srb = NULL;
-                       add_to_scsi_retry_queue(ha, sp);
-                       return;
-               }
-
                DEBUG5(printk("%s(): Check condition Sense data, "
                    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
                    __func__, ha->host_no, b, t, l, cp,
@@ -1035,12 +974,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                 * Status.
                 */
                if (lscsi_status != 0) {
-                       if (lscsi_status == SS_BUSY_CONDITION) {
-                               cp->result = DID_BUS_BUSY << 16 |
-                                   lscsi_status;
-                               break;
-                       }
-
                        cp->result = DID_OK << 16 | lscsi_status;
 
                        if (lscsi_status != SS_CHECK_CONDITION)
@@ -1072,12 +1005,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                        if (sp->request_sense_length != 0)
                                ha->status_srb = sp;
 
-                       if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
-                           (qla2x00_check_sense(cp, lq) == QLA_SUCCESS)) {
-                               ha->status_srb = NULL;
-                               add_to_scsi_retry_queue(ha, sp);
-                               return;
-                       }
                        DEBUG5(printk("%s(): Check condition Sense data, "
                            "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
                            __func__, ha->host_no, b, t, l, cp,
@@ -1149,30 +1076,15 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
                 * retry_queue.
                 */
-               fcport = sp->fclun->fcport;
                DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
                    "pid=%ld, compl status=0x%x, port state=0x%x\n",
                    ha->host_no, t, l, cp->serial_number, comp_status,
                    atomic_read(&fcport->state)));
 
-               if ((sp->flags & (SRB_IOCTL | SRB_TAPE)) ||
-                   atomic_read(&fcport->state) == FCS_DEVICE_DEAD) {
-                       cp->result = DID_NO_CONNECT << 16;
-                       if (atomic_read(&ha->loop_state) == LOOP_DOWN) 
-                               sp->err_id = SRB_ERR_LOOP;
-                       else
-                               sp->err_id = SRB_ERR_PORT;
-                       add_to_done_queue(ha, sp);
-               } else {
-                       qla2x00_extend_timeout(cp, EXTEND_CMD_TIMEOUT);
-                       add_to_retry_queue(ha, sp);
-               }
-
+               cp->result = DID_BUS_BUSY << 16;
                if (atomic_read(&fcport->state) == FCS_ONLINE) {
                        qla2x00_mark_device_lost(ha, fcport, 1);
                }
-
-               return;
                break;
 
        case CS_RESET:
@@ -1180,13 +1092,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
                    ha->host_no, comp_status, scsi_status));
 
-               if (sp->flags & (SRB_IOCTL | SRB_TAPE)) {
-                       cp->result = DID_RESET << 16;
-               } else {
-                       qla2x00_extend_timeout(cp, EXTEND_CMD_TIMEOUT);
-                       add_to_retry_queue(ha, sp);
-                       return;
-               }
+               cp->result = DID_RESET << 16;
                break;
 
        case CS_ABORTED:
@@ -1210,8 +1116,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
 
                cp->result = DID_BUS_BUSY << 16;
 
-               fcport = lq->fclun->fcport;
-
                /* Check to see if logout occurred */
                if ((le16_to_cpu(pkt->status_flags) & SF_LOGOUT_SENT)) {
                        qla2x00_mark_device_lost(ha, fcport, 1);
@@ -1227,16 +1131,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
 
                cp->result = DID_OK << 16 | lscsi_status; 
 
-               /* TODO: ??? */
-               /* Adjust queue depth */
-               ret = scsi_track_queue_full(cp->device,
-                   sp->lun_queue->out_cnt - 1);
-               if (ret) {
-                       qla_printk(KERN_INFO, ha,
-                           "scsi(%ld:%d:%d:%d): Queue depth adjusted to %d.\n",
-                           ha->host_no, cp->device->channel, cp->device->id,
-                           cp->device->lun, ret);
-               }
                break;
 
        default:
@@ -1253,7 +1147,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
 
        /* Place command on done queue. */
        if (ha->status_srb == NULL)
-               add_to_done_queue(ha, sp);
+               qla2x00_sp_compl(ha, sp);
 }
 
 /**
@@ -1298,8 +1192,8 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
 
                /* Place command on done queue. */
                if (sp->request_sense_length == 0) {
-                       add_to_done_queue(ha, sp);
                        ha->status_srb = NULL;
+                       qla2x00_sp_compl(ha, sp);
                }
        }
 }
@@ -1341,8 +1235,6 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                ha->outstanding_cmds[pkt->handle] = NULL;
                if (ha->actthreads)
                        ha->actthreads--;
-               sp->lun_queue->out_cnt--;
-
                /* Bad payload or header */
                if (pkt->entry_status &
                    (RF_INV_E_ORDER | RF_INV_E_COUNT |
@@ -1353,8 +1245,7 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
                } else {
                        sp->cmd->result = DID_ERROR << 16;
                }
-               /* Place command on done queue. */
-               add_to_done_queue(ha, sp);
+               qla2x00_sp_compl(ha, sp);
 
        } else if (pkt->entry_type == COMMAND_A64_TYPE ||
            pkt->entry_type == COMMAND_TYPE) {
@@ -1403,62 +1294,5 @@ qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
        /* Free outstanding command slot. */
        ha->outstanding_cmds[pkt->handle1] = NULL;
 
-       add_to_done_queue(ha, sp);
-}
-
-/**
- * qla2x00_check_sense() - Perform any sense data interrogation.
- * @cp: SCSI Command
- * @lq: Lun queue
- *
- * Returns QLA_SUCCESS if the lun queue is suspended, else
- * QLA_FUNCTION_FAILED  (lun queue not suspended).
- */
-static int 
-qla2x00_check_sense(struct scsi_cmnd *cp, os_lun_t *lq)
-{
-       scsi_qla_host_t *ha;
-       srb_t           *sp;
-       fc_port_t       *fcport;
-
-       ha = (scsi_qla_host_t *) cp->device->host->hostdata;
-       if ((cp->sense_buffer[0] & 0x70) != 0x70) {
-               return (QLA_FUNCTION_FAILED);
-       }
-
-       sp = (srb_t * )CMD_SP(cp);
-       sp->flags |= SRB_GOT_SENSE;
-
-       switch (cp->sense_buffer[2] & 0xf) {
-       case RECOVERED_ERROR:
-               cp->result = DID_OK << 16;
-               cp->sense_buffer[0] = 0;
-               break;
-
-       case NOT_READY:
-               fcport = lq->fclun->fcport;
-
-               /*
-                * Suspend the lun only for hard disk device type.
-                */
-               if ((fcport->flags & FCF_TAPE_PRESENT) == 0 &&
-                   lq->q_state != LUN_STATE_TIMEOUT) {
-                       /*
-                        * If target is in process of being ready then suspend
-                        * lun for 6 secs and retry all the commands.
-                        */
-                       if (cp->sense_buffer[12] == 0x4 &&
-                           cp->sense_buffer[13] == 0x1) {
-
-                               /* Suspend the lun for 6 secs */
-                               qla2x00_suspend_lun(ha, lq, 6,
-                                   ql2xsuspendcount);
-
-                               return (QLA_SUCCESS);
-                       }
-               }
-               break;
-       }
-
-       return (QLA_FUNCTION_FAILED);
+       qla2x00_sp_compl(ha, sp);
 }
diff --git a/drivers/scsi/qla2xxx/qla_listops.h b/drivers/scsi/qla2xxx/qla_listops.h
deleted file mode 100644 (file)
index 5da034f..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/******************************************************************************
- *                  QLOGIC LINUX SOFTWARE
- *
- * QLogic ISP2x00 device driver for Linux 2.6.x
- * Copyright (C) 2003-2004 QLogic Corporation
- * (www.qlogic.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- ******************************************************************************/
-
-/* Management functions for various lists */
-
-/* __add_to_done_queue()
- * 
- * Place SRB command on done queue.
- *
- * Input:
- *      ha           = host pointer
- *      sp           = srb pointer.
- * Locking:
- *     this function assumes the ha->list_lock is already taken
- */
-static inline void 
-__add_to_done_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-       /*
-        if (sp->state != SRB_NO_QUEUE_STATE && 
-               sp->state != SRB_ACTIVE_STATE)
-               BUG();
-       */
-
-        /* Place block on done queue */
-        sp->cmd->host_scribble = (unsigned char *) NULL;
-        sp->state = SRB_DONE_STATE;
-        list_add_tail(&sp->list,&ha->done_queue);
-        ha->done_q_cnt++;
-       sp->ha = ha;
-}
-
-static inline void 
-__add_to_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-       /*
-        if( sp->state != SRB_NO_QUEUE_STATE && 
-               sp->state != SRB_ACTIVE_STATE)
-               BUG();
-       */
-
-        /* Place block on retry queue */
-        list_add_tail(&sp->list,&ha->retry_queue);
-        ha->retry_q_cnt++;
-        sp->flags |= SRB_WATCHDOG;
-        sp->state = SRB_RETRY_STATE;
-       sp->ha = ha;
-}
-
-static inline void 
-__add_to_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-       /*
-        if( sp->state != SRB_NO_QUEUE_STATE && 
-               sp->state != SRB_ACTIVE_STATE)
-               BUG();
-       */
-
-        /* Place block on retry queue */
-        list_add_tail(&sp->list,&ha->scsi_retry_queue);
-        ha->scsi_retry_q_cnt++;
-        sp->state = SRB_SCSI_RETRY_STATE;
-       sp->ha = ha;
-}
-
-static inline void 
-add_to_done_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ha->list_lock, flags);
-        __add_to_done_queue(ha,sp);
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-
-static inline void 
-add_to_free_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-       mempool_free(sp, ha->srb_mempool);
-}
-
-static inline void 
-add_to_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ha->list_lock, flags);
-        __add_to_retry_queue(ha,sp);
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-
-static inline void 
-add_to_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ha->list_lock, flags);
-        __add_to_scsi_retry_queue(ha,sp);
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-
-/*
- * __del_from_retry_queue
- *      Function used to remove a command block from the
- *      watchdog timer queue.
- *
- *      Note: Must insure that command is on watchdog
- *            list before calling del_from_retry_queue
- *            if (sp->flags & SRB_WATCHDOG)
- *
- * Input: 
- *      ha = adapter block pointer.
- *      sp = srb pointer.
- * Locking:
- *     this function assumes the list_lock is already taken
- */
-static inline void 
-__del_from_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        list_del_init(&sp->list);
-
-        sp->flags &= ~(SRB_WATCHDOG | SRB_BUSY);
-        sp->state = SRB_NO_QUEUE_STATE;
-        ha->retry_q_cnt--;
-}
-
-/*
- * __del_from_scsi_retry_queue
- *      Function used to remove a command block from the
- *      scsi retry queue.
- *
- * Input: 
- *      ha = adapter block pointer.
- *      sp = srb pointer.
- * Locking:
- *     this function assumes the list_lock is already taken
- */
-static inline void 
-__del_from_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        list_del_init(&sp->list);
-
-        ha->scsi_retry_q_cnt--;
-        sp->state = SRB_NO_QUEUE_STATE;
-}
-
-/*
- * del_from_retry_queue
- *      Function used to remove a command block from the
- *      watchdog timer queue.
- *
- *      Note: Must insure that command is on watchdog
- *            list before calling del_from_retry_queue
- *            if (sp->flags & SRB_WATCHDOG)
- *
- * Input: 
- *      ha = adapter block pointer.
- *      sp = srb pointer.
- * Locking:
- *     this function takes and releases the list_lock
- */
-static inline void 
-del_from_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        /*     if (unlikely(!(sp->flags & SRB_WATCHDOG)))
-                       BUG();*/
-        spin_lock_irqsave(&ha->list_lock, flags);
-
-        /*     if (unlikely(list_empty(&ha->retry_queue)))
-                       BUG();*/
-
-        __del_from_retry_queue(ha,sp);
-
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-/*
- * del_from_scsi_retry_queue
- *      Function used to remove a command block from the
- *      scsi retry queue.
- *
- * Input: 
- *      ha = adapter block pointer.
- *      sp = srb pointer.
- * Locking:
- *     this function takes and releases the list_lock
- */
-static inline void 
-del_from_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ha->list_lock, flags);
-
-        /*     if (unlikely(list_empty(&ha->scsi_retry_queue)))
-                       BUG();*/
-
-        __del_from_scsi_retry_queue(ha,sp);
-
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-
-/*
- * __add_to_pending_queue
- *      Add the standard SCB job to the bottom of standard SCB commands.
- *
- * Input:
- * COMPLETE!!!
- *      q  = SCSI LU pointer.
- *      sp = srb pointer.
- *      SCSI_LU_Q lock must be already obtained.
- */
-static inline int 
-__add_to_pending_queue(struct scsi_qla_host *ha, srb_t * sp)
-{
-       int     empty;
-       /*
-        if( sp->state != SRB_NO_QUEUE_STATE &&
-               sp->state != SRB_FREE_STATE &&
-               sp->state != SRB_ACTIVE_STATE)
-               BUG();
-       */
-
-       empty = list_empty(&ha->pending_queue);
-       list_add_tail(&sp->list, &ha->pending_queue);
-       ha->qthreads++;
-       sp->state = SRB_PENDING_STATE;
-
-       return (empty);
-}
-
-static inline void 
-__add_to_pending_queue_head(struct scsi_qla_host *ha, srb_t * sp)
-{
-       /*
-        if( sp->state != SRB_NO_QUEUE_STATE && 
-               sp->state != SRB_FREE_STATE &&
-               sp->state != SRB_ACTIVE_STATE)
-               BUG();
-       */
-
-       list_add(&sp->list, &ha->pending_queue);
-       ha->qthreads++;
-       sp->state = SRB_PENDING_STATE;
-}
-
-static inline int
-add_to_pending_queue(struct scsi_qla_host *ha, srb_t *sp)
-{
-       int     empty;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ha->list_lock, flags);
-       empty = __add_to_pending_queue(ha, sp);
-       spin_unlock_irqrestore(&ha->list_lock, flags);
-
-       return (empty);
-}
-static inline void
-add_to_pending_queue_head(struct scsi_qla_host *ha, srb_t *sp)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&ha->list_lock, flags);
-       __add_to_pending_queue_head(ha, sp);
-       spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-
-static inline void
-__del_from_pending_queue(struct scsi_qla_host *ha, srb_t *sp)
-{
-       list_del_init(&sp->list);
-       ha->qthreads--;
-       sp->state = SRB_NO_QUEUE_STATE;
-}
-
-/*
- * Failover Stuff.
- */
-static inline void
-__add_to_failover_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-       /*
-        if( sp->state != SRB_NO_QUEUE_STATE && 
-               sp->state != SRB_ACTIVE_STATE)
-               BUG();
-       */
-
-        list_add_tail(&sp->list,&ha->failover_queue);
-        ha->failover_cnt++;
-        sp->state = SRB_FAILOVER_STATE;
-       sp->ha = ha;
-}
-
-static inline void add_to_failover_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ha->list_lock, flags);
-
-        __add_to_failover_queue(ha,sp);
-
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-static inline void __del_from_failover_queue(struct scsi_qla_host * ha, srb_t *
-                sp)
-{
-        ha->failover_cnt--;
-        list_del_init(&sp->list);
-        sp->state = SRB_NO_QUEUE_STATE;
-}
-
-static inline void del_from_failover_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ha->list_lock, flags);
-
-        __del_from_failover_queue(ha,sp);
-
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-
-static inline void 
-del_from_pending_queue(struct scsi_qla_host * ha, srb_t * sp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&ha->list_lock, flags);
-
-        __del_from_pending_queue(ha,sp);
-
-        spin_unlock_irqrestore(&ha->list_lock, flags);
-}
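
With qla_listops.h deleted, the driver stops shuffling srbs across its private done, retry, pending and failover queues; every place that used to call add_to_done_queue() now completes the command on the spot through qla2x00_sp_compl(), leaving retries to the SCSI midlayer. The bodies of qla2x00_sp_compl() and qla2x00_sp_free_dma() are not part of this excerpt, so the sketch below only shows the usual shape of such a direct-completion helper, with its parameters simplified for illustration:

#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>

/* Sketch only: undo the PCI mapping that was set up at start of I/O... */
static void sketch_free_cmd_dma(struct pci_dev *pdev, struct scsi_cmnd *cmd,
                                dma_addr_t handle)
{
        if (cmd->use_sg)
                pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg,
                    cmd->sc_data_direction);
        else if (cmd->request_bufflen)
                pci_unmap_single(pdev, handle, cmd->request_bufflen,
                    cmd->sc_data_direction);
}

/* ...then return the command straight to the midlayer instead of
 * parking it on a done_queue for later processing. */
static void sketch_complete_cmd(struct pci_dev *pdev, struct scsi_cmnd *cmd,
                                dma_addr_t handle)
{
        sketch_free_cmd_dma(pdev, cmd, handle);
        cmd->host_scribble = NULL;
        cmd->scsi_done(cmd);
}
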
index c04fbcd75235a9e8817a03cfe0399901e087473b..15f6acaca30521908b59eb24291c9f54c397d79e 100644 (file)
@@ -858,8 +858,7 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 
        DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no);)
 
-       fcport = sp->fclun->fcport;
-
+       fcport = sp->fcport;
        if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
            atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
                return 1;
@@ -884,7 +883,7 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
                mcp->mb[1] = fcport->loop_id << 8;
        mcp->mb[2] = (uint16_t)handle;
        mcp->mb[3] = (uint16_t)(handle >> 16);
-       mcp->mb[6] = (uint16_t)sp->fclun->lun;
+       mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
        mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_0;
        mcp->tov = 30;
@@ -980,30 +979,22 @@ qla2x00_abort_target(fc_port_t *fcport)
  *     Kernel context.
  */
 int
-qla2x00_target_reset(scsi_qla_host_t *ha, uint16_t b, uint16_t t)
+qla2x00_target_reset(scsi_qla_host_t *ha, struct fc_port *fcport)
 {
        int rval;
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
-       os_tgt_t *tgt;
 
        DEBUG11(printk("qla2x00_target_reset(%ld): entered.\n", ha->host_no);)
 
-       tgt = TGT_Q(ha, t);
-       if (tgt->fcport == NULL) {
-               /* no target to abort */
-               return 0;
-       }
-       if (atomic_read(&tgt->fcport->state) != FCS_ONLINE) {
-               /* target not online */
+       if (atomic_read(&fcport->state) != FCS_ONLINE)
                return 0;
-       }
 
        mcp->mb[0] = MBC_TARGET_RESET;
        if (HAS_EXTENDED_IDS(ha))
-               mcp->mb[1] = tgt->fcport->loop_id;
+               mcp->mb[1] = fcport->loop_id;
        else
-               mcp->mb[1] = tgt->fcport->loop_id << 8;
+               mcp->mb[1] = fcport->loop_id << 8;
        mcp->mb[2] = ha->loop_reset_delay;
        mcp->out_mb = MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_0;
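
Both the abort and target-reset mailbox paths above now key off sp->fcport and the command's own device->lun rather than the fclun/os_tgt lookups, and both place the loop ID in mailbox register 1, shifted into the upper byte when the ISP lacks extended (16-bit) IDs. A small sketch of that loop-ID encoding, assuming only that HAS_EXTENDED_IDS() reports 16-bit loop-ID support as it does in the hunks above:

#include <linux/types.h>

/* Sketch only: mailbox register 1 carries the loop ID either as a full
 * 16-bit value or shifted into the upper byte, matching the
 * HAS_EXTENDED_IDS() branches above. */
static inline uint16_t sketch_mb1_loop_id(int has_extended_ids,
                                          uint16_t loop_id)
{
        return has_extended_ids ? loop_id : (uint16_t)(loop_id << 8);
}
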
index b5863d8769e032d203d10cdafaa897f7d1f3b4fa..84db911318c61f095d32f85994521224784c32fa 100644 (file)
@@ -63,7 +63,7 @@ module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xlogintimeout,
                "Login timeout value in seconds.");
 
-int qlport_down_retry;
+int qlport_down_retry = 30;
 module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(qlport_down_retry,
                "Maximum number of command retries to a port that returns"
@@ -75,11 +75,6 @@ MODULE_PARM_DESC(ql2xretrycount,
                "Maximum number of mid-layer retries allowed for a command.  "
                "Default value is 20, ");
 
-int displayConfig;
-module_param(displayConfig, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(displayConfig,
-               "If 1 then display the configuration used in /etc/modprobe.conf.");
-
 int ql2xplogiabsentdevice;
 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xplogiabsentdevice,
@@ -119,30 +114,11 @@ MODULE_PARM_DESC(ql2xsuspendcount,
                "target returns a <NOT READY> status.  Default is 10 "
                "iterations.");
 
-int ql2xdoinitscan = 1;
-module_param(ql2xdoinitscan, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xdoinitscan,
-               "Signal mid-layer to perform scan after driver load: 0 -- no "
-               "signal sent to mid-layer.");
-
 int ql2xloginretrycount = 0;
 module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xloginretrycount,
                "Specify an alternate value for the NVRAM login retry count.");
 
-/*
- * Proc structures and functions
- */
-struct info_str {
-       char    *buffer;
-       int     length;
-       off_t   offset;
-       int     pos;
-};
-
-static void copy_mem_info(struct info_str *, char *, int);
-static int copy_info(struct info_str *, char *, ...);
-
 static void qla2x00_free_device(scsi_qla_host_t *);
 
 static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
@@ -151,6 +127,8 @@ static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
  * SCSI host template entry points 
  */
 static int qla2xxx_slave_configure(struct scsi_device * device);
+static int qla2xxx_slave_alloc(struct scsi_device *);
+static void qla2xxx_slave_destroy(struct scsi_device *);
 static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
                void (*fn)(struct scsi_cmnd *));
 static int qla2xxx_eh_abort(struct scsi_cmnd *);
@@ -160,14 +138,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 static int qla2x00_loop_reset(scsi_qla_host_t *ha);
 static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
 
-static int qla2x00_proc_info(struct Scsi_Host *, char *, char **,
-    off_t, int, int);
-
 static struct scsi_host_template qla2x00_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = "qla2xxx",
-       .proc_name              = "qla2xxx",
-       .proc_info              = qla2x00_proc_info,
        .queuecommand           = qla2x00_queuecommand,
 
        .eh_abort_handler       = qla2xxx_eh_abort,
@@ -177,6 +150,8 @@ static struct scsi_host_template qla2x00_driver_template = {
 
        .slave_configure        = qla2xxx_slave_configure,
 
+       .slave_alloc            = qla2xxx_slave_alloc,
+       .slave_destroy          = qla2xxx_slave_destroy,
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
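
The host template gains slave_alloc and slave_destroy hooks here, and the rewritten qla2x00_queuecommand() further down reads its fc_port straight out of cmd->device->hostdata, so the alloc hook presumably binds the port to the scsi_device when the midlayer creates it. A minimal sketch of that binding; the port lookup is an explicitly hypothetical stub, since its real implementation is outside this excerpt:

#include <linux/errno.h>
#include <scsi/scsi_device.h>

/* Hypothetical stub: the real driver would resolve the fc_port bound to
 * this target (e.g. from the adapter's port list); that code is not in
 * this excerpt, so it is left empty here. */
static void *sketch_lookup_fcport(struct scsi_device *sdev)
{
        return NULL;
}

static int sketch_slave_alloc(struct scsi_device *sdev)
{
        void *fcport = sketch_lookup_fcport(sdev);

        if (!fcport)
                return -ENXIO;
        sdev->hostdata = fcport;        /* read back later by queuecommand */
        return 0;
}

static void sketch_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;          /* drop the binding on removal */
}
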
@@ -191,8 +166,6 @@ static struct scsi_host_template qla2x00_driver_template = {
 
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
 
-static void qla2x00_display_fc_names(scsi_qla_host_t *);
-
 /* TODO Convert to inlines
  *
  * Timer routines
@@ -230,168 +203,6 @@ qla2x00_stop_timer(scsi_qla_host_t *ha)
        ha->timer_active = 0;
 }
 
-void qla2x00_cmd_timeout(srb_t *);
-
-static __inline__ void qla2x00_callback(scsi_qla_host_t *, struct scsi_cmnd *);
-static __inline__ void sp_put(struct scsi_qla_host * ha, srb_t *sp);
-static __inline__ void sp_get(struct scsi_qla_host * ha, srb_t *sp);
-static __inline__ void
-qla2x00_delete_from_done_queue(scsi_qla_host_t *, srb_t *); 
-
-/*
-* qla2x00_callback
-*      Returns the completed SCSI command to LINUX.
-*
-* Input:
-*      ha -- Host adapter structure
-*      cmd -- SCSI mid-level command structure.
-* Returns:
-*      None
-* Note:From failover point of view we always get the sp
-*      from vis_ha pool in queuecommand.So when we put it 
-*      back to the pool it has to be the vis_ha.        
-*      So rely on struct scsi_cmnd to get the vis_ha and not on sp.                    
-*/
-static inline void
-qla2x00_callback(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
-{
-       srb_t *sp = (srb_t *) CMD_SP(cmd);
-       scsi_qla_host_t *vis_ha;
-       os_lun_t *lq;
-       int got_sense;
-       unsigned long   cpu_flags = 0;
-
-       cmd->host_scribble = (unsigned char *) NULL;
-       vis_ha = (scsi_qla_host_t *) cmd->device->host->hostdata;
-
-       if (sp == NULL) {
-               qla_printk(KERN_INFO, ha,
-                       "%s(): **** CMD derives a NULL SP\n",
-                       __func__);
-                DEBUG2(BUG();)
-               return;
-       }
-
-       /*
-        * If command status is not DID_BUS_BUSY then go ahead and freed sp.
-        */
-       /*
-        * Cancel command timeout
-        */
-       qla2x00_delete_timer_from_cmd(sp);
-
-       /*
-        * Put SP back in the free queue
-        */
-       sp->cmd   = NULL;
-       CMD_SP(cmd) = NULL;
-       lq = sp->lun_queue;
-       got_sense = (sp->flags & SRB_GOT_SENSE)? 1: 0;
-       add_to_free_queue(vis_ha, sp);
-
-       if (host_byte(cmd->result) == DID_OK) {
-               /* device ok */
-               ha->total_bytes += cmd->bufflen;
-               if (!got_sense) {
-                       /* If lun was suspended then clear retry count */
-                       spin_lock_irqsave(&lq->q_lock, cpu_flags);
-                       if (!test_bit(LUN_EXEC_DELAYED, &lq->q_flag))
-                               lq->q_state = LUN_STATE_READY;
-                       spin_unlock_irqrestore(&lq->q_lock, cpu_flags);
-               }
-       } else if (host_byte(cmd->result) == DID_ERROR) {
-               /* device error */
-               ha->total_dev_errs++;
-       }
-
-       /* Call the mid-level driver interrupt handler */
-       (*(cmd)->scsi_done)(cmd);
-}
-
-/**************************************************************************
-* sp_put
-*
-* Description:
-*   Decrement reference count and call the callback if we're the last
-*   owner of the specified sp. Will get the host_lock before calling
-*   the callback.
-*
-* Input:
-*   ha - pointer to the scsi_qla_host_t where the callback is to occur.
-*   sp - pointer to srb_t structure to use.
-*
-* Returns:
-*
-**************************************************************************/
-static inline void
-sp_put(struct scsi_qla_host * ha, srb_t *sp)
-{
-        if (atomic_read(&sp->ref_count) == 0) {
-               qla_printk(KERN_INFO, ha,
-                       "%s(): **** SP->ref_count not zero\n",
-                       __func__);
-                DEBUG2(BUG();)
-
-                return;
-       }
-
-        if (!atomic_dec_and_test(&sp->ref_count)) {
-                return;
-        }
-
-        qla2x00_callback(ha, sp->cmd);
-}
-
-/**************************************************************************
-* sp_get
-*
-* Description:
-*   Increment reference count of the specified sp.
-*
-* Input:
-*   sp - pointer to srb_t structure to use.
-*
-* Returns:
-*
-**************************************************************************/
-static inline void
-sp_get(struct scsi_qla_host * ha, srb_t *sp)
-{
-        atomic_inc(&sp->ref_count);
-
-        if (atomic_read(&sp->ref_count) > 2) {
-               qla_printk(KERN_INFO, ha,
-                       "%s(): **** SP->ref_count greater than two\n",
-                       __func__);
-                DEBUG2(BUG();)
-
-               return;
-       }
-}
-
-static inline void 
-qla2x00_delete_from_done_queue(scsi_qla_host_t *dest_ha, srb_t *sp) 
-{
-       /* remove command from done list */
-       list_del_init(&sp->list);
-       dest_ha->done_q_cnt--;
-       sp->state = SRB_NO_QUEUE_STATE;
-
-       if (sp->flags & SRB_DMA_VALID) {
-               sp->flags &= ~SRB_DMA_VALID;
-
-               /* Release memory used for this I/O */
-               if (sp->cmd->use_sg) {
-                       pci_unmap_sg(dest_ha->pdev, sp->cmd->request_buffer,
-                           sp->cmd->use_sg, sp->cmd->sc_data_direction);
-               } else if (sp->cmd->request_bufflen) {
-                       pci_unmap_page(dest_ha->pdev, sp->dma_handle,
-                           sp->cmd->request_bufflen,
-                           sp->cmd->sc_data_direction);
-               }
-       }
-}
-
 static int qla2x00_do_dpc(void *data);
 
 static void qla2x00_rst_aen(scsi_qla_host_t *);
@@ -400,186 +211,12 @@ static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
 static void qla2x00_mem_free(scsi_qla_host_t *ha);
 static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
 static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
-static srb_t *qla2x00_get_new_sp(scsi_qla_host_t *ha);
-
-static ssize_t qla2x00_sysfs_read_fw_dump(struct kobject *, char *, loff_t,
-    size_t);
-static ssize_t qla2x00_sysfs_write_fw_dump(struct kobject *, char *, loff_t,
-    size_t);
-static struct bin_attribute sysfs_fw_dump_attr = {
-       .attr = {
-               .name = "fw_dump",
-               .mode = S_IRUSR | S_IWUSR,
-               .owner = THIS_MODULE,
-       },
-       .size = 0,
-       .read = qla2x00_sysfs_read_fw_dump,
-       .write = qla2x00_sysfs_write_fw_dump,
-};
-static ssize_t qla2x00_sysfs_read_nvram(struct kobject *, char *, loff_t,
-    size_t);
-static ssize_t qla2x00_sysfs_write_nvram(struct kobject *, char *, loff_t,
-    size_t);
-static struct bin_attribute sysfs_nvram_attr = {
-       .attr = {
-               .name = "nvram",
-               .mode = S_IRUSR | S_IWUSR,
-               .owner = THIS_MODULE,
-       },
-       .size = sizeof(nvram_t),
-       .read = qla2x00_sysfs_read_nvram,
-       .write = qla2x00_sysfs_write_nvram,
-};
+static srb_t *qla2x00_get_new_sp(scsi_qla_host_t *);
+static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
+void qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *);
 
 /* -------------------------------------------------------------------------- */
 
-
-/* SysFS attributes. */
-static ssize_t qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf,
-    loff_t off, size_t count)
-{
-       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
-           struct device, kobj)));
-
-       if (ha->fw_dump_reading == 0)
-               return 0;
-       if (off > ha->fw_dump_buffer_len)
-               return 0;
-       if (off + count > ha->fw_dump_buffer_len)
-               count = ha->fw_dump_buffer_len - off;
-
-       memcpy(buf, &ha->fw_dump_buffer[off], count);
-
-       return (count);
-}
-
-static ssize_t qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf,
-    loff_t off, size_t count)
-{
-       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
-           struct device, kobj)));
-       int reading;
-       uint32_t dump_size;
-
-       if (off != 0)
-               return (0);
-
-       reading = simple_strtol(buf, NULL, 10);
-       switch (reading) {
-       case 0:
-               if (ha->fw_dump_reading == 1) {
-                       qla_printk(KERN_INFO, ha,
-                           "Firmware dump cleared on (%ld).\n",
-                           ha->host_no);
-
-                       vfree(ha->fw_dump_buffer);
-                       free_pages((unsigned long)ha->fw_dump,
-                           ha->fw_dump_order);
-
-                       ha->fw_dump_reading = 0;
-                       ha->fw_dump_buffer = NULL;
-                       ha->fw_dump = NULL;
-               }
-               break;
-       case 1:
-               if (ha->fw_dump != NULL && !ha->fw_dump_reading) {
-                       ha->fw_dump_reading = 1;
-
-                       dump_size = FW_DUMP_SIZE_1M;
-                       if (ha->fw_memory_size < 0x20000) 
-                               dump_size = FW_DUMP_SIZE_128K;
-                       else if (ha->fw_memory_size < 0x80000) 
-                               dump_size = FW_DUMP_SIZE_512K;
-                       ha->fw_dump_buffer = (char *)vmalloc(dump_size);
-                       if (ha->fw_dump_buffer == NULL) {
-                               qla_printk(KERN_WARNING, ha,
-                                   "Unable to allocate memory for firmware "
-                                   "dump buffer (%d).\n", dump_size);
-
-                               ha->fw_dump_reading = 0;
-                               return (count);
-                       }
-                       qla_printk(KERN_INFO, ha,
-                           "Firmware dump ready for read on (%ld).\n",
-                           ha->host_no);
-                       memset(ha->fw_dump_buffer, 0, dump_size);
-                       if (IS_QLA2100(ha) || IS_QLA2200(ha))
-                               qla2100_ascii_fw_dump(ha);
-                       else
-                               qla2300_ascii_fw_dump(ha);
-                       ha->fw_dump_buffer_len = strlen(ha->fw_dump_buffer);
-               }
-               break;
-       }
-       return (count);
-}
-
-static ssize_t qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf,
-    loff_t off, size_t count)
-{
-       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
-           struct device, kobj)));
-       uint16_t        *witer;
-       unsigned long   flags;
-       uint16_t        cnt;
-
-       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
-               return 0;
-
-       /* Read NVRAM. */
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       qla2x00_lock_nvram_access(ha);
-       witer = (uint16_t *)buf;
-       for (cnt = 0; cnt < count / 2; cnt++) {
-               *witer = cpu_to_le16(qla2x00_get_nvram_word(ha,
-                   cnt+ha->nvram_base));
-               witer++;
-       }
-       qla2x00_unlock_nvram_access(ha);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-       return (count);
-}
-
-static ssize_t qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf,
-    loff_t off, size_t count)
-{
-       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
-           struct device, kobj)));
-       uint8_t         *iter;
-       uint16_t        *witer;
-       unsigned long   flags;
-       uint16_t        cnt;
-       uint8_t         chksum;
-
-       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
-               return 0;
-
-       /* Checksum NVRAM. */
-       iter = (uint8_t *)buf;
-       chksum = 0;
-       for (cnt = 0; cnt < count - 1; cnt++)
-               chksum += *iter++;
-       chksum = ~chksum + 1;
-       *iter = chksum;
-
-       /* Write NVRAM. */
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       qla2x00_lock_nvram_access(ha);
-       qla2x00_release_nvram_protection(ha);
-       witer = (uint16_t *)buf;
-       for (cnt = 0; cnt < count / 2; cnt++) {
-               qla2x00_write_nvram_word(ha, cnt+ha->nvram_base,
-                   cpu_to_le16(*witer));
-               witer++;
-       }
-       qla2x00_unlock_nvram_access(ha);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-       return (count);
-}
-
-/* -------------------------------------------------------------------------- */
 static char *
 qla2x00_get_pci_info_str(struct scsi_qla_host *ha, char *str)
 {
@@ -661,210 +298,76 @@ qla2x00_get_fw_version_str(struct scsi_qla_host *ha, char *str)
 * handling).
 **************************************************************************/
 static int
-qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
+qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 {
-       fc_port_t       *fcport;
-       os_lun_t        *lq;
-       os_tgt_t        *tq;
-       scsi_qla_host_t *ha, *ha2;
-       srb_t           *sp;
-       struct Scsi_Host *host;
-       unsigned int    b, t, l;
-       unsigned long   handle;
-       int             was_empty;
-
+       scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
+       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+       srb_t *sp;
+       int rval;
 
-       host = cmd->device->host;
-       ha = (scsi_qla_host_t *) host->hostdata;
-       was_empty = 1;
+       if (!fcport) {
+               cmd->result = DID_NO_CONNECT << 16;
+               goto qc_fail_command;
+       }
 
-       cmd->scsi_done = fn;
+       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+                   atomic_read(&ha->loop_state) == LOOP_DEAD) {
+                       cmd->result = DID_NO_CONNECT << 16;
+                       goto qc_fail_command;
+               }
+               goto qc_host_busy;
+       }
 
        spin_unlock_irq(ha->host->host_lock);
 
-       /*
-        * Allocate a command packet from the "sp" pool.  If we can't get back
-        * one then let scsi layer come back later.
-        */
+       /* Allocate a command packet from the "sp" pool. */
        if ((sp = qla2x00_get_new_sp(ha)) == NULL) {
-               qla_printk(KERN_WARNING, ha,
-                   "Couldn't allocate memory for sp - retried.\n");
-
-               spin_lock_irq(ha->host->host_lock);
-
-               return (1);
+               goto qc_host_busy_lock;
        }
 
+       sp->ha = ha;
+       sp->fcport = fcport;
        sp->cmd = cmd;
-       CMD_SP(cmd) = (void *)sp;
-
        sp->flags = 0;
-       if (CMD_RESID_LEN(cmd) & SRB_IOCTL) {
-               /* Need to set sp->flags */
-               sp->flags |= SRB_IOCTL;
-               CMD_RESID_LEN(cmd) = 0; /* Clear it since no more use. */
-       }
-
-       sp->fo_retry_cnt = 0;
        sp->err_id = 0;
 
-       /* Generate LU queue on bus, target, LUN */
-       b = cmd->device->channel;
-       t = cmd->device->id;
-       l = cmd->device->lun;
-
-       /*
-        * Start Command Timer. Typically it will be 2 seconds less than what
-        * is requested by the Host such that we can return the IO before
-        * aborts are called.
-        */
-       if ((cmd->timeout_per_command / HZ) > QLA_CMD_TIMER_DELTA)
-               qla2x00_add_timer_to_cmd(sp,
-                   (cmd->timeout_per_command / HZ) - QLA_CMD_TIMER_DELTA);
-       else
-               qla2x00_add_timer_to_cmd(sp, cmd->timeout_per_command / HZ);
-
-       if (l >= ha->max_luns) {
-               cmd->result = DID_NO_CONNECT << 16;
-               sp->err_id = SRB_ERR_PORT;
-
-               spin_lock_irq(ha->host->host_lock);
-
-               sp_put(ha, sp);
-
-               return (0);
-       }
-
-       if ((tq = (os_tgt_t *) TGT_Q(ha, t)) != NULL &&
-           (lq = (os_lun_t *) LUN_Q(ha, t, l)) != NULL) {
-               fcport = lq->fclun->fcport;
-               ha2 = fcport->ha;
-       } else {
-               lq = NULL;
-               fcport = NULL;
-               ha2 = ha;
-       }
-
-       /* Set an invalid handle until we issue the command to ISP */
-       /* then we will set the real handle value.                 */
-       handle = INVALID_HANDLE;
-       cmd->host_scribble = (unsigned char *)handle;
-
-       /* Bookkeeping information */
-       sp->r_start = jiffies;          /* Time the request was received. */
-       sp->u_start = 0;
-
-       /* Setup device queue pointers. */
-       sp->tgt_queue = tq;
-       sp->lun_queue = lq;
-
-       /*
-        * NOTE : q is NULL
-        *
-        * 1. When device is added from persistent binding but has not been
-        *    discovered yet. The state of loopid == PORT_AVAIL.
-        *    2. When device is never found on the bus (loopid == UNUSED).
-        *
-        * IF Device Queue is not created, or device is not in a valid state
-        * and link down error reporting is enabled, reject IO.
-        */
-       if (fcport == NULL) {
-               DEBUG3(printk("scsi(%ld:%2d:%2d): port unavailable\n",
-                   ha->host_no,t,l));
-
-               cmd->result = DID_NO_CONNECT << 16;
-               sp->err_id = SRB_ERR_PORT;
-
-               spin_lock_irq(ha->host->host_lock);
+       CMD_SP(cmd) = (void *)sp;
+       cmd->scsi_done = done;
 
-               sp_put(ha, sp);
+       rval = qla2x00_start_scsi(sp);
+       if (rval != QLA_SUCCESS)
+               goto qc_host_busy_free_sp;
 
-               return (0);
-       }
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (ha->flags.online && ha->flags.process_response_queue &&
+           ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
+               unsigned long flags;
 
-       /* Only modify the allowed count if the target is a *non* tape device */
-       if ((fcport->flags & FCF_TAPE_PRESENT) == 0) {
-               sp->flags &= ~SRB_TAPE;
-               if (cmd->allowed < ql2xretrycount) {
-                       cmd->allowed = ql2xretrycount;
-               }
-       } else
-               sp->flags |= SRB_TAPE;
-
-       DEBUG5(printk("scsi(%ld:%2d:%2d): (queuecmd) queue sp = %p, "
-           "flags=0x%x fo retry=%d, pid=%ld\n",
-           ha->host_no, t, l, sp, sp->flags, sp->fo_retry_cnt,
-           cmd->serial_number));
-       DEBUG5(qla2x00_print_scsi_cmd(cmd));
-
-       sp->fclun = lq->fclun;
-       sp->ha = ha2;
-
-       if (cmd->sc_data_direction == DMA_BIDIRECTIONAL &&
-           cmd->request_bufflen != 0) {
-
-               DEBUG2(printk(KERN_WARNING
-                   "scsi(%ld): Incorrect data direction - transfer "
-                   "length=%d, direction=%d, pid=%ld, opcode=%x\n",
-                   ha->host_no, cmd->request_bufflen, cmd->sc_data_direction,
-                   cmd->serial_number, cmd->cmnd[0]));
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               qla2x00_process_response_queue(ha);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
        }
 
-       /* Final pre-check :
-        *
-        *      Either PORT_DOWN_TIMER OR LINK_DOWN_TIMER Expired.
-        */
-       if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-           atomic_read(&ha2->loop_state) == LOOP_DEAD) {
-               /*
-                * Add the command to the done-queue for later failover
-                * processing.
-                */
-               cmd->result = DID_NO_CONNECT << 16;
-               if (atomic_read(&ha2->loop_state) == LOOP_DOWN) 
-                       sp->err_id = SRB_ERR_LOOP;
-               else
-                       sp->err_id = SRB_ERR_PORT;
+       spin_lock_irq(ha->host->host_lock);
 
-               add_to_done_queue(ha, sp);
-               qla2x00_done(ha);
+       return 0;
 
-               spin_lock_irq(ha->host->host_lock);
-               return (0);
-       }
+qc_host_busy_free_sp:
+       qla2x00_sp_free_dma(ha, sp);
+       CMD_SP(cmd) = NULL;
+       mempool_free(sp, ha->srb_mempool);
 
-       if (tq && test_bit(TQF_SUSPENDED, &tq->flags) &&
-           (sp->flags & SRB_TAPE) == 0) {
-               /* If target suspended put incoming I/O in retry_q. */
-               qla2x00_extend_timeout(sp->cmd, 10);
-               add_to_scsi_retry_queue(ha, sp);
-       } else
-               was_empty = add_to_pending_queue(ha, sp);
-
-       if ((IS_QLA2100(ha) || IS_QLA2200(ha)) && ha->flags.online) {
-               if (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&ha->hardware_lock, flags);   
-                       qla2x00_process_response_queue(ha);
-                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-               }
-       }
+qc_host_busy_lock:
+       spin_lock_irq(ha->host->host_lock);
 
-       /* We submit to the hardware if:
-        *
-        *      1) we're on the cpu the irq's arrive on or
-        *      2) there are very few io's outstanding.
-        *
-        * In all other cases we'll let an irq pick up our IO and submit it
-        * to the controller to improve affinity.
-        */
-       if (_smp_processor_id() == ha->last_irq_cpu || was_empty)
-               qla2x00_next(ha);
+qc_host_busy:
+       return SCSI_MLQUEUE_HOST_BUSY;
 
-       spin_lock_irq(ha->host->host_lock);
+qc_fail_command:
+       done(cmd);
 
-       return (0);
+       return 0;
 }
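The rewritten queuecommand above collapses the old multi-queue logic into three outcomes: complete the command at once with DID_NO_CONNECT in the host byte (bits 16-23 of cmd->result), push back with SCSI_MLQUEUE_HOST_BUSY so the mid-layer retries later, or start the request and return 0. A hedged outline of that contract; port_dead() and out_of_resources() are hypothetical helpers, not driver functions:

static int example_queuecommand(struct scsi_cmnd *cmd,
    void (*done)(struct scsi_cmnd *))
{
        if (port_dead(cmd)) {                           /* hypothetical check */
                cmd->result = DID_NO_CONNECT << 16;     /* host byte: no connect */
                done(cmd);                              /* complete immediately */
                return 0;                               /* command was consumed */
        }
        if (out_of_resources(cmd))                      /* hypothetical check */
                return SCSI_MLQUEUE_HOST_BUSY;          /* mid-layer will retry */

        /* ... build an srb and hand the request to the firmware ... */
        return 0;
}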
 
 /*
@@ -886,54 +389,21 @@ static int
 qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
 {
 #define ABORT_POLLING_PERIOD   HZ
-#define ABORT_WAIT_TIME                ((10 * HZ) / (ABORT_POLLING_PERIOD))
-
-       int             found = 0;
-       int             done = 0;
-       srb_t           *rp = NULL;
-       struct list_head *list, *temp;
-       u_long          max_wait_time = ABORT_WAIT_TIME;
-
-       do {
-               /* Check on done queue */
-               spin_lock(&ha->list_lock);
-               list_for_each_safe(list, temp, &ha->done_queue) {
-                       rp = list_entry(list, srb_t, list);
-
-                       /*
-                        * Found command. Just exit and wait for the cmd sent
-                        * to OS.
-                       */
-                       if (cmd == rp->cmd) {
-                               found++;
-                               DEBUG3(printk("%s: found in done queue.\n",
-                                   __func__);)
-                               break;
-                       }
-               }
-               spin_unlock(&ha->list_lock);
-
-               /* Complete the cmd right away. */
-               if (found) { 
-                       qla2x00_delete_from_done_queue(ha, rp);
-                       sp_put(ha, rp);
-                       done++;
-                       break;
-               }
-
-               spin_unlock_irq(ha->host->host_lock);
+#define ABORT_WAIT_ITER                ((10 * HZ) / (ABORT_POLLING_PERIOD))
+       unsigned long wait_iter = ABORT_WAIT_ITER;
+       int ret = QLA_SUCCESS;
 
+       while (CMD_SP(cmd)) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(ABORT_POLLING_PERIOD);
 
-               spin_lock_irq(ha->host->host_lock);
-
-       } while ((max_wait_time--));
-
-       if (done)
-               DEBUG2(printk(KERN_INFO "%s: found cmd=%p.\n", __func__, cmd));
+               if (!--wait_iter)
+                       break;
+       }
+       if (CMD_SP(cmd))
+               ret = QLA_FUNCTION_FAILED;
 
-       return (done);
+       return ret;
 }
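The replacement wait routine above drops the old done-queue scan in favour of a sleep-and-poll loop on CMD_SP(cmd), which the driver clears once the request has been freed. Its general shape, hedged, with still_outstanding() as a hypothetical predicate and an arbitrary ten-period budget:

        unsigned long iters = 10;                       /* polling budget */

        while (still_outstanding(cmd)) {                /* hypothetical predicate */
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ);                   /* sleep one polling period */
                if (!--iters)
                        break;                          /* budget exhausted */
        }
        return still_outstanding(cmd) ? QLA_FUNCTION_FAILED : QLA_SUCCESS;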
 
 /*
@@ -1032,246 +502,70 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
 int
 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 {
-       int             i;
-       int             return_status = FAILED;
-       os_lun_t        *q;
-       scsi_qla_host_t *ha;
-       scsi_qla_host_t *vis_ha;
-       srb_t           *sp;
-       srb_t           *rp;
-       struct list_head *list, *temp;
-       struct Scsi_Host *host;
-       uint8_t         found = 0;
-       unsigned int    b, t, l;
-
-       /* Get the SCSI request ptr */
-       sp = (srb_t *) CMD_SP(cmd);
-
-       /*
-        * If sp is NULL, command is already returned.
-        * sp is NULLED just before we call back scsi_done
-        *
-        */
-       if ((sp == NULL)) {
-               /* no action - we don't have command */
-               qla_printk(KERN_INFO, to_qla_host(cmd->device->host),
-                   "qla2xxx_eh_abort: cmd already done sp=%p\n", sp);
-               DEBUG(printk("qla2xxx_eh_abort: cmd already done sp=%p\n", sp);)
-               return SUCCESS;
-       }
-       if (sp) {
-               DEBUG(printk("qla2xxx_eh_abort: refcount %i \n",
-                   atomic_read(&sp->ref_count));)
-       }
-
-       vis_ha = (scsi_qla_host_t *) cmd->device->host->hostdata;
-       ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
-
-       host = ha->host;
-
-       /* Generate LU queue on bus, target, LUN */
-       b = cmd->device->channel;
-       t = cmd->device->id;
-       l = cmd->device->lun;
-       q = GET_LU_Q(vis_ha, t, l);
-
-       qla_printk(KERN_INFO, ha, 
-           "%s scsi(%ld:%d:%d:%d): cmd_timeout_in_sec=0x%x.\n", __func__,
-           ha->host_no, (int)b, (int)t, (int)l,
-           cmd->timeout_per_command / HZ);
-
-       /*
-        * if no LUN queue then something is very wrong!!!
-        */
-       if (q == NULL) {
-               qla_printk(KERN_WARNING, ha,
-                       "qla2x00: (%x:%x:%x) No LUN queue.\n", b, t, l);
+       scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
+       srb_t *sp;
+       int ret, i;
+       unsigned int id, lun;
+       unsigned long serial;
 
-               /* no action - we don't have command */
+       if (!CMD_SP(cmd))
                return FAILED;
-       }
 
-       DEBUG2(printk("scsi(%ld): ABORTing cmd=%p sp=%p jiffies = 0x%lx, "
-           "timeout=%x, dpc_flags=%lx, vis_ha->dpc_flags=%lx q->flag=%lx\n",
-           ha->host_no, cmd, sp, jiffies, cmd->timeout_per_command / HZ,
-           ha->dpc_flags, vis_ha->dpc_flags, q->q_flag));
-       DEBUG2(qla2x00_print_scsi_cmd(cmd));
+       ret = FAILED;
 
+       id = cmd->device->id;
+       lun = cmd->device->lun;
+       serial = cmd->serial_number;
+
+       /* Check the active list for the command. */
        spin_unlock_irq(ha->host->host_lock);
-       if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
-               DEBUG2(printk("%s failed:board disabled\n", __func__);)
-               spin_lock_irq(ha->host->host_lock);
-               return FAILED;
-       }
-       spin_lock_irq(ha->host->host_lock);
+       spin_lock(&ha->hardware_lock);
+       for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
+               sp = ha->outstanding_cmds[i];
 
-       /* Search done queue */
-       spin_lock(&ha->list_lock);
-       list_for_each_safe(list, temp, &ha->done_queue) {
-               rp = list_entry(list, srb_t, list);
+               if (sp == NULL)
+                       continue;
 
-               if (cmd != rp->cmd)
+               if (sp->cmd != cmd)
                        continue;
 
-               /*
-                * Found command.Remove it from done list.
-                * And proceed to post completion to scsi mid layer.
-                */
-               return_status = SUCCESS;
-               found++;
-               qla2x00_delete_from_done_queue(ha, sp);
+               DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld "
+                   "sp->state=%x\n", __func__, ha->host_no, sp, serial,
+                   sp->state));
+               DEBUG3(qla2x00_print_scsi_cmd(cmd);)
+
+               spin_unlock(&ha->hardware_lock);
+               if (qla2x00_abort_command(ha, sp)) {
+                       DEBUG2(printk("%s(%ld): abort_command "
+                           "mbx failed.\n", __func__, ha->host_no));
+               } else {
+                       DEBUG3(printk("%s(%ld): abort_command "
+                           "mbx success.\n", __func__, ha->host_no));
+                       ret = SUCCESS;
+               }
+               spin_lock(&ha->hardware_lock);
 
                break;
-       } /* list_for_each_safe() */
-       spin_unlock(&ha->list_lock);
+       }
 
-       /*
-        * Return immediately if the aborted command was already in the done
-        * queue
-        */
-       if (found) {
-               qla_printk(KERN_INFO, ha,
-                   "qla2xxx_eh_abort: Returning completed command=%p sp=%p\n",
-                   cmd, sp);
-               sp_put(ha, sp);
-               return (return_status);
+       /* Wait for the command to be returned. */
+       if (ret == SUCCESS) {
+               spin_unlock(&ha->hardware_lock);
+               if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
+                       qla_printk(KERN_ERR, ha, 
+                           "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
+                           "%x.\n", ha->host_no, id, lun, serial, ret);
+               }
+               spin_lock(&ha->hardware_lock);
        }
-       
+       spin_lock_irq(ha->host->host_lock);
 
-       /*
-        * See if this command is in the retry queue
-        */
-       DEBUG3(printk("qla2xxx_eh_abort: searching sp %p in retry "
-                   "queue.\n", sp);)
+       qla_printk(KERN_INFO, ha, 
+           "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no,
+           id, lun, serial, ret);
 
-       spin_lock(&ha->list_lock);
-       list_for_each_safe(list, temp, &ha->retry_queue) {
-               rp = list_entry(list, srb_t, list);
-
-               if (cmd != rp->cmd)
-                       continue;
-
-
-               DEBUG2(printk("qla2xxx_eh_abort: found "
-                   "in retry queue. SP=%p\n", sp);)
-
-               __del_from_retry_queue(ha, rp);
-               cmd->result = DID_ABORT << 16;
-               __add_to_done_queue(ha, rp);
-
-               return_status = SUCCESS;
-               found++;
-
-               break;
-
-       } 
-       spin_unlock(&ha->list_lock);
-
-
-       /*
-        * Our SP pointer points at the command we want to remove from the
-        * pending queue providing we haven't already sent it to the adapter.
-        */
-       if (!found) {
-               DEBUG3(printk("qla2xxx_eh_abort: searching sp %p "
-                   "in pending queue.\n", sp);)
-
-               spin_lock(&vis_ha->list_lock);
-               list_for_each_safe(list, temp, &vis_ha->pending_queue) {
-                       rp = list_entry(list, srb_t, list);
-
-                       if (rp->cmd != cmd)
-                               continue;
-
-                       /* Remove srb from LUN queue. */
-                       rp->flags |=  SRB_ABORTED;
-
-                       DEBUG2(printk("qla2xxx_eh_abort: Cmd in pending queue."
-                           " serial_number %ld.\n",
-                           sp->cmd->serial_number);)
-
-                       __del_from_pending_queue(vis_ha, rp);
-                       cmd->result = DID_ABORT << 16;
-
-                       __add_to_done_queue(vis_ha, rp);
-
-                       return_status = SUCCESS;
-
-                       found++;
-                       break;
-               } /* list_for_each_safe() */
-               spin_unlock(&vis_ha->list_lock);
-       } /*End of if !found */
-
-       if (!found) {  /* find the command in our active list */
-               DEBUG3(printk("qla2xxx_eh_abort: searching sp %p "
-                   "in outstanding queue.\n", sp);)
-
-               spin_lock(&ha->hardware_lock);
-               for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
-                       sp = ha->outstanding_cmds[i];
-
-                       if (sp == NULL)
-                               continue;
-
-                       if (sp->cmd != cmd)
-                               continue;
-
-                       DEBUG2(printk("qla2xxx_eh_abort(%ld): aborting sp %p "
-                           "from RISC. pid=%ld sp->state=%x q->q_flag=%lx\n",
-                           ha->host_no, sp, sp->cmd->serial_number,
-                           sp->state, q->q_flag);)
-                       DEBUG(qla2x00_print_scsi_cmd(cmd);)
-
-                       /* Get a reference to the sp and drop the lock.*/
-                       sp_get(ha, sp);
-
-                       spin_unlock(&ha->hardware_lock);
-                       spin_unlock_irq(ha->host->host_lock);
-
-                       if (qla2x00_abort_command(ha, sp)) {
-                               DEBUG2(printk("qla2xxx_eh_abort: abort_command "
-                                   "mbx failed.\n");)
-                               return_status = FAILED;
-                       } else {
-                               DEBUG3(printk("qla2xxx_eh_abort: abort_command "
-                                   " mbx success.\n");)
-                               return_status = SUCCESS;
-                       }
-
-                       sp_put(ha,sp);
-
-                       spin_lock_irq(ha->host->host_lock);
-                       spin_lock(&ha->hardware_lock);
-
-                       /*
-                        * Regardless of mailbox command status, go check on
-                        * done queue just in case the sp is already done.
-                        */
-                       break;
-
-               }/*End of for loop */
-               spin_unlock(&ha->hardware_lock);
-
-       } /*End of if !found */
-
-       /* Waiting for our command in done_queue to be returned to OS.*/
-       if (qla2x00_eh_wait_on_command(ha, cmd) != 0) {
-               DEBUG2(printk("qla2xxx_eh_abort: cmd returned back to OS.\n");)
-               return_status = SUCCESS;
-       }
-
-       if (return_status == FAILED) {
-               qla_printk(KERN_INFO, ha, 
-                       "qla2xxx_eh_abort Exiting: status=Failed\n");
-               return FAILED;
-       }
-
-       DEBUG2(printk("qla2xxx_eh_abort: Exiting. return_status=0x%x.\n",
-           return_status));
-
-       return return_status;
-}
+       return ret;
+}
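The abort handler now locates its request by scanning the adapter's fixed outstanding-command table under the hardware lock, instead of walking driver-private queues. The lookup pattern, condensed from the hunk above:

        srb_t *sp = NULL;
        int i;

        spin_lock(&ha->hardware_lock);
        for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
                if (ha->outstanding_cmds[i] &&
                    ha->outstanding_cmds[i]->cmd == cmd) {
                        sp = ha->outstanding_cmds[i];   /* still owned by the RISC */
                        break;
                }
        }
        spin_unlock(&ha->hardware_lock);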
 
 /**************************************************************************
 * qla2x00_eh_wait_for_pending_target_commands
@@ -1313,8 +607,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
                                        break;
                                }
                        }
-               }
-               else {
+               } else {
                        spin_unlock(&ha->hardware_lock);
                }
        }
@@ -1344,96 +637,42 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
 int
 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 {
-       int             return_status;
-       unsigned int    b, t, l;
-       scsi_qla_host_t *ha;
-       os_tgt_t        *tq;
-       os_lun_t        *lq;
-       fc_port_t       *fcport_to_reset;
-       srb_t           *rp;
-       struct list_head *list, *temp;
-
-       return_status = FAILED;
-       if (cmd == NULL) {
-               printk(KERN_INFO
-                   "%s(): **** SCSI mid-layer passing in NULL cmd\n",
-                   __func__);
-
-               return (return_status);
-       }
-
-       b = cmd->device->channel;
-       t = cmd->device->id;
-       l = cmd->device->lun;
-       ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
-
-       tq = TGT_Q(ha, t);
-       if (tq == NULL) {
-               qla_printk(KERN_INFO, ha,
-                   "%s(): **** CMD derives a NULL TGT_Q\n", __func__);
+       scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
+       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+       srb_t *sp;
+       int ret;
+       unsigned int id, lun;
+       unsigned long serial;
 
-               return (return_status);
-       }
-       lq = (os_lun_t *)LUN_Q(ha, t, l);
-       if (lq == NULL) {
-               printk(KERN_INFO
-                   "%s(): **** CMD derives a NULL LUN_Q\n", __func__);
+       ret = FAILED;
 
-               return (return_status);
-       }
-       fcport_to_reset = lq->fclun->fcport;
+       id = cmd->device->id;
+       lun = cmd->device->lun;
+       serial = cmd->serial_number;
 
-       /* If we are coming in from the back-door, stall I/O until complete. */
-       if (!cmd->device->host->eh_active)
-               set_bit(TQF_SUSPENDED, &tq->flags);
+       sp = (srb_t *) CMD_SP(cmd);
+       if (!sp || !fcport)
+               return ret;
 
        qla_printk(KERN_INFO, ha,
-           "scsi(%ld:%d:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, b, t, l);
-
-       DEBUG2(printk(KERN_INFO
-           "scsi(%ld): DEVICE_RESET cmd=%p jiffies = 0x%lx, timeout=%x, "
-           "dpc_flags=%lx, status=%x allowed=%d cmd.state=%x\n",
-           ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
-           ha->dpc_flags, cmd->result, cmd->allowed, cmd->state));
-
-       /* Clear commands from the retry queue. */
-       spin_lock(&ha->list_lock);
-       list_for_each_safe(list, temp, &ha->retry_queue) {
-               rp = list_entry(list, srb_t, list);
-               if (t != rp->cmd->device->id) 
-                       continue;
-               DEBUG2(printk(KERN_INFO
-                   "qla2xxx_eh_reset: found in retry queue. SP=%p\n", rp));
-               __del_from_retry_queue(ha, rp);
-               rp->cmd->result = DID_RESET << 16;
-               __add_to_done_queue(ha, rp);
-       }
-       spin_unlock(&ha->list_lock);
+           "scsi(%ld:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, id, lun);
 
        spin_unlock_irq(ha->host->host_lock);
 
        if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
-               DEBUG2(printk(KERN_INFO
-                   "%s failed:board disabled\n",__func__));
-
                spin_lock_irq(ha->host->host_lock);
                goto eh_dev_reset_done;
        }
 
        if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
-               if (qla2x00_device_reset(ha, fcport_to_reset) == 0) {
-                       return_status = SUCCESS;
-               }
+               if (qla2x00_device_reset(ha, fcport) == 0)
+                       ret = SUCCESS;
 
 #if defined(LOGOUT_AFTER_DEVICE_RESET)
-               if (return_status == SUCCESS) {
-                       if (fcport_to_reset->flags & FC_FABRIC_DEVICE) {
-                               qla2x00_fabric_logout(ha,
-                                   fcport_to_reset->loop_id);
-                               qla2x00_mark_device_lost(ha, fcport_to_reset);
+               if (ret == SUCCESS) {
+                       if (fcport->flags & FC_FABRIC_DEVICE) {
+                               qla2x00_fabric_logout(ha, fcport->loop_id);
+                               qla2x00_mark_device_lost(ha, fcport);
                        }
                }
 #endif
@@ -1442,9 +681,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
                    "%s failed: loop not ready\n",__func__);)
        }
 
-       spin_lock_irq(ha->host->host_lock);
-
-       if (return_status == FAILED) {
+       if (ret == FAILED) {
                DEBUG3(printk("%s(%ld): device reset failed\n",
                    __func__, ha->host_no));
                qla_printk(KERN_INFO, ha, "%s: device reset failed\n",
@@ -1458,10 +695,10 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
         * complete for the device.
         */
        if (cmd->device->host->eh_active) {
-               if (qla2x00_eh_wait_for_pending_target_commands(ha, t))
-                       return_status = FAILED;
+               if (qla2x00_eh_wait_for_pending_target_commands(ha, id))
+                       ret = FAILED;
 
-               if (return_status == FAILED) {
+               if (ret == FAILED) {
                        DEBUG3(printk("%s(%ld): failed while waiting for "
                            "commands\n", __func__, ha->host_no));
                        qla_printk(KERN_INFO, ha,
@@ -1473,15 +710,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
        }
 
        qla_printk(KERN_INFO, ha,
-           "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
-           ha->host_no, b, t, l);
+           "scsi(%ld:%d:%d): DEVICE RESET SUCCEEDED.\n", ha->host_no, id, lun);
 
 eh_dev_reset_done:
+       spin_lock_irq(ha->host->host_lock);
 
-       if (!cmd->device->host->eh_active)
-               clear_bit(TQF_SUSPENDED, &tq->flags);
-
-       return (return_status);
+       return ret;
 }
 
 /**************************************************************************
@@ -1549,44 +783,52 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
 int
 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 {
-       scsi_qla_host_t *ha;
+       scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
+       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
        srb_t *sp;
-       int rval = FAILED;
+       int ret;
+       unsigned int id, lun;
+       unsigned long serial;
+
+       ret = FAILED;
+
+       id = cmd->device->id;
+       lun = cmd->device->lun;
+       serial = cmd->serial_number;
 
-       ha = (scsi_qla_host_t *) cmd->device->host->hostdata;
        sp = (srb_t *) CMD_SP(cmd);
+       if (!sp || !fcport)
+               return ret;
 
        qla_printk(KERN_INFO, ha,
-           "scsi(%ld:%d:%d:%d): LOOP RESET ISSUED.\n", ha->host_no,
-           cmd->device->channel, cmd->device->id, cmd->device->lun);
+           "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun);
 
        spin_unlock_irq(ha->host->host_lock);
 
        if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
                DEBUG2(printk("%s failed:board disabled\n",__func__));
-               spin_lock_irq(ha->host->host_lock);
-               return FAILED;
+               goto eh_bus_reset_done;
        }
 
        if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
-               if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 
-                       rval = SUCCESS;
+               if (qla2x00_loop_reset(ha) == QLA_SUCCESS)
+                       ret = SUCCESS;
        }
-
-       spin_lock_irq(ha->host->host_lock);
-       if (rval == FAILED)
-               goto out;
+       if (ret == FAILED)
+               goto eh_bus_reset_done;
 
        /* Waiting for our command in done_queue to be returned to OS.*/
        if (cmd->device->host->eh_active)
                if (!qla2x00_eh_wait_for_pending_commands(ha))
-                       rval = FAILED;
+                       ret = FAILED;
 
- out:
+eh_bus_reset_done:
        qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
-                       (rval == FAILED) ? "failed" : "succeded");
+           (ret == FAILED) ? "failed" : "succeeded");
 
-       return rval;
+       spin_lock_irq(ha->host->host_lock);
+
+       return ret;
 }
 
 /**************************************************************************
@@ -1607,24 +849,30 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 int
 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 {
-       scsi_qla_host_t *ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
-       int             rval = SUCCESS;
+       scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
+       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+       srb_t *sp;
+       int ret;
+       unsigned int id, lun;
+       unsigned long serial;
 
-       /* Display which one we're actually resetting for debug. */
-       DEBUG(printk("qla2xxx_eh_host_reset:Resetting scsi(%ld).\n",
-           ha->host_no));
+       ret = FAILED;
+
+       id = cmd->device->id;
+       lun = cmd->device->lun;
+       serial = cmd->serial_number;
+
+       sp = (srb_t *) CMD_SP(cmd);
+       if (!sp || !fcport)
+               return ret;
 
-       /*
-        *  Now issue reset.
-        */
        qla_printk(KERN_INFO, ha,
-           "scsi(%ld:%d:%d:%d): ADAPTER RESET issued.\n", ha->host_no,
-           cmd->device->channel, cmd->device->id, cmd->device->lun);
+           "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun);
 
        spin_unlock_irq(ha->host->host_lock);
 
        if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
-               goto board_disabled;
+               goto eh_host_reset_lock;
 
        /*
         * Fixme-may be dpc thread is active and processing
@@ -1634,7 +882,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
         * devices as lost kicking of the port_down_timer
         * while dpc is stuck for the mailbox to complete.
         */
-       /* Blocking call-Does context switching if loop is Not Ready */
        qla2x00_wait_for_loop_ready(ha);
        set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
        if (qla2x00_abort_isp(ha)) {
@@ -1643,32 +890,22 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
                set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
 
                if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
-                       goto board_disabled;
+                       goto eh_host_reset_lock;
        } 
-
        clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
 
-       spin_lock_irq(ha->host->host_lock);
-       if (rval == FAILED)
-               goto out;
-
        /* Waiting for our command in done_queue to be returned to OS.*/
-       if (!qla2x00_eh_wait_for_pending_commands(ha))
-               rval = FAILED;
+       if (qla2x00_eh_wait_for_pending_commands(ha))
+               ret = SUCCESS;
 
- out:
-       qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
-                       (rval == FAILED) ? "failed" : "succeded");
-
-       return rval;
-
- board_disabled:
+eh_host_reset_lock:
        spin_lock_irq(ha->host->host_lock);
 
-       qla_printk(KERN_INFO, ha, "%s: failed:board disabled\n", __func__);
-       return FAILED;
-}
+       qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
+           (ret == FAILED) ? "failed" : "succeeded");
 
+       return ret;
+}
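All four error handlers touched by this patch share one locking pattern, visible in the unlock/relock pairs above: they are entered with the Scsi_Host lock held by the 2.6-era mid-layer, drop it around blocking recovery work, and re-take it before returning SUCCESS or FAILED. In outline, with do_recovery() as a hypothetical blocking helper:

static int example_eh_handler(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *shost = cmd->device->host;
        int ret = FAILED;

        spin_unlock_irq(shost->host_lock);      /* permit sleeping waits below */

        if (do_recovery(cmd))                   /* hypothetical blocking helper */
                ret = SUCCESS;

        spin_lock_irq(shost->host_lock);        /* restore the caller's locking */
        return ret;
}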
 
 /*
 * qla2x00_loop_reset
@@ -1684,25 +921,20 @@ static int
 qla2x00_loop_reset(scsi_qla_host_t *ha)
 {
        int status = QLA_SUCCESS;
-       uint16_t t;
-       os_tgt_t        *tq;
+       struct fc_port *fcport;
 
        if (ha->flags.enable_lip_reset) {
                status = qla2x00_lip_reset(ha);
        }
 
        if (status == QLA_SUCCESS && ha->flags.enable_target_reset) {
-               for (t = 0; t < MAX_FIBRE_DEVICES; t++) {
-                       if ((tq = TGT_Q(ha, t)) == NULL)
-                               continue;
-
-                       if (tq->fcport == NULL)
+               list_for_each_entry(fcport, &ha->fcports, list) {
+                       if (fcport->port_type != FCT_TARGET)
                                continue;
 
-                       status = qla2x00_target_reset(ha, 0, t);
-                       if (status != QLA_SUCCESS) {
+                       status = qla2x00_target_reset(ha, fcport);
+                       if (status != QLA_SUCCESS)
                                break;
-                       }
                }
        }
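Loop reset now walks the adapter's fcport list instead of indexing the old per-target array; list_for_each_entry() recovers each containing fc_port from the struct list_head embedded as its 'list' member, and only FCT_TARGET entries are reset. The idiom in isolation:

        fc_port_t *fcport;

        list_for_each_entry(fcport, &ha->fcports, list) {
                if (fcport->port_type != FCT_TARGET)
                        continue;               /* skip non-target ports */
                /* ... reset this target ... */
        }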
 
@@ -1752,41 +984,53 @@ qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
        return qla2x00_abort_target(reset_fcport);
 }
 
-/**************************************************************************
-* qla2xxx_slave_configure
-*
-* Description:
-**************************************************************************/
-int
-qla2xxx_slave_configure(struct scsi_device *sdev)
+static int
+qla2xxx_slave_alloc(struct scsi_device *sdev)
 {
        scsi_qla_host_t *ha = to_qla_host(sdev->host);
-       int queue_depth;
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       fc_port_t *fcport;
+       int found;
 
-       if (IS_QLA2100(ha) || IS_QLA2200(ha))
-               queue_depth = 16;
-       else
-               queue_depth = 32;
+       if (!rport)
+               return -ENXIO;
 
-       if (sdev->tagged_supported) {
-               if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
-                       queue_depth = ql2xmaxqdepth;
+       found = 0;
+       list_for_each_entry(fcport, &ha->fcports, list) {
+               if (rport->port_name ==
+                   be64_to_cpu(*(uint64_t *)fcport->port_name)) {
+                       found++;
+                       break;
+               }
+       }
+       if (!found)
+               return -ENXIO;
 
-               ql2xmaxqdepth = queue_depth;
+       sdev->hostdata = fcport;
 
-               scsi_activate_tcq(sdev, queue_depth);
+       return 0;
+}
 
-               qla_printk(KERN_INFO, ha,
-                   "scsi(%d:%d:%d:%d): Enabled tagged queuing, queue "
-                   "depth %d.\n",
-                   sdev->host->host_no, sdev->channel, sdev->id, sdev->lun,
-                   sdev->queue_depth);
-       } else {
-                scsi_adjust_queue_depth(sdev, 0 /* TCQ off */,
-                    sdev->host->hostt->cmd_per_lun /* 3 */);
-       }
+static int
+qla2xxx_slave_configure(struct scsi_device *sdev)
+{
+       scsi_qla_host_t *ha = to_qla_host(sdev->host);
+       struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
 
-       return (0);
+       if (sdev->tagged_supported)
+               scsi_activate_tcq(sdev, 32);
+       else
+               scsi_deactivate_tcq(sdev, 32);
+
+       rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+
+       return 0;
+}
+
+static void
+qla2xxx_slave_destroy(struct scsi_device *sdev)
+{
+       sdev->hostdata = NULL;
 }
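qla2xxx_slave_alloc above binds a new scsi_device to its fc_port by comparing the FC transport rport's 64-bit port name with the driver's WWPN, which is kept as an 8-byte big-endian array (hence the be64_to_cpu() cast in the hunk). A hedged sketch of just that comparison:

        /* fcport->port_name is u8[8], most significant byte first */
        u64 wwpn = be64_to_cpu(*(__be64 *)fcport->port_name);

        if (rport->port_name == wwpn)
                sdev->hostdata = fcport;        /* remember the binding for I/O */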
 
 /**
@@ -1912,6 +1156,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
        unsigned long   wait_switch = 0;
        char pci_info[20];
        char fw_str[30];
+       fc_port_t *fcport;
 
        if (pci_enable_device(pdev))
                return -1;
@@ -1937,7 +1182,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
        /* Configure PCI I/O space */
        ret = qla2x00_iospace_config(ha);
        if (ret != 0) {
-               goto probe_failed;
+               goto probe_alloc_failed;
        }
 
        /* Sanitize the information from PCI BIOS. */
@@ -1993,10 +1238,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
        INIT_LIST_HEAD(&ha->list);
        INIT_LIST_HEAD(&ha->fcports);
        INIT_LIST_HEAD(&ha->rscn_fcports);
-       INIT_LIST_HEAD(&ha->done_queue);
-       INIT_LIST_HEAD(&ha->retry_queue);
-       INIT_LIST_HEAD(&ha->scsi_retry_queue);
-       INIT_LIST_HEAD(&ha->pending_queue);
 
        /*
         * These locks are used to prevent more than one CPU
@@ -2005,7 +1246,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
         * contention for these locks.
         */
        spin_lock_init(&ha->mbx_reg_lock);
-       spin_lock_init(&ha->list_lock);
 
        ha->dpc_pid = -1;
        init_completion(&ha->dpc_inited);
@@ -2016,9 +1256,23 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
                qla_printk(KERN_WARNING, ha,
                    "[ERROR] Failed to allocate memory for adapter\n");
 
-               goto probe_failed;
+               goto probe_alloc_failed;
        }
 
+       pci_set_drvdata(pdev, ha);
+       host->this_id = 255;
+       host->cmd_per_lun = 3;
+       host->unique_id = ha->instance;
+       host->max_cmd_len = MAX_CMDSZ;
+       host->max_channel = ha->ports - 1;
+       host->max_id = ha->max_targets;
+       host->max_lun = ha->max_luns;
+       host->transportt = qla2xxx_transport_template;
+       if (scsi_add_host(host, &pdev->dev))
+               goto probe_alloc_failed;
+
+       qla2x00_alloc_sysfs_attr(ha);
+
        if (qla2x00_initialize_adapter(ha) &&
            !(ha->device_flags & DFLG_NO_CABLE)) {
 
@@ -2032,6 +1286,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
                goto probe_failed;
        }
 
+       qla2x00_init_host_attr(ha);
+
        /*
         * Startup the kernel thread for this host adapter
         */
@@ -2045,16 +1301,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
        }
        wait_for_completion(&ha->dpc_inited);
 
-       host->this_id = 255;
-       host->cmd_per_lun = 3;
-       host->max_cmd_len = MAX_CMDSZ;
-       host->max_channel = ha->ports - 1;
-       host->max_lun = ha->max_luns;
-       BUG_ON(qla2xxx_transport_template == NULL);
-       host->transportt = qla2xxx_transport_template;
-       host->unique_id = ha->instance;
-       host->max_id = ha->max_targets;
-
        if (IS_QLA2100(ha) || IS_QLA2200(ha))
                ret = request_irq(host->irq, qla2100_intr_handler,
                    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
@@ -2115,21 +1361,9 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
                msleep(10);
        }
 
-       pci_set_drvdata(pdev, ha);
        ha->flags.init_done = 1;
        num_hosts++;
 
-       /* List the target we have found */
-       if (displayConfig) {
-               qla2x00_display_fc_names(ha);
-       }
-
-       if (scsi_add_host(host, &pdev->dev))
-               goto probe_failed;
-
-       sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
-       sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
-
        qla_printk(KERN_INFO, ha, "\n"
            " QLogic Fibre Channel HBA Driver: %s\n"
            "  QLogic %s - %s\n"
@@ -2139,12 +1373,18 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
            pci_name(ha->pdev), ha->flags.enable_64bit_addressing ? '+': '-',
            ha->host_no, qla2x00_get_fw_version_str(ha, fw_str));
 
-       if (ql2xdoinitscan)
-               scsi_scan_host(host);
+       /* Go with fc_rport registration. */
+       list_for_each_entry(fcport, &ha->fcports, list)
+               qla2x00_reg_remote_port(ha, fcport);
 
        return 0;
 
 probe_failed:
+       fc_remove_host(ha->host);
+
+       scsi_remove_host(host);
+
+probe_alloc_failed:
        qla2x00_free_device(ha);
 
        scsi_host_put(host);
@@ -2162,9 +1402,9 @@ void qla2x00_remove_one(struct pci_dev *pdev)
 
        ha = pci_get_drvdata(pdev);
 
-       sysfs_remove_bin_file(&ha->host->shost_gendev.kobj,
-           &sysfs_fw_dump_attr);
-       sysfs_remove_bin_file(&ha->host->shost_gendev.kobj, &sysfs_nvram_attr);
+       qla2x00_free_sysfs_attr(ha);
+
+       fc_remove_host(ha->host);
 
        scsi_remove_host(ha->host);
 
@@ -2225,590 +1465,99 @@ qla2x00_free_device(scsi_qla_host_t *ha)
        pci_disable_device(ha->pdev);
 }
 
-
 /*
- * The following support functions are adopted to handle
- * the re-entrant qla2x00_proc_info correctly.
+ * qla2x00_mark_device_lost Updates fcport state when device goes offline.
+ *
+ * Input: ha = adapter block pointer.  fcport = port structure pointer.
+ *
+ * Return: None.
+ *
+ * Context:
  */
-static void
-copy_mem_info(struct info_str *info, char *data, int len)
+void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
+    int do_login)
 {
-       if (info->pos + len > info->offset + info->length)
-               len = info->offset + info->length - info->pos;
+       if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
+               fc_remote_port_block(fcport->rport);
+       /* 
+        * We may need to retry the login, so don't change the state of the
+        * port but do the retries.
+        */
+       if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
+               atomic_set(&fcport->state, FCS_DEVICE_LOST);
 
-       if (info->pos + len < info->offset) {
-               info->pos += len;
+       if (!do_login)
                return;
-       }
-       if (info->pos < info->offset) {
-               off_t partial;
-               partial = info->offset - info->pos;
-               data += partial;
-               info->pos += partial;
-               len  -= partial;
-       }
-       if (len > 0) {
-               memcpy(info->buffer, data, len);
-               info->pos += len;
-               info->buffer += len;
+
+       if (fcport->login_retry == 0) {
+               fcport->login_retry = ha->login_retry_count;
+               set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
+
+               DEBUG(printk("scsi(%ld): Port login retry: "
+                   "%02x%02x%02x%02x%02x%02x%02x%02x, "
+                   "id = 0x%04x retry cnt=%d\n",
+                   ha->host_no,
+                   fcport->port_name[0],
+                   fcport->port_name[1],
+                   fcport->port_name[2],
+                   fcport->port_name[3],
+                   fcport->port_name[4],
+                   fcport->port_name[5],
+                   fcport->port_name[6],
+                   fcport->port_name[7],
+                   fcport->loop_id,
+                   fcport->login_retry));
        }
 }
 
-static int
-copy_info(struct info_str *info, char *fmt, ...)
+/*
+ * qla2x00_mark_all_devices_lost
+ *     Updates fcport state when device goes offline.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *     fcport = port structure pointer.
+ *
+ * Return:
+ *     None.
+ *
+ * Context:
+ */
+void
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha) 
 {
-       va_list args;
-       char buf[256];
-       int len;
-       va_start(args, fmt);
-       len = vsprintf(buf, fmt, args);
-       va_end(args);
-       copy_mem_info(info, buf, len);
-
-       return (len);
+       fc_port_t *fcport;
+
+       list_for_each_entry(fcport, &ha->fcports, list) {
+               if (fcport->port_type != FCT_TARGET)
+                       continue;
+
+               /*
+                * No point in marking the device as lost, if the device is
+                * already DEAD.
+                */
+               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
+                       continue;
+               if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
+                       fc_remote_port_block(fcport->rport);
+               atomic_set(&fcport->state, FCS_DEVICE_LOST);
+       }
 }
 
-/*************************************************************************
-* qla2x00_proc_info
-*
-* Description:
-*   Return information to handle /proc support for the driver.
+/*
+* qla2x00_mem_alloc
+*      Allocates adapter memory.
 *
-* inout : decides the direction of the dataflow and the meaning of the
-*         variables
-* buffer: If inout==0 data is being written to it else read from it
-*         (ptr to a page buffer)
-* *start: If inout==0 start of the valid data in the buffer
-* offset: If inout==0 starting offset from the beginning of all
-*         possible data to return.
-* length: If inout==0 max number of bytes to be written into the buffer
-*         else number of bytes in "buffer"
 * Returns:
-*         < 0:  error. errno value.
-*         >= 0: sizeof data returned.
-*************************************************************************/
-int
-qla2x00_proc_info(struct Scsi_Host *shost, char *buffer,
-    char **start, off_t offset, int length, int inout)
+*      0  = success.
+*      1  = failure.
+*/
+static uint8_t
+qla2x00_mem_alloc(scsi_qla_host_t *ha)
 {
-       struct info_str info;
-       int             retval = -EINVAL;
-       os_lun_t        *up;
-       os_tgt_t        *tq;
-       unsigned int    t, l;
-       uint32_t        tmp_sn;
-       uint32_t        *flags;
-       uint8_t         *loop_state;
-       scsi_qla_host_t *ha;
-       char fw_info[30];
-       DEBUG3(printk(KERN_INFO
-           "Entering proc_info buff_in=%p, offset=0x%lx, length=0x%x\n",
-           buffer, offset, length);)
-
-       ha = (scsi_qla_host_t *) shost->hostdata;
-
-       if (inout) {
-               /* Has data been written to the file? */
-               DEBUG3(printk(
-                   "%s: has data been written to the file. \n",
-                   __func__);)
-
-               return -ENOSYS;
-       }
-
-       if (start) {
-               *start = buffer;
-       }
-
-       info.buffer = buffer;
-       info.length = length;
-       info.offset = offset;
-       info.pos    = 0;
-
-       /* start building the print buffer */
-       copy_info(&info,
-           "QLogic PCI to Fibre Channel Host Adapter for %s:\n"
-           "        Firmware version %s, ",
-           ha->model_number, qla2x00_get_fw_version_str(ha, fw_info));
-
-       copy_info(&info, "Driver version %s\n", qla2x00_version_str);
-
-       tmp_sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | 
-           ha->serial1;
-       copy_info(&info, "ISP: %s, Serial# %c%05d\n",
-           ha->brd_info->isp_name, ('A' + tmp_sn/100000), (tmp_sn%100000));
-
-       copy_info(&info,
-           "Request Queue = 0x%llx, Response Queue = 0x%llx\n",
-               (unsigned long long)ha->request_dma,
-               (unsigned long long)ha->response_dma);
-
-       copy_info(&info,
-           "Request Queue count = %d, Response Queue count = %d\n",
-           ha->request_q_length, ha->response_q_length);
-
-       copy_info(&info,
-           "Total number of active commands = %ld\n",
-           ha->actthreads);
-
-       copy_info(&info,
-           "Total number of interrupts = %ld\n",
-           (long)ha->total_isr_cnt);
-
-       copy_info(&info,
-           "    Device queue depth = 0x%x\n",
-           (ql2xmaxqdepth == 0) ? 16 : ql2xmaxqdepth);
-
-       copy_info(&info,
-           "Number of free request entries = %d\n", ha->req_q_cnt);
-
-       copy_info(&info,
-           "Number of mailbox timeouts = %ld\n", ha->total_mbx_timeout);
-
-       copy_info(&info,
-           "Number of ISP aborts = %ld\n", ha->total_isp_aborts);
-
-       copy_info(&info,
-           "Number of loop resyncs = %ld\n", ha->total_loop_resync);
-
-       copy_info(&info,
-           "Number of retries for empty slots = %ld\n",
-           qla2x00_stats.outarray_full);
-
-       copy_info(&info,
-           "Number of reqs in pending_q= %ld, retry_q= %d, "
-           "done_q= %ld, scsi_retry_q= %d\n",
-           ha->qthreads, ha->retry_q_cnt,
-           ha->done_q_cnt, ha->scsi_retry_q_cnt);
-
-
-       flags = (uint32_t *) &ha->flags;
-
-       if (atomic_read(&ha->loop_state) == LOOP_DOWN) {
-               loop_state = "DOWN";
-       } else if (atomic_read(&ha->loop_state) == LOOP_UP) {
-               loop_state = "UP";
-       } else if (atomic_read(&ha->loop_state) == LOOP_READY) {
-               loop_state = "READY";
-       } else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT) {
-               loop_state = "TIMEOUT";
-       } else if (atomic_read(&ha->loop_state) == LOOP_UPDATE) {
-               loop_state = "UPDATE";
-       } else {
-               loop_state = "UNKNOWN";
-       }
-
-       copy_info(&info, 
-           "Host adapter:loop state = <%s>, flags = 0x%lx\n",
-           loop_state , *flags);
-
-       copy_info(&info, "Dpc flags = 0x%lx\n", ha->dpc_flags);
-
-       copy_info(&info, "MBX flags = 0x%x\n", ha->mbx_flags);
-
-       copy_info(&info, "Link down Timeout = %3.3d\n",
-           ha->link_down_timeout);
-
-       copy_info(&info, "Port down retry = %3.3d\n",
-           ha->port_down_retry_count);
-
-       copy_info(&info, "Login retry count = %3.3d\n",
-           ha->login_retry_count);
-
-       copy_info(&info,
-           "Commands retried with dropped frame(s) = %d\n",
-           ha->dropped_frame_error_cnt);
-
-       copy_info(&info,
-           "Product ID = %04x %04x %04x %04x\n", ha->product_id[0],
-           ha->product_id[1], ha->product_id[2], ha->product_id[3]);
-
-       copy_info(&info, "\n");
-
-       /* 2.25 node/port display to proc */
-       /* Display the node name for adapter */
-       copy_info(&info, "\nSCSI Device Information:\n");
-       copy_info(&info,
-           "scsi-qla%d-adapter-node="
-           "%02x%02x%02x%02x%02x%02x%02x%02x;\n",
-           (int)ha->instance,
-           ha->init_cb->node_name[0],
-           ha->init_cb->node_name[1],
-           ha->init_cb->node_name[2],
-           ha->init_cb->node_name[3],
-           ha->init_cb->node_name[4],
-           ha->init_cb->node_name[5],
-           ha->init_cb->node_name[6],
-           ha->init_cb->node_name[7]);
-
-       /* display the port name for adapter */
-       copy_info(&info,
-           "scsi-qla%d-adapter-port="
-           "%02x%02x%02x%02x%02x%02x%02x%02x;\n",
-           (int)ha->instance,
-           ha->init_cb->port_name[0],
-           ha->init_cb->port_name[1],
-           ha->init_cb->port_name[2],
-           ha->init_cb->port_name[3],
-           ha->init_cb->port_name[4],
-           ha->init_cb->port_name[5],
-           ha->init_cb->port_name[6],
-           ha->init_cb->port_name[7]);
-
-       /* Print out device port names */
-       for (t = 0; t < MAX_FIBRE_DEVICES; t++) {
-               if ((tq = TGT_Q(ha, t)) == NULL)
-                       continue;
-
-               copy_info(&info,
-                   "scsi-qla%d-target-%d="
-                   "%02x%02x%02x%02x%02x%02x%02x%02x;\n",
-                   (int)ha->instance, t,
-                   tq->port_name[0], tq->port_name[1],
-                   tq->port_name[2], tq->port_name[3],
-                   tq->port_name[4], tq->port_name[5],
-                   tq->port_name[6], tq->port_name[7]);
-       }
-
-       copy_info(&info, "\nSCSI LUN Information:\n");
-       copy_info(&info,
-           "(Id:Lun)  * - indicates lun is not registered with the OS.\n");
-
-       /* scan for all equipment stats */
-       for (t = 0; t < MAX_FIBRE_DEVICES; t++) {
-               /* scan all luns */
-               for (l = 0; l < ha->max_luns; l++) {
-                       up = (os_lun_t *) GET_LU_Q(ha, t, l);
-
-                       if (up == NULL) {
-                               continue;
-                       }
-                       if (up->fclun == NULL) {
-                               continue;
-                       }
-
-                       copy_info(&info,
-                           "(%2d:%2d): Total reqs %ld,",
-                           t,l,up->io_cnt);
-
-                       copy_info(&info,
-                           " Pending reqs %ld,",
-                           up->out_cnt);
-
-                       if (up->io_cnt < 4) {
-                               copy_info(&info,
-                                   " flags 0x%x*,",
-                                   (int)up->q_flag);
-                       } else {
-                               copy_info(&info,
-                                   " flags 0x%x,",
-                                   (int)up->q_flag);
-                       }
-
-                       copy_info(&info, 
-                           " %ld:%d:%02x %02x",
-                           up->fclun->fcport->ha->instance,
-                           up->fclun->fcport->cur_path,
-                           up->fclun->fcport->loop_id,
-                           up->fclun->device_type);
-
-                       copy_info(&info, "\n");
-
-                       if (info.pos >= info.offset + info.length) {
-                               /* No need to continue */
-                               goto profile_stop;
-                       }
-               }
-
-               if (info.pos >= info.offset + info.length) {
-                       /* No need to continue */
-                       break;
-               }
-       }
-
-profile_stop:
-
-       retval = info.pos > info.offset ? info.pos - info.offset : 0;
-
-       DEBUG3(printk(KERN_INFO 
-           "Exiting proc_info: info.pos=%d, offset=0x%lx, "
-           "length=0x%x\n", info.pos, offset, length);)
-
-       return (retval);
-}
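The removed proc_info implementation generates a report larger than the single page the caller provides, so copy_info()/copy_mem_info() only copy the bytes that fall inside the requested [offset, offset + length) window and merely account for the rest. The same logic, disentangled here from the interleaved '+' lines for readability (field names follow the removed struct info_str):

static void window_copy(struct info_str *info, char *data, int len)
{
        /* Clip anything that would run past the end of the window. */
        if (info->pos + len > info->offset + info->length)
                len = info->offset + info->length - info->pos;

        /* Chunk lies entirely before the window: just account for it. */
        if (info->pos + len < info->offset) {
                info->pos += len;
                return;
        }
        /* Skip the leading part that precedes the window. */
        if (info->pos < info->offset) {
                off_t partial = info->offset - info->pos;

                data += partial;
                info->pos += partial;
                len -= partial;
        }
        /* Copy what remains into the caller's buffer. */
        if (len > 0) {
                memcpy(info->buffer, data, len);
                info->pos += len;
                info->buffer += len;
        }
}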
-
-/*
-* qla2x00_display_fc_names
-*      This routine displays the node names of the different devices found
-*      after port inquiry.
-*
-* Input:
-*      cmd = SCSI command structure
-*
-* Returns:
-*      None.
-*/
-static void
-qla2x00_display_fc_names(scsi_qla_host_t *ha) 
-{
-       uint16_t        tgt;
-       os_tgt_t        *tq;
-
-       /* Display the node name for adapter */
-       qla_printk(KERN_INFO, ha,
-           "scsi-qla%d-adapter-node=%02x%02x%02x%02x%02x%02x%02x%02x\\;\n",
-           (int)ha->instance,
-           ha->init_cb->node_name[0],
-           ha->init_cb->node_name[1],
-           ha->init_cb->node_name[2],
-           ha->init_cb->node_name[3],
-           ha->init_cb->node_name[4],
-           ha->init_cb->node_name[5],
-           ha->init_cb->node_name[6],
-           ha->init_cb->node_name[7]);
-
-       /* display the port name for adapter */
-       qla_printk(KERN_INFO, ha,
-           "scsi-qla%d-adapter-port=%02x%02x%02x%02x%02x%02x%02x%02x\\;\n",
-           (int)ha->instance,
-           ha->init_cb->port_name[0],
-           ha->init_cb->port_name[1],
-           ha->init_cb->port_name[2],
-           ha->init_cb->port_name[3],
-           ha->init_cb->port_name[4],
-           ha->init_cb->port_name[5],
-           ha->init_cb->port_name[6],
-           ha->init_cb->port_name[7]);
-
-       /* Print out device port names */
-       for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
-               if ((tq = ha->otgt[tgt]) == NULL)
-                       continue;
-
-               if (tq->fcport == NULL)
-                       continue;
-
-               switch (ha->binding_type) {
-                       case BIND_BY_PORT_NAME:
-                               qla_printk(KERN_INFO, ha,
-                                   "scsi-qla%d-tgt-%d-di-0-port="
-                                   "%02x%02x%02x%02x%02x%02x%02x%02x\\;\n",
-                                   (int)ha->instance, 
-                                   tgt,
-                                   tq->port_name[0], 
-                                   tq->port_name[1],
-                                   tq->port_name[2], 
-                                   tq->port_name[3],
-                                   tq->port_name[4], 
-                                   tq->port_name[5],
-                                   tq->port_name[6], 
-                                   tq->port_name[7]);
-
-                               break;
-
-                       case BIND_BY_PORT_ID:
-                               qla_printk(KERN_INFO, ha,
-                                   "scsi-qla%d-tgt-%d-di-0-pid="
-                                   "%02x%02x%02x\\;\n",
-                                   (int)ha->instance,
-                                   tgt,
-                                   tq->d_id.b.domain,
-                                   tq->d_id.b.area,
-                                   tq->d_id.b.al_pa);
-                               break;
-               }
-
-#if VSA
-               qla_printk(KERN_INFO, ha,
-                   "scsi-qla%d-target-%d-vsa=01;\n", (int)ha->instance, tgt);
-#endif
-       }
-}
-
-/*
- *  qla2x00_suspend_lun
- *     Suspend lun and start port down timer
- *
- * Input:
- *     ha = visible adapter block pointer.
- *  lq = lun queue
- *  time = time in seconds
- *  count = number of times to let time expire
- *  delay_lun = non-zero, if lun should be delayed rather than suspended
- *
- * Return:
- *     QLA_SUCCESS  -- suspended lun 
- *     QLA_FUNCTION_FAILED  -- Didn't suspend lun
- *
- * Context:
- *     Interrupt context.
- */
-int
-__qla2x00_suspend_lun(scsi_qla_host_t *ha,
-               os_lun_t *lq, int time, int count, int delay_lun)
-{
-       int     rval;
-       srb_t *sp;
-       struct list_head *list, *temp;
-       unsigned long flags;
-
-       rval = QLA_SUCCESS;
-
-       /* if the lun_q is already suspended then don't do it again */
-       if (lq->q_state == LUN_STATE_READY ||lq->q_state == LUN_STATE_RUN) {
-
-               spin_lock_irqsave(&lq->q_lock, flags);
-               if (lq->q_state == LUN_STATE_READY) {
-                       lq->q_max = count;
-                       lq->q_count = 0;
-               }
-               /* Set the suspend time usually 6 secs */
-               atomic_set(&lq->q_timer, time);
-
-               /* now suspend the lun */
-               lq->q_state = LUN_STATE_WAIT;
-
-               if (delay_lun) {
-                       set_bit(LUN_EXEC_DELAYED, &lq->q_flag);
-                       DEBUG(printk(KERN_INFO
-                           "scsi(%ld): Delay lun execution for %d secs, "
-                           "count=%d, max count=%d, state=%d\n",
-                           ha->host_no,
-                           time,
-                           lq->q_count, lq->q_max, lq->q_state));
-               } else {
-                       DEBUG(printk(KERN_INFO
-                           "scsi(%ld): Suspend lun for %d secs, count=%d, "
-                           "max count=%d, state=%d\n",
-                           ha->host_no,
-                           time,
-                           lq->q_count, lq->q_max, lq->q_state));
-               }
-               spin_unlock_irqrestore(&lq->q_lock, flags);
-
-               /*
-                * Remove all pending commands from request queue and  put them
-                * in the scsi_retry queue.
-                */
-               spin_lock_irqsave(&ha->list_lock, flags);
-               list_for_each_safe(list, temp, &ha->pending_queue) {
-                       sp = list_entry(list, srb_t, list);
-                       if (sp->lun_queue != lq)
-                               continue;
-
-                       __del_from_pending_queue(ha, sp);
-
-                       if (sp->cmd->allowed < count)
-                               sp->cmd->allowed = count;
-                       __add_to_scsi_retry_queue(ha, sp);
-
-               } /* list_for_each_safe */
-               spin_unlock_irqrestore(&ha->list_lock, flags);
-               rval = QLA_SUCCESS;
-       } else {
-               rval = QLA_FUNCTION_FAILED;
-       }
-
-       return (rval);
-}
-
-/*
- * qla2x00_mark_device_lost Updates fcport state when device goes offline.
- *
- * Input: ha = adapter block pointer.  fcport = port structure pointer.
- *
- * Return: None.
- *
- * Context:
- */
-void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
-    int do_login)
-{
-       /* 
-        * We may need to retry the login, so don't change the state of the
-        * port but do the retries.
-        */
-       if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
-               atomic_set(&fcport->state, FCS_DEVICE_LOST);
-
-       if (!do_login)
-               return;
-
-       if (fcport->login_retry == 0) {
-               fcport->login_retry = ha->login_retry_count;
-               set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
-
-               DEBUG(printk("scsi(%ld): Port login retry: "
-                   "%02x%02x%02x%02x%02x%02x%02x%02x, "
-                   "id = 0x%04x retry cnt=%d\n",
-                   ha->host_no,
-                   fcport->port_name[0],
-                   fcport->port_name[1],
-                   fcport->port_name[2],
-                   fcport->port_name[3],
-                   fcport->port_name[4],
-                   fcport->port_name[5],
-                   fcport->port_name[6],
-                   fcport->port_name[7],
-                   fcport->loop_id,
-                   fcport->login_retry));
-       }
-}
-
-/*
- * qla2x00_mark_all_devices_lost
- *     Updates fcport state when device goes offline.
- *
- * Input:
- *     ha = adapter block pointer.
- *
- * Return:
- *     None.
- *
- * Context:
- */
-void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha) 
-{
-       fc_port_t *fcport;
-
-       list_for_each_entry(fcport, &ha->fcports, list) {
-               if (fcport->port_type != FCT_TARGET)
-                       continue;
-
-               /*
-                * No point in marking the device as lost, if the device is
-                * already DEAD.
-                */
-               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
-                       continue;
-
-               atomic_set(&fcport->state, FCS_DEVICE_LOST);
-       }
-}
-
-/*
-* qla2x00_mem_alloc
-*      Allocates adapter memory.
-*
-* Returns:
-*      0  = success.
-*      1  = failure.
-*/
-static uint8_t
-qla2x00_mem_alloc(scsi_qla_host_t *ha)
-{
-       char    name[16];
-       uint8_t   status = 1;
-       int     retry= 10;
+       char    name[16];
+       uint8_t   status = 1;
+       int     retry= 10;
 
        do {
                /*
@@ -3007,11 +1756,8 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
 static void
 qla2x00_mem_free(scsi_qla_host_t *ha)
 {
-       uint32_t        t;
        struct list_head        *fcpl, *fcptemp;
        fc_port_t       *fcport;
-       struct list_head        *fcll, *fcltemp;
-       fc_lun_t        *fclun;
        unsigned long   wtime;/* max wait time if mbx cmd is busy. */
 
        if (ha == NULL) {
@@ -3020,11 +1766,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
                return;
        }
 
-       /* Free the target queues */
-       for (t = 0; t < MAX_TARGETS; t++) {
-               qla2x00_tgt_free(ha, t);
-       }
-
        /* Make sure all other threads are stopped. */
        wtime = 60 * HZ;
        while (ha->dpc_wait && wtime) {
@@ -3103,14 +1844,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
        list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
                fcport = list_entry(fcpl, fc_port_t, list);
 
-               /* fc luns */
-               list_for_each_safe(fcll, fcltemp, &fcport->fcluns) {
-                       fclun = list_entry(fcll, fc_lun_t, list);
-
-                       list_del_init(&fclun->list);
-                       kfree(fclun);
-               }
-
                /* fc ports */
                list_del_init(&fcport->list);
                kfree(fcport);
@@ -3188,16 +1921,8 @@ qla2x00_do_dpc(void *data)
        DECLARE_MUTEX_LOCKED(sem);
        scsi_qla_host_t *ha;
        fc_port_t       *fcport;
-       os_lun_t        *q;
-       srb_t           *sp;
        uint8_t         status;
-       unsigned long   flags = 0;
-       struct list_head *list, *templist;
-       int     dead_cnt, online_cnt;
-       int     retry_cmds = 0;
        uint16_t        next_loopid;
-       int t;
-       os_tgt_t *tq;
 
        ha = (scsi_qla_host_t *)data;
 
@@ -3233,139 +1958,7 @@ qla2x00_do_dpc(void *data)
 
                ha->dpc_active = 1;
 
-               if (!list_empty(&ha->done_queue))
-                       qla2x00_done(ha);
-
-               /* Process commands in retry queue */
-               if (test_and_clear_bit(PORT_RESTART_NEEDED, &ha->dpc_flags)) {
-                       DEBUG(printk("scsi(%ld): DPC checking retry_q. "
-                           "total=%d\n",
-                           ha->host_no, ha->retry_q_cnt));
-
-                       spin_lock_irqsave(&ha->list_lock, flags);
-                       dead_cnt = online_cnt = 0;
-                       list_for_each_safe(list, templist, &ha->retry_queue) {
-                               sp = list_entry(list, srb_t, list);
-                               q = sp->lun_queue;
-                               DEBUG3(printk("scsi(%ld): pid=%ld sp=%p, "
-                                   "spflags=0x%x, q_flag= 0x%lx\n",
-                                   ha->host_no, sp->cmd->serial_number, sp,
-                                   sp->flags, q->q_flag));
-
-                               if (q == NULL)
-                                       continue;
-                               fcport = q->fclun->fcport;
-
-                               if (atomic_read(&fcport->state) ==
-                                   FCS_DEVICE_DEAD ||
-                                   atomic_read(&fcport->ha->loop_state) == LOOP_DEAD) {
-
-                                       __del_from_retry_queue(ha, sp);
-                                       sp->cmd->result = DID_NO_CONNECT << 16;
-                                       if (atomic_read(&fcport->ha->loop_state) ==
-                                           LOOP_DOWN) 
-                                               sp->err_id = SRB_ERR_LOOP;
-                                       else
-                                               sp->err_id = SRB_ERR_PORT;
-                                       sp->cmd->host_scribble =
-                                           (unsigned char *) NULL;
-                                       __add_to_done_queue(ha, sp);
-                                       dead_cnt++;
-                               } else if (atomic_read(&fcport->state) !=
-                                   FCS_DEVICE_LOST) {
-
-                                       __del_from_retry_queue(ha, sp);
-                                       sp->cmd->result = DID_BUS_BUSY << 16;
-                                       sp->cmd->host_scribble =
-                                           (unsigned char *) NULL;
-                                       __add_to_done_queue(ha, sp);
-                                       online_cnt++;
-                               }
-                       } /* list_for_each_safe() */
-                       spin_unlock_irqrestore(&ha->list_lock, flags);
-
-                       DEBUG(printk("scsi(%ld): done processing retry queue "
-                           "- dead=%d, online=%d\n ",
-                           ha->host_no, dead_cnt, online_cnt));
-               }
-
-               /* Process commands in scsi retry queue */
-               if (test_and_clear_bit(SCSI_RESTART_NEEDED, &ha->dpc_flags)) {
-                       /*
-                        * Any requests we want to delay for some period are put
-                        * in the scsi retry queue with a delay added. The
-                        * timer will schedule a "scsi_restart_needed" every 
-                        * second as long as there are requests in the scsi
-                        * queue. 
-                        */
-                       DEBUG(printk("scsi(%ld): DPC checking scsi "
-                           "retry_q.total=%d\n",
-                           ha->host_no, ha->scsi_retry_q_cnt));
-
-                       online_cnt = 0;
-                       spin_lock_irqsave(&ha->list_lock, flags);
-                       list_for_each_safe(list, templist,
-                           &ha->scsi_retry_queue) {
-
-                               sp = list_entry(list, srb_t, list);
-                               q = sp->lun_queue;
-                               tq = sp->tgt_queue;
-
-                               DEBUG3(printk("scsi(%ld): scsi_retry_q: "
-                                   "pid=%ld sp=%p, spflags=0x%x, "
-                                   "q_flag= 0x%lx,q_state=%d\n",
-                                   ha->host_no, sp->cmd->serial_number,
-                                   sp, sp->flags, q->q_flag, q->q_state));
-
-                               /* Was this lun suspended */
-                               if (q->q_state != LUN_STATE_WAIT) {
-                                       online_cnt++;
-                                       __del_from_scsi_retry_queue(ha, sp);
-
-                                       if (test_bit(TQF_RETRY_CMDS,
-                                           &tq->flags)) {
-                                               qla2x00_extend_timeout(sp->cmd,
-                                                   (sp->cmd->timeout_per_command / HZ) - QLA_CMD_TIMER_DELTA);
-                                               __add_to_pending_queue(ha, sp);
-                                               retry_cmds++;
-                                       } else
-                                               __add_to_retry_queue(ha, sp);
-                               }
-
-                               /* Was this command suspended for N secs */
-                               if (sp->delay != 0) {
-                                       sp->delay--;
-                                       if (sp->delay == 0) {
-                                               online_cnt++;
-                                               __del_from_scsi_retry_queue(
-                                                   ha, sp);
-                                               __add_to_retry_queue(ha,sp);
-                                       }
-                               }
-                       }
-                       spin_unlock_irqrestore(&ha->list_lock, flags);
-
-                       /* Clear all Target Unsuspended bits */
-                       for (t = 0; t < ha->max_targets; t++) {
-                               if ((tq = ha->otgt[t]) == NULL)
-                                       continue;
-
-                               if (test_bit(TQF_RETRY_CMDS, &tq->flags))
-                                       clear_bit(TQF_RETRY_CMDS, &tq->flags);
-                       }
-                       if (retry_cmds)
-                               qla2x00_next(ha);
-
-                       DEBUG(if (online_cnt > 0))
-                       DEBUG(printk("scsi(%ld): dpc() found scsi reqs to "
-                           "restart= %d\n",
-                           ha->host_no, online_cnt));
-               }
-
                if (ha->flags.mbox_busy) {
-                       if (!list_empty(&ha->done_queue))
-                               qla2x00_done(ha);
-
                        ha->dpc_active = 0;
                        continue;
                }
@@ -3493,28 +2086,6 @@ qla2x00_do_dpc(void *data)
                            ha->host_no));
                }
 
-
-               if (test_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags)) {
-                       DEBUG(printk("scsi(%ld): qla2x00_restart_queues()\n",
-                           ha->host_no));
-
-                       qla2x00_restart_queues(ha, 0);
-
-                       DEBUG(printk("scsi(%ld): qla2x00_restart_queues - end\n",
-                           ha->host_no));
-               }
-
-               if (test_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags)) {
-
-                       DEBUG(printk("scsi(%ld): qla2x00_abort_queues()\n",
-                           ha->host_no));
-                               
-                       qla2x00_abort_queues(ha, 0);
-
-                       DEBUG(printk("scsi(%ld): qla2x00_abort_queues - end\n",
-                           ha->host_no));
-               }
-
                if (test_and_clear_bit(FCPORT_RESCAN_NEEDED, &ha->dpc_flags)) {
 
                        DEBUG(printk("scsi(%ld): Rescan flagged fcports...\n",
@@ -3527,13 +2098,9 @@ qla2x00_do_dpc(void *data)
                            ha->host_no));
                }
 
-
                if (!ha->interrupts_on)
                        qla2x00_enable_intrs(ha);
 
-               if (!list_empty(&ha->done_queue))
-                       qla2x00_done(ha);
-
                ha->dpc_active = 0;
        } /* End of while(1) */
 
@@ -3548,45 +2115,6 @@ qla2x00_do_dpc(void *data)
        complete_and_exit(&ha->dpc_exited, 0);
 }
 
-/*
- *  qla2x00_abort_queues
- *     Abort all commands on queues on device
- *
- * Input:
- *     ha = adapter block pointer.
- *
- * Context:
- *     Interrupt context.
- */
-void
-qla2x00_abort_queues(scsi_qla_host_t *ha, uint8_t doneqflg) 
-{
-
-       srb_t       *sp;
-       struct list_head *list, *temp;
-       unsigned long flags;
-
-       clear_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
-
-       /* Return all commands device queues. */
-       spin_lock_irqsave(&ha->list_lock,flags);
-       list_for_each_safe(list, temp, &ha->pending_queue) {
-               sp = list_entry(list, srb_t, list);
-
-               if (sp->flags & SRB_ABORTED)
-                       continue;
-
-               /* Remove srb from LUN queue. */
-               __del_from_pending_queue(ha, sp);
-
-               /* Set ending status. */
-               sp->cmd->result = DID_BUS_BUSY << 16;
-
-               __add_to_done_queue(ha, sp);
-       }
-       spin_unlock_irqrestore(&ha->list_lock, flags);
-}
-
 /*
 *  qla2x00_rst_aen
 *      Processes asynchronous reset.
@@ -3632,6 +2160,36 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha)
        return (sp);
 }
 
+static void
+qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
+{
+       struct scsi_cmnd *cmd = sp->cmd;
+
+       if (sp->flags & SRB_DMA_VALID) {
+               if (cmd->use_sg) {
+                       dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
+                           cmd->use_sg, cmd->sc_data_direction);
+               } else if (cmd->request_bufflen) {
+                       dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
+                           cmd->request_bufflen, cmd->sc_data_direction);
+               }
+               sp->flags &= ~SRB_DMA_VALID;
+       }
+}
+
+void
+qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
+{
+       struct scsi_cmnd *cmd = sp->cmd;
+
+       qla2x00_sp_free_dma(ha, sp);
+
+       CMD_SP(cmd) = NULL;
+       mempool_free(sp, ha->srb_mempool);
+
+       cmd->scsi_done(cmd);
+}
+
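A minimal sketch of how a completion path might use the two helpers added above (the wrapper name and its caller are assumptions for illustration, not code from this patch):

/*
 * Hypothetical caller: hand a finished srb back through the new helpers,
 * which unmap the command's DMA, return the srb to the mempool and call
 * the mid-layer's scsi_done().
 */
static void example_complete_io(scsi_qla_host_t *ha, srb_t *sp, int host_status)
{
	sp->cmd->result = host_status << 16;	/* e.g. DID_OK */
	qla2x00_sp_compl(ha, sp);
}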
 /**************************************************************************
 *   qla2x00_timer
 *
@@ -3643,30 +2201,12 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha)
 static void
 qla2x00_timer(scsi_qla_host_t *ha)
 {
-       int             t,l;
        unsigned long   cpu_flags = 0;
        fc_port_t       *fcport;
-       os_lun_t *lq;
-       os_tgt_t *tq;
        int             start_dpc = 0;
        int             index;
        srb_t           *sp;
-
-       /*
-        * We try and restart any request in the retry queue every second.
-        */
-       if (!list_empty(&ha->retry_queue)) {
-               set_bit(PORT_RESTART_NEEDED, &ha->dpc_flags);
-               start_dpc++;
-       }
-
-       /*
-        * We try and restart any request in the scsi_retry queue every second.
-        */
-       if (!list_empty(&ha->scsi_retry_queue)) {
-               set_bit(SCSI_RESTART_NEEDED, &ha->dpc_flags);
-               start_dpc++;
-       }
+       int             t;
 
        /*
         * Ports - Port down timer.
@@ -3696,59 +2236,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
                t++;
        } /* End of for fcport  */
 
-       /*
-        * LUNS - lun suspend timer.
-        *
-        * Whenever a lun is suspended, the timer starts decrementing its
-        * suspend timer every second until it reaches zero. Once  it reaches
-        * zero the lun retry count is decremented. 
-        */
-
-       /*
-        * FIXME(dg) - Need to convert this linear search of luns into a search
-        * of a list of suspended luns.
-        */
-       for (t = 0; t < ha->max_targets; t++) {
-               if ((tq = ha->otgt[t]) == NULL)
-                       continue;
-
-               for (l = 0; l < ha->max_luns; l++) {
-                       if ((lq = (os_lun_t *) tq->olun[l]) == NULL)
-                               continue;
-
-                       spin_lock_irqsave(&lq->q_lock, cpu_flags);
-                       if (lq->q_state == LUN_STATE_WAIT &&
-                               atomic_read(&lq->q_timer) != 0) {
-
-                               if (atomic_dec_and_test(&lq->q_timer) != 0) {
-                                       /*
-                                        * A delay should immediately
-                                        * transition to a READY state
-                                        */
-                                       if (test_and_clear_bit(LUN_EXEC_DELAYED,
-                                           &lq->q_flag)) {
-                                               lq->q_state = LUN_STATE_READY;
-                                       }
-                                       else {
-                                               lq->q_count++;
-                                               if (lq->q_count == lq->q_max)
-                                                       lq->q_state =
-                                                           LUN_STATE_TIMEOUT;
-                                               else
-                                                       lq->q_state =
-                                                           LUN_STATE_RUN;
-                                       }
-                               }
-                               DEBUG3(printk("scsi(%ld): lun%d - timer %d, "
-                                   "count=%d, max=%d, state=%d\n",
-                                   ha->host_no,
-                                   l,
-                                   atomic_read(&lq->q_timer),
-                                   lq->q_count, lq->q_max, lq->q_state));
-                       }
-                       spin_unlock_irqrestore(&lq->q_lock, cpu_flags);
-               } /* End of for luns  */
-       } /* End of for targets  */
 
        /* Loop down handler. */
        if (atomic_read(&ha->loop_down_timer) > 0 &&
@@ -3768,11 +2255,13 @@ qla2x00_timer(scsi_qla_host_t *ha)
                        spin_lock_irqsave(&ha->hardware_lock, cpu_flags);
                        for (index = 1; index < MAX_OUTSTANDING_COMMANDS;
                            index++) {
+                               fc_port_t *sfcp;
+
                                sp = ha->outstanding_cmds[index];
                                if (!sp)
                                        continue;
-                               if (!(sp->fclun->fcport->flags &
-                                   FCF_TAPE_PRESENT))
+                               sfcp = sp->fcport;
+                               if (!(sfcp->flags & FCF_TAPE_PRESENT))
                                        continue;
 
                                set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
@@ -3808,19 +2297,12 @@ qla2x00_timer(scsi_qla_host_t *ha)
                    atomic_read(&ha->loop_down_timer)));
        }
 
-       /*
-        * Done Q Handler -- dgFIXME This handler will kick off doneq if we
-        * haven't processed it in 2 seconds.
-        */
-       if (!list_empty(&ha->done_queue))
-               qla2x00_done(ha);
-
-
        /* Schedule the DPC routine if needed */
        if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
            test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
            start_dpc ||
            test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
+           test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
            test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
            ha->dpc_wait && !ha->dpc_active) {
 
@@ -3830,496 +2312,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
        qla2x00_restart_timer(ha, WATCH_INTERVAL);
 }
 
-/*
- * qla2x00_extend_timeout
- *      This routine will extend the timeout to the specified value.
- *
- * Input:
- *      cmd = SCSI command structure
- *
- * Returns:
- *      None.
- */
-void 
-qla2x00_extend_timeout(struct scsi_cmnd *cmd, int timeout) 
-{
-       srb_t *sp = (srb_t *) CMD_SP(cmd);
-       u_long our_jiffies = (timeout * HZ) + jiffies;
-
-       sp->ext_history= 0; 
-       sp->e_start = jiffies;
-       if (cmd->eh_timeout.function) {
-               mod_timer(&cmd->eh_timeout,our_jiffies);
-               sp->ext_history |= 1;
-       }
-       if (sp->timer.function != NULL) {
-               /* 
-                * Our internal timer should timeout before the midlayer has a
-                * chance to begin the abort process
-                */
-               mod_timer(&sp->timer,our_jiffies - (QLA_CMD_TIMER_DELTA * HZ));
-
-               sp->ext_history |= 2;
-       }
-}
-
-/**************************************************************************
-*   qla2x00_cmd_timeout
-*
-* Description:
-*       Handles the command if it times out in any state.
-*
-* Input:
-*     sp - pointer to validate
-*
-* Returns:
-* None.
-* Note: Need to add support for if (sp->state == SRB_FAILOVER_STATE).
-**************************************************************************/
-void
-qla2x00_cmd_timeout(srb_t *sp)
-{
-       int t, l;
-       int processed;
-       scsi_qla_host_t *vis_ha, *dest_ha;
-       struct scsi_cmnd *cmd;
-       unsigned long flags, cpu_flags;
-       fc_port_t *fcport;
-
-       cmd = sp->cmd;
-       vis_ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
-
-       DEBUG3(printk("cmd_timeout: Entering sp->state = %x\n", sp->state));
-
-       t = cmd->device->id;
-       l = cmd->device->lun;
-       fcport = sp->fclun->fcport;
-       dest_ha = sp->ha;
-
-       /*
-        * If IO is found either in retry Queue 
-        *    OR in Lun Queue
-        * Return this IO back to host
-        */
-       spin_lock_irqsave(&vis_ha->list_lock, flags);
-       processed = 0;
-       if (sp->state == SRB_PENDING_STATE) {
-               __del_from_pending_queue(vis_ha, sp);
-               DEBUG2(printk("scsi(%ld): Found in Pending queue pid %ld, "
-                   "State = %x., fcport state=%d sjiffs=%lx njiffs=%lx\n",
-                   vis_ha->host_no, cmd->serial_number, sp->state,
-                   atomic_read(&fcport->state), sp->r_start, jiffies));
-
-               /*
-                * If FC_DEVICE is marked as dead return the cmd with
-                * DID_NO_CONNECT status.  Otherwise set the host_byte to
-                * DID_BUS_BUSY to let the OS  retry this cmd.
-                */
-               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-                   atomic_read(&fcport->ha->loop_state) == LOOP_DEAD) {
-                       cmd->result = DID_NO_CONNECT << 16;
-                       if (atomic_read(&fcport->ha->loop_state) == LOOP_DOWN) 
-                               sp->err_id = SRB_ERR_LOOP;
-                       else
-                               sp->err_id = SRB_ERR_PORT;
-               } else {
-                       cmd->result = DID_BUS_BUSY << 16;
-               }
-               __add_to_done_queue(vis_ha, sp);
-               processed++;
-       } 
-       spin_unlock_irqrestore(&vis_ha->list_lock, flags);
-
-       if (processed) {
-               qla2x00_done(vis_ha);
-               return;
-       }
-
-       spin_lock_irqsave(&dest_ha->list_lock, flags);
-       if ((sp->state == SRB_RETRY_STATE) ||
-           (sp->state == SRB_SCSI_RETRY_STATE)) {
-
-               DEBUG2(printk("scsi(%ld): Found in (Scsi) Retry queue or "
-                   "failover Q pid %ld, State = %x., fcport state=%d "
-                   "jiffies=%lx retried=%d\n",
-                   dest_ha->host_no, cmd->serial_number, sp->state,
-                   atomic_read(&fcport->state), jiffies, cmd->retries));
-
-               if ((sp->state == SRB_RETRY_STATE)) {
-                       __del_from_retry_queue(dest_ha, sp);
-               } else if ((sp->state == SRB_SCSI_RETRY_STATE)) {
-                       __del_from_scsi_retry_queue(dest_ha, sp);
-               } 
-
-               /*
-                * If FC_DEVICE is marked as dead return the cmd with
-                * DID_NO_CONNECT status.  Otherwise set the host_byte to
-                * DID_BUS_BUSY to let the OS  retry this cmd.
-                */
-               if ((atomic_read(&fcport->state) == FCS_DEVICE_DEAD) ||
-                   atomic_read(&dest_ha->loop_state) == LOOP_DEAD) {
-                       qla2x00_extend_timeout(cmd, EXTEND_CMD_TIMEOUT);
-                       cmd->result = DID_NO_CONNECT << 16;
-                       if (atomic_read(&dest_ha->loop_state) == LOOP_DOWN) 
-                               sp->err_id = SRB_ERR_LOOP;
-                       else
-                               sp->err_id = SRB_ERR_PORT;
-               } else {
-                       cmd->result = DID_BUS_BUSY << 16;
-               }
-
-               __add_to_done_queue(dest_ha, sp);
-               processed++;
-       } 
-       spin_unlock_irqrestore(&dest_ha->list_lock, flags);
-
-       if (processed) {
-               qla2x00_done(dest_ha);
-               return;
-       }
-
-       spin_lock_irqsave(&dest_ha->list_lock, cpu_flags);
-       if (sp->state == SRB_DONE_STATE) {
-               /* IO in done_q  -- leave it */
-               DEBUG(printk("scsi(%ld): Found in Done queue pid %ld sp=%p.\n",
-                   dest_ha->host_no, cmd->serial_number, sp));
-       } else if (sp->state == SRB_SUSPENDED_STATE) {
-               DEBUG(printk("scsi(%ld): Found SP %p in suspended state  "
-                   "- pid %ld:\n",
-                   dest_ha->host_no, sp, cmd->serial_number));
-               DEBUG(qla2x00_dump_buffer((uint8_t *)sp, sizeof(srb_t));)
-       } else if (sp->state == SRB_ACTIVE_STATE) {
-               /*
-                * IO is with ISP find the command in our active list.
-                */
-               spin_unlock_irqrestore(&dest_ha->list_lock, cpu_flags);
-               spin_lock_irqsave(&dest_ha->hardware_lock, flags);
-               if (sp == dest_ha->outstanding_cmds[
-                   (unsigned long)sp->cmd->host_scribble]) {
-
-                       DEBUG(printk("cmd_timeout: Found in ISP \n"));
-
-                       if (sp->flags & SRB_TAPE) {
-                               /*
-                                * We cannot allow the midlayer error handler
-                                * to wakeup and begin the abort process.
-                                * Extend the timer so that the firmware can
-                                * properly return the IOCB.
-                                */
-                               DEBUG(printk("cmd_timeout: Extending timeout "
-                                   "of FCP2 tape command!\n"));
-                               qla2x00_extend_timeout(sp->cmd,
-                                   EXTEND_CMD_TIMEOUT);
-                       }
-                       sp->state = SRB_ACTIVE_TIMEOUT_STATE;
-                       spin_unlock_irqrestore(&dest_ha->hardware_lock, flags);
-               } else {
-                       spin_unlock_irqrestore(&dest_ha->hardware_lock, flags);
-                       printk(KERN_INFO 
-                               "qla_cmd_timeout: State indicates it is with "
-                               "ISP, But not in active array\n");
-               }
-               spin_lock_irqsave(&dest_ha->list_lock, cpu_flags);
-       } else if (sp->state == SRB_ACTIVE_TIMEOUT_STATE) {
-               DEBUG(printk("qla2100%ld: Found in Active timeout state"
-                               "pid %ld, State = %x., \n",
-                               dest_ha->host_no,
-                               sp->cmd->serial_number, sp->state);)
-       } else {
-               /* EMPTY */
-               DEBUG2(printk("cmd_timeout%ld: LOST command state = "
-                               "0x%x, sp=%p\n",
-                               vis_ha->host_no, sp->state,sp);)
-
-               qla_printk(KERN_INFO, vis_ha,
-                       "cmd_timeout: LOST command state = 0x%x\n", sp->state);
-       }
-       spin_unlock_irqrestore(&dest_ha->list_lock, cpu_flags);
-
-       DEBUG3(printk("cmd_timeout: Leaving\n");)
-}
-
-/**************************************************************************
-* qla2x00_done
-*      Process completed commands.
-*
-* Input:
-*      old_ha           = adapter block pointer.
-*
-**************************************************************************/
-void
-qla2x00_done(scsi_qla_host_t *old_ha)
-{
-       os_lun_t        *lq;
-       struct scsi_cmnd *cmd;
-       unsigned long   flags = 0;
-       scsi_qla_host_t *ha;
-       scsi_qla_host_t *vis_ha;
-       int     send_marker_once = 0;
-       srb_t           *sp, *sptemp;
-       LIST_HEAD(local_sp_list);
-
-       /*
-        * Get into local queue such that we do not wind up calling done queue
-        * tasklet for the same IOs from DPC or any other place.
-        */
-       spin_lock_irqsave(&old_ha->list_lock, flags);
-       list_splice_init(&old_ha->done_queue, &local_sp_list);
-       spin_unlock_irqrestore(&old_ha->list_lock, flags);
-
-       /*
-        * All done commands are in the local queue, now do the call back.
-        */
-       list_for_each_entry_safe(sp, sptemp, &local_sp_list, list) {
-               old_ha->done_q_cnt--;
-               sp->state = SRB_NO_QUEUE_STATE;
-
-               /* remove command from local list */
-               list_del_init(&sp->list);
-
-               cmd = sp->cmd;
-               if (cmd == NULL)
-                       continue;
-
-               vis_ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
-               lq = sp->lun_queue;
-               ha = sp->ha;
-
-               if (sp->flags & SRB_DMA_VALID) {
-                       sp->flags &= ~SRB_DMA_VALID;
-
-                       /* Release memory used for this I/O */
-                       if (cmd->use_sg) {
-                               pci_unmap_sg(ha->pdev, cmd->request_buffer,
-                                   cmd->use_sg, cmd->sc_data_direction);
-                       } else if (cmd->request_bufflen) {
-                               pci_unmap_page(ha->pdev, sp->dma_handle,
-                                   cmd->request_bufflen,
-                                   cmd->sc_data_direction);
-                       }
-               }
-
-
-               switch (host_byte(cmd->result)) {
-                       case DID_OK:
-                       case DID_ERROR:
-                               break;
-
-                       case DID_RESET:
-                               /*
-                                * Set marker needed, so we don't have to
-                                * send multiple markers
-                                */
-                               if (!send_marker_once) {
-                                       ha->marker_needed = 1;
-                                       send_marker_once++;
-                               }
-
-                               /*
-                                * WORKAROUND
-                                *
-                                * A backdoor device-reset requires different
-                                * error handling.  This code differentiates
-                                * between normal error handling and the
-                                * backdoor method.
-                                *
-                                */
-                               if (ha->host->eh_active != EH_ACTIVE)
-                                       cmd->result = DID_BUS_BUSY << 16;
-                               break;
-
-
-                       case DID_ABORT:
-                               sp->flags &= ~SRB_ABORT_PENDING;
-                               sp->flags |= SRB_ABORTED;
-
-                               if (sp->flags & SRB_TIMEOUT)
-                                       cmd->result = DID_TIME_OUT << 16;
-
-                               break;
-
-                       default:
-                               DEBUG2(printk("scsi(%ld:%d:%d) %s: did_error "
-                                   "= %d, comp-scsi= 0x%x-0x%x pid=%ld.\n",
-                                   vis_ha->host_no,
-                                   cmd->device->id, cmd->device->lun,
-                                   __func__,
-                                   host_byte(cmd->result),
-                                   CMD_COMPL_STATUS(cmd),
-                                   CMD_SCSI_STATUS(cmd), cmd->serial_number));
-                               break;
-               }
-
-               /*
-                * Call the mid-level driver interrupt handler -- via sp_put()
-                */
-               sp_put(ha, sp);
-       } /* end of while */
-}
-
-/*
- * qla2x00_process_response_queue_in_zio_mode
- *     Process response queue completion as fast as possible
- *     to achieve Zero Interrupt Operations (ZIO)
- *
- * Input:
- *     ha = adapter block pointer.
- *
- * Context:
- *     Kernel context.
- */
-static inline void
-qla2x00_process_response_queue_in_zio_mode(scsi_qla_host_t *ha)
-{
-       unsigned long flags;
-
-       /* Check for unprocessed commands in response queue. */
-       if (!ha->flags.process_response_queue)
-               return;
-       if (!ha->flags.online)
-               return;
-       if (ha->response_ring_ptr->signature == RESPONSE_PROCESSED)
-               return;
-       
-       spin_lock_irqsave(&ha->hardware_lock,flags);
-       qla2x00_process_response_queue(ha);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-/*
- * qla2x00_next
- *     Retrieve and process next job in the LUN queue.
- *
- * Input:
- *     vis_ha = visible HBA block pointer.
- *     TGT_LOCK must be already obtained.
- *
- * Output:
- *     Releases TGT_LOCK upon exit.
- *
- * Context:
- *     Kernel/Interrupt context.
- * 
- * Note: This routine will always try to start I/O from visible HBA.
- */
-void
-qla2x00_next(scsi_qla_host_t *vis_ha) 
-{
-       int             rval;
-       unsigned long   flags;
-       scsi_qla_host_t *dest_ha;
-       fc_port_t       *fcport;
-       srb_t           *sp, *sptemp;
-       LIST_HEAD(local_sp_list);
-
-       dest_ha = NULL;
-
-       spin_lock_irqsave(&vis_ha->list_lock, flags);
-       list_splice_init(&vis_ha->pending_queue, &local_sp_list);
-       vis_ha->qthreads = 0;
-       spin_unlock_irqrestore(&vis_ha->list_lock, flags);
-
-       list_for_each_entry_safe(sp, sptemp, &local_sp_list, list) {
-               list_del_init(&sp->list);
-               sp->state = SRB_NO_QUEUE_STATE;
-
-               fcport = sp->fclun->fcport;
-               dest_ha = fcport->ha;
-
-               /* If device is dead then send request back to OS */
-               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) {
-                       sp->cmd->result = DID_NO_CONNECT << 16;
-                       if (atomic_read(&dest_ha->loop_state) == LOOP_DOWN) 
-                               sp->err_id = SRB_ERR_LOOP;
-                       else
-                               sp->err_id = SRB_ERR_PORT;
-
-                       DEBUG3(printk("scsi(%ld): loop/port is down - pid=%ld, "
-                           "sp=%p err_id=%d loopid=0x%x queued to dest HBA "
-                           "scsi%ld.\n", dest_ha->host_no,
-                           sp->cmd->serial_number, sp, sp->err_id,
-                           fcport->loop_id, dest_ha->host_no));
-                       /* 
-                        * Initiate a failover - done routine will initiate.
-                        */
-                       add_to_done_queue(vis_ha, sp);
-
-                       continue;
-               }
-
-               /*
-                * SCSI Kluge: Whenever, we need to wait for an event such as
-                * loop down (i.e. loop_down_timer ) or port down (i.e.  LUN
-                * request queue is suspended) then we will recycle new
-                * commands back to the SCSI layer.  We do this because this is
-                * normally a temporary condition and we don't want the
-                * mid-level scsi.c driver to get upset and start aborting
-                * commands.  The timeout value is extracted from the command
-                * minus 1-second and put on a retry queue (watchdog). Once the
-                * command times out it is returned to the mid-level with a BUSY
-                * status, so the mid-level will retry it. This process
-                * continues until the LOOP DOWN time expires or the condition
-                * goes away.
-                */
-               if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
-                   (atomic_read(&fcport->state) != FCS_ONLINE ||
-                       test_bit(ABORT_ISP_ACTIVE, &dest_ha->dpc_flags) ||
-                       atomic_read(&dest_ha->loop_state) != LOOP_READY)) {
-
-                       DEBUG3(printk("scsi(%ld): pid=%ld port=0x%x state=%d "
-                           "loop state=%d, loop counter=0x%x "
-                           "dpc_flags=0x%lx\n", sp->cmd->serial_number,
-                           dest_ha->host_no, fcport->loop_id,
-                           atomic_read(&fcport->state),
-                           atomic_read(&dest_ha->loop_state),
-                           atomic_read(&dest_ha->loop_down_timer),
-                           dest_ha->dpc_flags));
-
-                       qla2x00_extend_timeout(sp->cmd, EXTEND_CMD_TIMEOUT);
-                       add_to_retry_queue(vis_ha, sp);
-
-                       continue;
-               } 
-
-               /*
-                * If this request's lun is suspended then put the request on
-                * the  scsi_retry queue. 
-                */
-               if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
-                   sp->lun_queue->q_state == LUN_STATE_WAIT) {
-                       DEBUG3(printk("scsi(%ld): lun wait state - pid=%ld, "
-                           "opcode=%d, allowed=%d, retries=%d\n",
-                           dest_ha->host_no,
-                           sp->cmd->serial_number,
-                           sp->cmd->cmnd[0],
-                           sp->cmd->allowed,
-                           sp->cmd->retries));
-                               
-                       add_to_scsi_retry_queue(vis_ha, sp);
-
-                       continue;
-               }
-
-               sp->lun_queue->io_cnt++;
-
-               rval = qla2x00_start_scsi(sp);
-               if (rval != QLA_SUCCESS) {
-                       /* Place request back on top of device queue */
-                       /* add to the top of queue */
-                       add_to_pending_queue_head(vis_ha, sp);
-
-                       sp->lun_queue->io_cnt--;
-               }
-       }
-
-       if (!IS_QLA2100(vis_ha) && !IS_QLA2200(vis_ha)) {
-               /* Process response_queue if ZIO support is enabled*/ 
-               qla2x00_process_response_queue_in_zio_mode(vis_ha);
-
-       }
-}
-
 /* XXX(hch): crude hack to emulate a down_timeout() */
 int
 qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
@@ -4337,67 +2329,6 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
        return -ETIMEDOUT;
 }
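Only the tail of qla2x00_down_timeout() is visible in this hunk; a sketch of the polling shape such an emulation typically has, assuming a HZ/10 step (the loop body is an assumption, only the -ETIMEDOUT fallback appears in the diff):

/* Poll down_trylock() until the semaphore is taken or the timeout drains away. */
static int example_down_timeout(struct semaphore *sema, unsigned long timeout)
{
	const unsigned long step = HZ / 10;

	do {
		if (!down_trylock(sema))
			return 0;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(step);
		timeout -= min(timeout, step);
	} while (timeout);

	return -ETIMEDOUT;
}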
 
-static void
-qla2xxx_get_port_id(struct scsi_target *starget)
-{
-       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-       scsi_qla_host_t *ha = to_qla_host(shost);
-       struct fc_port *fc;
-
-       list_for_each_entry(fc, &ha->fcports, list) {
-               if (fc->os_target_id == starget->id) {
-                       fc_starget_port_id(starget) = fc->d_id.b.domain << 16 |
-                               fc->d_id.b.area << 8 | 
-                               fc->d_id.b.al_pa;
-                       return;
-               }
-       }
-       fc_starget_port_id(starget) = -1;
-}
-
-static void
-qla2xxx_get_port_name(struct scsi_target *starget)
-{
-       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-       scsi_qla_host_t *ha = to_qla_host(shost);
-       struct fc_port *fc;
-
-       list_for_each_entry(fc, &ha->fcports, list) {
-               if (fc->os_target_id == starget->id) {
-                       fc_starget_port_name(starget) =
-                               __be64_to_cpu(*(uint64_t *)fc->port_name);
-                       return;
-               }
-       }
-       fc_starget_port_name(starget) = -1;
-}
-
-static void
-qla2xxx_get_node_name(struct scsi_target *starget)
-{
-       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-       scsi_qla_host_t *ha = to_qla_host(shost);
-       struct fc_port *fc;
-
-       list_for_each_entry(fc, &ha->fcports, list) {
-               if (fc->os_target_id == starget->id) {
-                       fc_starget_node_name(starget) =
-                               __be64_to_cpu(*(uint64_t *)fc->node_name);
-                       return;
-               }
-       }
-       fc_starget_node_name(starget) = -1;
-}
-
-static struct fc_function_template qla2xxx_transport_functions = {
-       .get_starget_port_id = qla2xxx_get_port_id,
-       .show_starget_port_id = 1,
-       .get_starget_port_name = qla2xxx_get_port_name,
-       .show_starget_port_name = 1,
-       .get_starget_node_name = qla2xxx_get_node_name,
-       .show_starget_node_name = 1,
-};
-
 /**
  * qla2x00_module_init - Module initialization.
  **/
@@ -4419,8 +2350,7 @@ qla2x00_module_init(void)
 #if DEBUG_QLA2100
        strcat(qla2x00_version_str, "-debug");
 #endif
-
-       qla2xxx_transport_template = fc_attach_transport(&qla2xxx_transport_functions);
+       qla2xxx_transport_template = qla2x00_alloc_transport_tmpl();
        if (!qla2xxx_transport_template)
                return -ENODEV;
 
index 73ff88b834b74b0f460a5bf75200cbc5114f4cfd..98e68867261a9430937101f371bad07c727b0d9b 100644 (file)
@@ -19,9 +19,9 @@
 /*
  * Driver version 
  */
-#define QLA2XXX_VERSION      "8.00.02b4-k"
+#define QLA2XXX_VERSION      "8.00.02b5-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   0
 #define QLA_DRIVER_PATCH_VER   2
-#define QLA_DRIVER_BETA_VER    4
+#define QLA_DRIVER_BETA_VER    5
index 24c1174b0c2f33574cf65a7afaa4ff0d754408de..ddf0f4277ee8cd087b24b007afb07adcb1b085c3 100644 (file)
@@ -1261,7 +1261,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
 
        if (Cmnd->use_sg) {
                sg = (struct scatterlist *) Cmnd->request_buffer;
-               sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg, scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+               sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
                cmd->segment_cnt = cpu_to_le16(sg_count);
                ds = cmd->dataseg;
                /* fill in first two sg entries: */
@@ -1307,7 +1307,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
                dma_addr_t busaddr = pci_map_page(hostdata->pci_dev,
                                                  page, offset,
                                                  Cmnd->request_bufflen,
-                                                 scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                                 Cmnd->sc_data_direction);
                Cmnd->SCp.dma_handle = busaddr;
 
                cmd->dataseg[0].d_base = cpu_to_le32(pci64_dma_lo32(busaddr));
@@ -1320,7 +1320,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
                cmd->segment_cnt = cpu_to_le16(1); /* Shouldn't this be 0? */
        }
 
-       if (Cmnd->sc_data_direction == SCSI_DATA_WRITE)
+       if (Cmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->control_flags = cpu_to_le16(CFLAG_WRITE);
        else 
                cmd->control_flags = cpu_to_le16(CFLAG_READ);
@@ -1405,13 +1405,13 @@ static void redo_port_db(unsigned long arg)
                                                 pci_unmap_sg(hostdata->pci_dev,
                                                              (struct scatterlist *)Cmnd->buffer,
                                                              Cmnd->use_sg,
-                                                             scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                                             Cmnd->sc_data_direction);
                                         else if (Cmnd->request_bufflen &&
                                                  Cmnd->sc_data_direction != PCI_DMA_NONE) {
                                                 pci_unmap_page(hostdata->pci_dev,
                                                                Cmnd->SCp.dma_handle,
                                                                Cmnd->request_bufflen,
-                                                               scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                                               Cmnd->sc_data_direction);
                                         }
 
                                         hostdata->handle_ptrs[i]->result = DID_SOFT_ERROR << 16;
@@ -1515,13 +1515,13 @@ void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
                                        pci_unmap_sg(hostdata->pci_dev,
                                                     (struct scatterlist *)Cmnd->buffer,
                                                     Cmnd->use_sg,
-                                                    scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                                    Cmnd->sc_data_direction);
                                else if (Cmnd->request_bufflen &&
                                         Cmnd->sc_data_direction != PCI_DMA_NONE)
                                        pci_unmap_page(hostdata->pci_dev,
                                                       Cmnd->SCp.dma_handle,
                                                       Cmnd->request_bufflen,
-                                                      scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                                      Cmnd->sc_data_direction);
                                Cmnd->result = 0x0;
                                (*Cmnd->scsi_done) (Cmnd);
                        } else
@@ -1569,12 +1569,12 @@ void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
                                if (Cmnd->use_sg)
                                        pci_unmap_sg(hostdata->pci_dev,
                                                     (struct scatterlist *)Cmnd->buffer, Cmnd->use_sg,
-                                                    scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                                    Cmnd->sc_data_direction);
                                else if (Cmnd->request_bufflen && Cmnd->sc_data_direction != PCI_DMA_NONE)
                                        pci_unmap_page(hostdata->pci_dev,
                                                       Cmnd->SCp.dma_handle,
                                                       Cmnd->request_bufflen,
-                                                      scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                                      Cmnd->sc_data_direction);
 
                                /* 
                                 * if any of the following are true we do not
index 71d597a9b0b008ab1ea82d6a9cf67e27b4a02861..6d29e1b864e2361ec067280be8d586bf04e17d38 100644 (file)
@@ -877,7 +877,7 @@ static int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
                ds = cmd->dataseg;
 
                sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg,
-                                     scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                     Cmnd->sc_data_direction);
 
                cmd->segment_cnt = cpu_to_le16(sg_count);
 
@@ -934,7 +934,7 @@ static int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
                dma_addr = pci_map_single(hostdata->pci_dev,
                                       Cmnd->request_buffer,
                                       Cmnd->request_bufflen,
-                                      scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                      Cmnd->sc_data_direction);
                Cmnd->SCp.ptr = (char *)(unsigned long) dma_addr;
 
                cmd->dataseg[0].d_base =
@@ -1067,7 +1067,7 @@ void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
                        pci_unmap_sg(hostdata->pci_dev,
                                     (struct scatterlist *)Cmnd->buffer,
                                     Cmnd->use_sg,
-                                    scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                    Cmnd->sc_data_direction);
                else if (Cmnd->request_bufflen)
                        pci_unmap_single(hostdata->pci_dev,
 #ifdef CONFIG_QL_ISP_A64
@@ -1076,7 +1076,7 @@ void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
                                         (u32)((long)Cmnd->SCp.ptr),
 #endif
                                         Cmnd->request_bufflen,
-                                        scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
+                                        Cmnd->sc_data_direction);
 
                isp_outw(out_ptr, host, MBOX5);
                (*Cmnd->scsi_done)(Cmnd);
index e2360c26ef01f3eed5027bf730b1376442f96473..5ee5d80a9931376920033f8338989491694552be 100644 (file)
@@ -45,21 +45,6 @@ struct scsi_device;
 struct scsi_target;
 struct scatterlist;
 
-/*
- * Legacy dma direction interfaces.
- *
- * This assumes the pci/sbus dma mapping flags have the same numerical
- * values as the generic dma-mapping ones.  Currently they do, but there's
- * no way to check.  Better don't use these interfaces!
- */
-#define SCSI_DATA_UNKNOWN      (DMA_BIDIRECTIONAL)
-#define SCSI_DATA_WRITE                (DMA_TO_DEVICE)
-#define SCSI_DATA_READ         (DMA_FROM_DEVICE)
-#define SCSI_DATA_NONE         (DMA_NONE)
-
-#define scsi_to_pci_dma_dir(scsi_dir)  ((int)(scsi_dir))
-#define scsi_to_sbus_dma_dir(scsi_dir) ((int)(scsi_dir))
-
 /* obsolete typedef junk. */
 #include "scsi_typedefs.h"
 
index 203a0812508a1962c2c9d58020d3a6a9bc61aaf5..1a135f38e78d8965d9a9fab95ca58614e7db61a3 100644 (file)
@@ -476,7 +476,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
  **/
 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
 {
-       struct Scsi_Host *host = scmd->device->host;
+       struct scsi_device *sdev = scmd->device;
+       struct Scsi_Host *shost = sdev->host;
        DECLARE_MUTEX_LOCKED(sem);
        unsigned long flags;
        int rtn = SUCCESS;
@@ -487,27 +488,27 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
         */
        scmd->owner = SCSI_OWNER_LOWLEVEL;
 
-       if (scmd->device->scsi_level <= SCSI_2)
+       if (sdev->scsi_level <= SCSI_2)
                scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
-                       (scmd->device->lun << 5 & 0xe0);
+                       (sdev->lun << 5 & 0xe0);
 
        scsi_add_timer(scmd, timeout, scsi_eh_times_out);
 
        /*
         * set up the semaphore so we wait for the command to complete.
         */
-       scmd->device->host->eh_action = &sem;
+       shost->eh_action = &sem;
        scmd->request->rq_status = RQ_SCSI_BUSY;
 
-       spin_lock_irqsave(scmd->device->host->host_lock, flags);
+       spin_lock_irqsave(shost->host_lock, flags);
        scsi_log_send(scmd);
-       host->hostt->queuecommand(scmd, scsi_eh_done);
-       spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+       shost->hostt->queuecommand(scmd, scsi_eh_done);
+       spin_unlock_irqrestore(shost->host_lock, flags);
 
        down(&sem);
        scsi_log_completion(scmd, SUCCESS);
 
-       scmd->device->host->eh_action = NULL;
+       shost->eh_action = NULL;
 
        /*
         * see if timeout.  if so, tell the host to forget about it.
@@ -527,10 +528,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
                 * abort a timed out command or not.  not sure how
                 * we should treat them differently anyways.
                 */
-               spin_lock_irqsave(scmd->device->host->host_lock, flags);
-               if (scmd->device->host->hostt->eh_abort_handler)
-                       scmd->device->host->hostt->eh_abort_handler(scmd);
-               spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+               spin_lock_irqsave(shost->host_lock, flags);
+               if (shost->hostt->eh_abort_handler)
+                       shost->hostt->eh_abort_handler(scmd);
+               spin_unlock_irqrestore(shost->host_lock, flags);
                        
                scmd->request->rq_status = RQ_SCSI_DONE;
                scmd->owner = SCSI_OWNER_ERROR_HANDLER;
index 619d3fb7a2f056bdfdde5f103d618a2b566fb949..d18da21c9c57e0ef590369ea815b7b1adba4042b 100644 (file)
@@ -358,9 +358,9 @@ void scsi_device_unbusy(struct scsi_device *sdev)
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
-       spin_lock(&sdev->sdev_lock);
+       spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
-       spin_unlock_irqrestore(&sdev->sdev_lock, flags);
+       spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 }
 
 /*
@@ -1423,7 +1423,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
        struct Scsi_Host *shost = sdev->host;
        struct request_queue *q;
 
-       q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
+       q = blk_init_queue(scsi_request_fn, NULL);
        if (!q)
                return NULL;
 
index a8a37a338c02f70ea4e04c9cbcab29559c0d3492..287d197a7c17f8527668ee162361b9552f04d948 100644 (file)
@@ -249,7 +249,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
         */
        sdev->borken = 1;
 
-       spin_lock_init(&sdev->sdev_lock);
        sdev->request_queue = scsi_alloc_queue(sdev);
        if (!sdev->request_queue) {
                /* release fn is set up in scsi_sysfs_device_initialise, so
index 134d3a3e4222a4154ad25b938e78b37276219bb4..e75ee4671ee3a0a6dcb608a73d6bd2f4007eff09 100644 (file)
@@ -171,6 +171,9 @@ void scsi_device_dev_release(struct device *dev)
        if (sdev->request_queue) {
                sdev->request_queue->queuedata = NULL;
                scsi_free_queue(sdev->request_queue);
+               /* temporary expedient, try to catch use of queue lock
+                * after free of sdev */
+               sdev->request_queue = NULL;
        }
 
        scsi_target_reap(scsi_target(sdev));
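
The "temporary expedient" comment added above explains the extra assignment: once scsi_free_queue() has run, pointing sdev->request_queue at NULL turns any later, buggy use of the stale queue (and of its embedded queue lock) into an immediate NULL dereference rather than a silent use-after-free. A rough user-space analogue of that poison-the-pointer pattern, with purely illustrative names:

    #include <stdlib.h>

    struct queue  { int lock; };
    struct device { struct queue *request_queue; };

    /* Hypothetical release path: free the queue, then poison the pointer so
     * any later (buggy) user faults at once instead of reading freed memory. */
    static void device_release(struct device *dev)
    {
            if (dev->request_queue) {
                    free(dev->request_queue);
                    dev->request_queue = NULL;
            }
    }

    int main(void)
    {
            struct device dev = { .request_queue = malloc(sizeof(struct queue)) };

            device_release(&dev);
            /* dev.request_queue->lock here would now be an immediate NULL deref */
            return 0;
    }
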
index cf6b1f0fb124d1af9b80f2815f93d2c34c5dab6a..7936aafc3d05644bad589d0776b4ac9e6fcda00b 100644 (file)
@@ -18,8 +18,8 @@
  *
  */
 
-static int sg_version_num = 30532;     /* 2 digits for each component */
-#define SG_VERSION_STR "3.5.32"
+static int sg_version_num = 30533;     /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.33"
 
 /*
  *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -61,7 +61,7 @@ static int sg_version_num = 30532;    /* 2 digits for each component */
 
 #ifdef CONFIG_SCSI_PROC_FS
 #include <linux/proc_fs.h>
-static char *sg_version_date = "20050117";
+static char *sg_version_date = "20050328";
 
 static int sg_proc_init(void);
 static void sg_proc_cleanup(void);
@@ -331,14 +331,13 @@ sg_release(struct inode *inode, struct file *filp)
 static ssize_t
 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
 {
-       int res;
        Sg_device *sdp;
        Sg_fd *sfp;
        Sg_request *srp;
        int req_pack_id = -1;
-       struct sg_header old_hdr;
-       sg_io_hdr_t new_hdr;
        sg_io_hdr_t *hp;
+       struct sg_header *old_hdr = NULL;
+       int retval = 0;
 
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
@@ -347,98 +346,138 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
-               if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
-                       return -EFAULT;
-               if (old_hdr.reply_len < 0) {
+               old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
+               if (!old_hdr)
+                       return -ENOMEM;
+               if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
+                       retval = -EFAULT;
+                       goto free_old_hdr;
+               }
+               if (old_hdr->reply_len < 0) {
                        if (count >= SZ_SG_IO_HDR) {
-                               if (__copy_from_user
-                                   (&new_hdr, buf, SZ_SG_IO_HDR))
-                                       return -EFAULT;
-                               req_pack_id = new_hdr.pack_id;
+                               sg_io_hdr_t *new_hdr;
+                               new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
+                               if (!new_hdr) {
+                                       retval = -ENOMEM;
+                                       goto free_old_hdr;
+                               }
+                               retval =__copy_from_user
+                                   (new_hdr, buf, SZ_SG_IO_HDR);
+                               req_pack_id = new_hdr->pack_id;
+                               kfree(new_hdr);
+                               if (retval) {
+                                       retval = -EFAULT;
+                                       goto free_old_hdr;
+                               }
                        }
                } else
-                       req_pack_id = old_hdr.pack_id;
+                       req_pack_id = old_hdr->pack_id;
        }
        srp = sg_get_rq_mark(sfp, req_pack_id);
        if (!srp) {             /* now wait on packet to arrive */
-               if (sdp->detached)
-                       return -ENODEV;
-               if (filp->f_flags & O_NONBLOCK)
-                       return -EAGAIN;
+               if (sdp->detached) {
+                       retval = -ENODEV;
+                       goto free_old_hdr;
+               }
+               if (filp->f_flags & O_NONBLOCK) {
+                       retval = -EAGAIN;
+                       goto free_old_hdr;
+               }
                while (1) {
-                       res = 0;        /* following is a macro that beats race condition */
+                       retval = 0; /* following macro beats race condition */
                        __wait_event_interruptible(sfp->read_wait,
-                               (sdp->detached || (srp = sg_get_rq_mark(sfp, req_pack_id))), 
-                                                  res);
-                       if (sdp->detached)
-                               return -ENODEV;
-                       if (0 == res)
+                               (sdp->detached ||
+                               (srp = sg_get_rq_mark(sfp, req_pack_id))), 
+                               retval);
+                       if (sdp->detached) {
+                               retval = -ENODEV;
+                               goto free_old_hdr;
+                       }
+                       if (0 == retval)
                                break;
-                       return res;     /* -ERESTARTSYS because signal hit process */
+
+                       /* -ERESTARTSYS as signal hit process */
+                       goto free_old_hdr;
                }
        }
-       if (srp->header.interface_id != '\0')
-               return sg_new_read(sfp, buf, count, srp);
+       if (srp->header.interface_id != '\0') {
+               retval = sg_new_read(sfp, buf, count, srp);
+               goto free_old_hdr;
+       }
 
        hp = &srp->header;
-       memset(&old_hdr, 0, SZ_SG_HEADER);
-       old_hdr.reply_len = (int) hp->timeout;
-       old_hdr.pack_len = old_hdr.reply_len; /* very old, strange behaviour */
-       old_hdr.pack_id = hp->pack_id;
-       old_hdr.twelve_byte =
+       if (old_hdr == NULL) {
+               old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
+               if (! old_hdr) {
+                       retval = -ENOMEM;
+                       goto free_old_hdr;
+               }
+       }
+       memset(old_hdr, 0, SZ_SG_HEADER);
+       old_hdr->reply_len = (int) hp->timeout;
+       old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
+       old_hdr->pack_id = hp->pack_id;
+       old_hdr->twelve_byte =
            ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
-       old_hdr.target_status = hp->masked_status;
-       old_hdr.host_status = hp->host_status;
-       old_hdr.driver_status = hp->driver_status;
+       old_hdr->target_status = hp->masked_status;
+       old_hdr->host_status = hp->host_status;
+       old_hdr->driver_status = hp->driver_status;
        if ((CHECK_CONDITION & hp->masked_status) ||
            (DRIVER_SENSE & hp->driver_status))
-               memcpy(old_hdr.sense_buffer, srp->sense_b,
-                      sizeof (old_hdr.sense_buffer));
+               memcpy(old_hdr->sense_buffer, srp->sense_b,
+                      sizeof (old_hdr->sense_buffer));
        switch (hp->host_status) {
        /* This setup of 'result' is for backward compatibility and is best
           ignored by the user who should use target, host + driver status */
        case DID_OK:
        case DID_PASSTHROUGH:
        case DID_SOFT_ERROR:
-               old_hdr.result = 0;
+               old_hdr->result = 0;
                break;
        case DID_NO_CONNECT:
        case DID_BUS_BUSY:
        case DID_TIME_OUT:
-               old_hdr.result = EBUSY;
+               old_hdr->result = EBUSY;
                break;
        case DID_BAD_TARGET:
        case DID_ABORT:
        case DID_PARITY:
        case DID_RESET:
        case DID_BAD_INTR:
-               old_hdr.result = EIO;
+               old_hdr->result = EIO;
                break;
        case DID_ERROR:
-               old_hdr.result = (srp->sense_b[0] == 0 && 
+               old_hdr->result = (srp->sense_b[0] == 0 && 
                                  hp->masked_status == GOOD) ? 0 : EIO;
                break;
        default:
-               old_hdr.result = EIO;
+               old_hdr->result = EIO;
                break;
        }
 
        /* Now copy the result back to the user buffer.  */
        if (count >= SZ_SG_HEADER) {
-               if (__copy_to_user(buf, &old_hdr, SZ_SG_HEADER))
-                       return -EFAULT;
+               if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
+                       retval = -EFAULT;
+                       goto free_old_hdr;
+               }
                buf += SZ_SG_HEADER;
-               if (count > old_hdr.reply_len)
-                       count = old_hdr.reply_len;
+               if (count > old_hdr->reply_len)
+                       count = old_hdr->reply_len;
                if (count > SZ_SG_HEADER) {
-                       if ((res =
-                            sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)))
-                               return -EFAULT;
+                       if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
+                               retval = -EFAULT;
+                               goto free_old_hdr;
+                       }
                }
        } else
-               count = (old_hdr.result == 0) ? 0 : -EIO;
+               count = (old_hdr->result == 0) ? 0 : -EIO;
        sg_finish_rem_req(srp);
-       return count;
+       retval = count;
+free_old_hdr:
+       if (old_hdr)
+               kfree(old_hdr);
+       return retval;
 }
 
 static ssize_t
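
The sg_read() rewrite above moves the struct sg_header / sg_io_hdr_t copies off the kernel stack into kmalloc()'d buffers and funnels every early return through the single free_old_hdr label, so each exit path releases the header exactly once. A small user-space sketch of that allocate-then-single-cleanup-label shape; the helper, error values and struct layout here are placeholders, not the driver's:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct old_hdr { int reply_len; int pack_id; };

    /* Sketch of the new sg_read() shape: allocate the header on the heap,
     * route every exit through one cleanup label that frees it. */
    static int read_one(const char *buf, size_t count, int simulate_fault)
    {
            struct old_hdr *old_hdr = NULL;
            int retval = 0;

            if (count >= sizeof(*old_hdr)) {
                    old_hdr = malloc(sizeof(*old_hdr));     /* was an on-stack struct */
                    if (!old_hdr)
                            return -ENOMEM;
                    memcpy(old_hdr, buf, sizeof(*old_hdr)); /* stand-in for __copy_from_user() */
            }

            if (simulate_fault) {           /* any failure takes the same exit */
                    retval = -EFAULT;
                    goto free_old_hdr;
            }

            retval = (int)count;            /* success reuses the same cleanup */
    free_old_hdr:
            free(old_hdr);                  /* free(NULL) is a no-op */
            return retval;
    }

    int main(void)
    {
            char buf[64] = { 0 };

            return read_one(buf, sizeof(buf), 0) < 0;
    }
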
@@ -708,16 +747,16 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
        switch (hp->dxfer_direction) {
        case SG_DXFER_TO_FROM_DEV:
        case SG_DXFER_FROM_DEV:
-               SRpnt->sr_data_direction = SCSI_DATA_READ;
+               SRpnt->sr_data_direction = DMA_FROM_DEVICE;
                break;
        case SG_DXFER_TO_DEV:
-               SRpnt->sr_data_direction = SCSI_DATA_WRITE;
+               SRpnt->sr_data_direction = DMA_TO_DEVICE;
                break;
        case SG_DXFER_UNKNOWN:
-               SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
+               SRpnt->sr_data_direction = DMA_BIDIRECTIONAL;
                break;
        default:
-               SRpnt->sr_data_direction = SCSI_DATA_NONE;
+               SRpnt->sr_data_direction = DMA_NONE;
                break;
        }
        SRpnt->upper_private_data = srp;
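
The switch above now translates the sg interface's SG_DXFER_* request codes directly into generic enum dma_data_direction values for sr_data_direction, with no SCSI_DATA_* intermediaries. A standalone restatement of that mapping; the SG_DXFER_* and DMA_* constants are written out as I believe <scsi/sg.h> and <linux/dma-mapping.h> define them, so treat the literal values as assumptions:

    #include <stdio.h>

    /* Assumed values from <linux/dma-mapping.h>. */
    enum dma_data_direction {
            DMA_BIDIRECTIONAL = 0,
            DMA_TO_DEVICE     = 1,
            DMA_FROM_DEVICE   = 2,
            DMA_NONE          = 3,
    };

    /* Assumed values from <scsi/sg.h>. */
    #define SG_DXFER_NONE           -1
    #define SG_DXFER_TO_DEV         -2
    #define SG_DXFER_FROM_DEV       -3
    #define SG_DXFER_TO_FROM_DEV    -4
    #define SG_DXFER_UNKNOWN        -5

    /* Same mapping as the new sg_common_write() switch above. */
    static enum dma_data_direction sg_dxfer_to_dma_dir(int dxfer_direction)
    {
            switch (dxfer_direction) {
            case SG_DXFER_TO_FROM_DEV:
            case SG_DXFER_FROM_DEV:
                    return DMA_FROM_DEVICE;
            case SG_DXFER_TO_DEV:
                    return DMA_TO_DEVICE;
            case SG_DXFER_UNKNOWN:
                    return DMA_BIDIRECTIONAL;
            default:
                    return DMA_NONE;
            }
    }

    int main(void)
    {
            printf("SG_DXFER_TO_DEV maps to %d\n", sg_dxfer_to_dma_dir(SG_DXFER_TO_DEV));
            return 0;
    }
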
@@ -725,7 +764,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
        srp->data.sglist_len = 0;
        srp->data.bufflen = 0;
        srp->data.buffer = NULL;
-       hp->duration = jiffies; /* unit jiffies now, millisecs after done */
+       hp->duration = jiffies_to_msecs(jiffies);
 /* Now send everything of to mid-level. The next time we hear about this
    packet is when sg_cmd_done() is called (i.e. a callback). */
        scsi_do_req(SRpnt, (void *) cmnd,
@@ -938,8 +977,13 @@ sg_ioctl(struct inode *inode, struct file *filp,
                if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
                        return -EFAULT;
                else {
-                       sg_req_info_t rinfo[SG_MAX_QUEUE];
-                       Sg_request *srp;
+                       sg_req_info_t *rinfo;
+                       unsigned int ms;
+
+                       rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+                                                               GFP_KERNEL);
+                       if (!rinfo)
+                               return -ENOMEM;
                        read_lock_irqsave(&sfp->rq_list_lock, iflags);
                        for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
                             ++val, srp = srp ? srp->nextrp : srp) {
@@ -950,19 +994,30 @@ sg_ioctl(struct inode *inode, struct file *filp,
                                            srp->header.masked_status & 
                                            srp->header.host_status & 
                                            srp->header.driver_status;
-                                       rinfo[val].duration =
-                                           srp->done ? srp->header.duration :
-                                           jiffies_to_msecs(
-                                               jiffies - srp->header.duration);
+                                       if (srp->done)
+                                               rinfo[val].duration =
+                                                       srp->header.duration;
+                                       else {
+                                               ms = jiffies_to_msecs(jiffies);
+                                               rinfo[val].duration =
+                                                   (ms > srp->header.duration) ?
+                                                   (ms - srp->header.duration) : 0;
+                                       }
                                        rinfo[val].orphan = srp->orphan;
-                                       rinfo[val].sg_io_owned = srp->sg_io_owned;
-                                       rinfo[val].pack_id = srp->header.pack_id;
-                                       rinfo[val].usr_ptr = srp->header.usr_ptr;
+                                       rinfo[val].sg_io_owned =
+                                                       srp->sg_io_owned;
+                                       rinfo[val].pack_id =
+                                                       srp->header.pack_id;
+                                       rinfo[val].usr_ptr =
+                                                       srp->header.usr_ptr;
                                }
                        }
                        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-                       return (__copy_to_user(p, rinfo,
-                               SZ_SG_REQ_INFO * SG_MAX_QUEUE) ? -EFAULT : 0);
+                       result = __copy_to_user(p, rinfo, 
+                                               SZ_SG_REQ_INFO * SG_MAX_QUEUE);
+                       result = result ? -EFAULT : 0;
+                       kfree(rinfo);
+                       return result;
                }
        case SG_EMULATED_HOST:
                if (sdp->detached)
@@ -1209,11 +1264,12 @@ static int
 sg_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        Sg_fd *sfp;
-       unsigned long req_sz = vma->vm_end - vma->vm_start;
+       unsigned long req_sz;
        Sg_scatter_hold *rsv_schp;
 
        if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
                return -ENXIO;
+       req_sz = vma->vm_end - vma->vm_start;
        SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
                                   (void *) vma->vm_start, (int) req_sz));
        if (vma->vm_pgoff)
@@ -1260,6 +1316,7 @@ sg_cmd_done(Scsi_Cmnd * SCpnt)
        Sg_fd *sfp;
        Sg_request *srp = NULL;
        unsigned long iflags;
+       unsigned int ms;
 
        if (SCpnt && (SRpnt = SCpnt->sc_request))
                srp = (Sg_request *) SRpnt->upper_private_data;
@@ -1296,9 +1353,9 @@ sg_cmd_done(Scsi_Cmnd * SCpnt)
        SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
                sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result));
        srp->header.resid = SCpnt->resid;
-       /* N.B. unit of duration changes here from jiffies to millisecs */
-       srp->header.duration =
-           jiffies_to_msecs(jiffies - srp->header.duration);
+       ms = jiffies_to_msecs(jiffies);
+       srp->header.duration = (ms > srp->header.duration) ?
+                               (ms - srp->header.duration) : 0;
        if (0 != SRpnt->sr_result) {
                struct scsi_sense_hdr sshdr;
 
@@ -2396,7 +2453,7 @@ sg_add_request(Sg_fd * sfp)
        }
        if (resp) {
                resp->nextrp = NULL;
-               resp->header.duration = jiffies;
+               resp->header.duration = jiffies_to_msecs(jiffies);
                resp->my_cmdp = NULL;
        }
        write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2991,6 +3048,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
        Sg_fd *fp;
        const sg_io_hdr_t *hp;
        const char * cp;
+       unsigned int ms;
 
        for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
                seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
@@ -3029,10 +3087,13 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
                                   srp->header.pack_id, blen);
                        if (srp->done)
                                seq_printf(s, " dur=%d", hp->duration);
-                       else
+                       else {
+                               ms = jiffies_to_msecs(jiffies);
                                seq_printf(s, " t_o/elap=%d/%d",
-                                 new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout),
-                                 jiffies_to_msecs(hp->duration ? (jiffies - hp->duration) : 0));
+                                       (new_interface ? hp->timeout :
+                                                 jiffies_to_msecs(fp->timeout)),
+                                       (ms > hp->duration ? ms - hp->duration : 0));
+                       }
                        seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
                                   (int) srp->data.cmd_opcode);
                }
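
The sg.c hunks above also switch srp->header.duration from a raw jiffies timestamp to a millisecond reading taken at submission time (jiffies_to_msecs(jiffies)), and every reporting site now computes the elapsed time as (ms > duration) ? ms - duration : 0, so a stale or wrapped start value is clamped to zero instead of producing a huge bogus figure. A tiny sketch of that clamped subtraction (the helper name is invented for illustration):

    #include <stdio.h>

    /* Clamped elapsed-time helper mirroring the new sg.c arithmetic;
     * both arguments are millisecond readings of the same clock. */
    static unsigned int elapsed_ms(unsigned int now_ms, unsigned int start_ms)
    {
            return (now_ms > start_ms) ? (now_ms - start_ms) : 0;
    }

    int main(void)
    {
            printf("%u\n", elapsed_ms(5000, 4200)); /* 800 ms in flight */
            printf("%u\n", elapsed_ms(100, 4200));  /* clamped to 0, no underflow */
            return 0;
    }
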
index 63bf2aecbc57cbdb6c079efa69a99685c4bd42e8..9171788348c485211ac70ea5f8f042ca38a2fe7e 100644 (file)
@@ -120,11 +120,10 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
        }
 
        /* Fill in the three required pieces of hostdata */
-       hostdata->base = base_addr;
+       hostdata->base = ioport_map(base_addr, 64);
        hostdata->differential = differential;
        hostdata->clock = clock;
        hostdata->chip710 = 1;
-       NCR_700_set_io_mapped(hostdata);
 
        /* and register the chip */
        if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev))
@@ -133,6 +132,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
                goto out_release;
        }
        host->this_id = scsi_id;
+       host->base = base_addr;
        host->irq = irq;
        if (request_irq(irq, NCR_700_intr, SA_SHIRQ, "sim710", host)) {
                printk(KERN_ERR "sim710: request_irq failed\n");
@@ -164,6 +164,7 @@ sim710_device_remove(struct device *dev)
        NCR_700_release(host);
        kfree(hostdata);
        free_irq(host->irq, host);
+       release_region(host->base, 64);
        return 0;
 }
 
index 70ac2860a605342199b4d68c2215154eb2921645..ef1afc178c0a2df9569daf3312e33911a0e89bf2 100644 (file)
@@ -355,8 +355,11 @@ struct request_queue
        unsigned long           queue_flags;
 
        /*
-        * protects queue structures from reentrancy
+        * protects queue structures from reentrancy. ->__queue_lock should
+        * _never_ be used directly, it is queue private. always use
+        * ->queue_lock.
         */
+       spinlock_t              __queue_lock;
        spinlock_t              *queue_lock;
 
        /*
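
The comment added above makes __queue_lock queue-private and requires all users to go through the queue_lock pointer, which may refer either to this embedded lock or to one supplied by the driver; that is presumably why the scsi_lib.c hunk earlier can call blk_init_queue(scsi_request_fn, NULL) and why sdev_lock disappears from scsi_device below. A user-space analogue of the embedded-or-external lock pointer, with invented names:

    #include <pthread.h>
    #include <stdio.h>

    /* Analogue of struct request_queue: an embedded lock plus the pointer
     * everyone is supposed to take; the embedded field stays private. */
    struct queue {
            pthread_mutex_t __queue_lock;
            pthread_mutex_t *queue_lock;
    };

    /* Analogue of blk_init_queue(): callers may pass their own lock or NULL. */
    static void queue_init(struct queue *q, pthread_mutex_t *lock)
    {
            pthread_mutex_init(&q->__queue_lock, NULL);
            q->queue_lock = lock ? lock : &q->__queue_lock;
    }

    int main(void)
    {
            struct queue q;

            queue_init(&q, NULL);                   /* like blk_init_queue(fn, NULL) */
            pthread_mutex_lock(q.queue_lock);       /* always via the pointer */
            puts("holding the queue's own lock");
            pthread_mutex_unlock(q.queue_lock);
            return 0;
    }
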
index 07d974051b0c10b1381ea4b0a675076fe5210e71..c018020d9160891b511e03bdff1db93796bf9928 100644 (file)
@@ -43,8 +43,9 @@ struct scsi_device {
        struct list_head    siblings;   /* list of all devices on this host */
        struct list_head    same_target_siblings; /* just the devices sharing same target id */
 
-       volatile unsigned short device_busy;    /* commands actually active on low-level */
-       spinlock_t sdev_lock;           /* also the request queue_lock */
+       /* this is now protected by the request_queue->queue_lock */
+       unsigned int device_busy;       /* commands actually active on
+                                        * low-level. protected by queue_lock. */
        spinlock_t list_lock;
        struct list_head cmd_list;      /* queue of in use SCSI Command structures */
        struct list_head starved_entry;
index 27f2c4e8943ac35941cdbe36c9bd929ced644b79..1cee1e100943dafe00c14d5329b73659062a4d1a 100644 (file)
@@ -448,8 +448,14 @@ struct Scsi_Host {
        wait_queue_head_t       host_wait;
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;
-       volatile unsigned short host_busy;   /* commands actually active on low-level */
-       volatile unsigned short host_failed; /* commands that failed. */
+
+       /*
+        * The following two fields are protected with host_lock;
+        * however, eh routines can safely access during eh processing
+        * without acquiring the lock.
+        */
+       unsigned int host_busy;            /* commands actually active on low-level */
+       unsigned int host_failed;          /* commands that failed. */
     
        unsigned short host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
        int resetting; /* if set, it means that last_reset is a valid value */