diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 05997cee4f379761baa3282bb28b26424c4babc8..d334cac5e1fc64f4847ea4a72ad2620afd3167ee 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -29,6 +29,7 @@
  */
 
 #include <linux/blkdev.h>
+#include <linux/bug.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
@@ -172,6 +173,9 @@ struct sbp2_target {
        int blocked;    /* ditto */
 };
 
+/* Impossible login_id, to detect logout attempt before successful login */
+#define INVALID_LOGIN_ID 0x10000
+
 /*
  * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
  * provided in the config rom. Most devices do provide a value, which
@@ -181,10 +185,16 @@ struct sbp2_target {
 #define SBP2_MAX_LOGIN_ORB_TIMEOUT     40000U  /* Timeout in ms */
 #define SBP2_ORB_TIMEOUT               2000U   /* Timeout in ms */
 #define SBP2_ORB_NULL                  0x80000000
-#define SBP2_MAX_SG_ELEMENT_LENGTH     0xf000
 #define SBP2_RETRY_LIMIT               0xf             /* 15 retries */
 #define SBP2_CYCLE_LIMIT               (0xc8 << 12)    /* 200 125us cycles */
 
+/*
+ * The default maximum s/g segment size of a FireWire controller is
+ * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
+ * be quadlet-aligned, we set the length limit to 0xffff & ~3.
+ */
+#define SBP2_MAX_SEG_SIZE              0xfffc
+
 /* Unit directory keys */
 #define SBP2_CSR_UNIT_CHARACTERISTICS  0x3a
 #define SBP2_CSR_FIRMWARE_REVISION     0x3c
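The new limit follows from the SBP-2 page table format rather than from any controller: the length field of a page table element is 16 bits wide (at most 0xffff), and buffers must be quadlet aligned, so the largest usable segment size is 0xffff rounded down to a multiple of 4. A minimal compile-time check of these two constraints could look like the sketch below (illustrative only, not part of the patch; it assumes the kernel's BUILD_BUG_ON()):

	BUILD_BUG_ON(SBP2_MAX_SEG_SIZE > 0xffff);	/* must fit the 16-bit length field */
	BUILD_BUG_ON(SBP2_MAX_SEG_SIZE & 3);		/* must stay quadlet-aligned */
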
@@ -781,9 +791,20 @@ static void sbp2_release_target(struct kref *kref)
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                }
-               sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
-                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
-
+               if (lu->login_id != INVALID_LOGIN_ID) {
+                       int generation, node_id;
+                       /*
+                        * tgt->node_id may be obsolete here if we failed
+                        * during initial login or after a bus reset where
+                        * the topology changed.
+                        */
+                       generation = device->generation;
+                       smp_rmb(); /* node_id vs. generation */
+                       node_id    = device->node_id;
+                       sbp2_send_management_orb(lu, node_id, generation,
+                                                SBP2_LOGOUT_REQUEST,
+                                                lu->login_id, NULL);
+               }
                fw_core_remove_address_handler(&lu->address_handler);
                list_del(&lu->link);
                kfree(lu);
@@ -798,19 +819,20 @@ static void sbp2_release_target(struct kref *kref)
 
 static struct workqueue_struct *sbp2_wq;
 
+static void sbp2_target_put(struct sbp2_target *tgt)
+{
+       kref_put(&tgt->kref, sbp2_release_target);
+}
+
 /*
  * Always get the target's kref when scheduling work on one its units.
  * Each workqueue job is responsible to call sbp2_target_put() upon return.
  */
 static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
 {
-       if (queue_delayed_work(sbp2_wq, &lu->work, delay))
-               kref_get(&lu->tgt->kref);
-}
-
-static void sbp2_target_put(struct sbp2_target *tgt)
-{
-       kref_put(&tgt->kref, sbp2_release_target);
+       kref_get(&lu->tgt->kref);
+       if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
+               sbp2_target_put(lu->tgt);
 }
 
 /*
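Taking the target's kref before queue_delayed_work() guarantees the reference is held for as long as a job can be queued or running; if queueing fails because the work is already pending, the extra reference is simply dropped again. With the old order, a successfully queued job could in principle run and call sbp2_target_put() before the caller's kref_get() had executed. A sketch of the resulting get-before-queue pattern, using hypothetical names (my_obj, my_obj_get/put, my_wq are not from this driver):

	my_obj_get(obj);				/* pin the object for the worker */
	if (!queue_delayed_work(my_wq, &obj->work, delay))
		my_obj_put(obj);			/* already pending: undo the get */
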
@@ -971,6 +993,7 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
 
        lu->tgt      = tgt;
        lu->lun      = lun_entry & 0xffff;
+       lu->login_id = INVALID_LOGIN_ID;
        lu->retries  = 0;
        lu->has_sdev = false;
        lu->blocked  = false;
@@ -1099,6 +1122,10 @@ static int sbp2_probe(struct device *dev)
        struct Scsi_Host *shost;
        u32 model, firmware_revision;
 
+       if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
+               BUG_ON(dma_set_max_seg_size(device->card->device,
+                                           SBP2_MAX_SEG_SIZE));
+
        shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
        if (shost == NULL)
                return -ENOMEM;
@@ -1136,7 +1163,7 @@ static int sbp2_probe(struct device *dev)
 
        /* Do the login in a workqueue so we can easily reschedule retries. */
        list_for_each_entry(lu, &tgt->lu_list, link)
-               sbp2_queue_work(lu, 0);
+               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
        return 0;
 
  fail_tgt_put:
@@ -1347,14 +1374,12 @@ static int
 sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
                     struct sbp2_logical_unit *lu)
 {
-       struct scatterlist *sg;
-       int sg_len, l, i, j, count;
-       dma_addr_t sg_addr;
-
-       sg = scsi_sglist(orb->cmd);
-       count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
-                          orb->cmd->sc_data_direction);
-       if (count == 0)
+       struct scatterlist *sg = scsi_sglist(orb->cmd);
+       int i, n;
+
+       n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
+                      orb->cmd->sc_data_direction);
+       if (n == 0)
                goto fail;
 
        /*
@@ -1364,7 +1389,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
         * as the second generation iPod which doesn't support page
         * tables.
         */
-       if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
+       if (n == 1) {
                orb->request.data_descriptor.high =
                        cpu_to_be32(lu->tgt->address_high);
                orb->request.data_descriptor.low  =
@@ -1374,29 +1399,9 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
                return 0;
        }
 
-       /*
-        * Convert the scatterlist to an sbp2 page table.  If any
-        * scatterlist entries are too big for sbp2, we split them as we
-        * go.  Even if we ask the block I/O layer to not give us sg
-        * elements larger than 65535 bytes, some IOMMUs may merge sg elements
-        * during DMA mapping, and Linux currently doesn't prevent this.
-        */
-       for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
-               sg_len = sg_dma_len(sg);
-               sg_addr = sg_dma_address(sg);
-               while (sg_len) {
-                       /* FIXME: This won't get us out of the pinch. */
-                       if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
-                               fw_error("page table overflow\n");
-                               goto fail_page_table;
-                       }
-                       l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
-                       orb->page_table[j].low = cpu_to_be32(sg_addr);
-                       orb->page_table[j].high = cpu_to_be32(l << 16);
-                       sg_addr += l;
-                       sg_len -= l;
-                       j++;
-               }
+       for_each_sg(sg, sg, n, i) {
+               orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
+               orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
        }
 
        orb->page_table_bus =
@@ -1415,13 +1420,13 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
        orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
        orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
        orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
-                                        COMMAND_ORB_DATA_SIZE(j));
+                                        COMMAND_ORB_DATA_SIZE(n));
 
        return 0;
 
  fail_page_table:
-       dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
-                    orb->cmd->sc_data_direction);
+       dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+                    scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
  fail:
        return -ENOMEM;
 }
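Because sbp2_probe() now caps the DMA segment size at SBP2_MAX_SEG_SIZE, every element returned by dma_map_sg() is guaranteed to fit a single SBP-2 page table element, which is why the old split-and-copy loop above could be replaced by a plain for_each_sg() walk. As a reminder of the element layout that loop fills in (two big-endian quadlets; the struct name below is illustrative, the driver itself stores them as high/low quadlet pairs as seen above):

	struct sbp2_pt_elem {
		__be32 high;	/* segment length << 16 | upper address bits (zero with 32-bit DMA addresses) */
		__be32 low;	/* lower 32 bits of the DMA address */
	};
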
@@ -1434,7 +1439,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
        struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
        struct sbp2_command_orb *orb;
        unsigned int max_payload;
-       int retval = SCSI_MLQUEUE_HOST_BUSY;
+       int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
 
        /*
         * Bidirectional commands are not yet implemented, and unknown
@@ -1478,6 +1483,9 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
        if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
 
+       generation = device->generation;
+       smp_rmb();    /* sbp2_map_scatterlist looks at tgt->address_high */
+
        if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
                goto out;
 
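The generation must be read before the node addressing data it protects, mirroring a write barrier on the updater's side in the FireWire core; otherwise a command could be sent with a fresh generation but stale addressing after a bus reset. Schematically (paraphrased, not quoted from the core):

	/* updater (core, after bus reset) */	/* reader (this driver) */
	update node IDs / addresses;		generation = device->generation;
	smp_wmb();				smp_rmb();
	device->generation = new_generation;	use node IDs / addresses;

If the reader observes the new generation, the paired barriers ensure it also observes the updated addressing; if it observes the old generation, the request fails with a generation mismatch instead of reaching the wrong node.
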
@@ -1490,7 +1498,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
        if (dma_mapping_error(device->card->device, orb->base.request_bus))
                goto out;
 
-       sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
+       sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
                      lu->command_block_agent_address + SBP2_ORB_POINTER);
        retval = 0;
  out:
@@ -1542,6 +1550,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
        if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
                blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
 
+       blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
+
        return 0;
 }
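
The same limit is thus enforced at both ends of the I/O path: dma_set_max_seg_size() in sbp2_probe() keeps the DMA API (including IOMMU merging) from producing segments larger than a page table element can describe, while blk_queue_max_segment_size() here keeps the block layer from building such segments in the first place. In short (recap of the two calls above, not additional patch content):

	dma_set_max_seg_size(device->card->device, SBP2_MAX_SEG_SIZE);		/* at probe time */
	blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);	/* per SCSI device */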