1 /* ------------------------------------------------------------
3 * (C) Copyright IBM Corporation 1994, 2004
4 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
5 * Santiago Leon (santil@us.ibm.com)
6 * Dave Boutcher (sleddog@us.ibm.com)
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23 * ------------------------------------------------------------
24 * Emulation of a SCSI host adapter for Virtual I/O devices
26 * This driver supports the SCSI adapter implemented by the IBM
27 * Power5 firmware. That SCSI adapter is not a physical adapter,
28 * but allows Linux SCSI peripheral drivers to directly
29 * access devices in another logical partition on the physical system.
31 * The virtual adapter(s) are present in the open firmware device
32 * tree just like real adapters.
34 * One of the capabilities provided on these systems is the ability
35 * to DMA between partitions. The architecture states that for VSCSI,
36 * the server side is allowed to DMA to and from the client. The client
37 * is never trusted to DMA to or from the server directly.
39 * Messages are sent between partitions on a "Command/Response Queue"
40 * (CRQ), which is just a buffer of 16 byte entries in the receiver's memory.
41 * Senders cannot access the buffer directly, but send messages by
42 * making a hypervisor call and passing in the 16 bytes. The hypervisor
43 * puts the message in the next 16 byte space in round-robin fashion,
44 * turns on the high order bit of the message (the valid bit), and
45 * generates an interrupt to the receiver (if interrupts are turned on.)
46 * The receiver just turns off the valid bit when it has copied out the message.
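 *
 * (In this driver the 16 byte CRQ entry is handed to the hypervisor as
 * two u64 words; see ibmvscsi_send_crq() and its callers below.)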
49 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
50 * (IU) (as defined in the T10 standard available at www.t10.org), gets
51 * a DMA address for the message, and sends it to the server as the
52 * payload of a CRQ message. The server DMAs the SRP IU and processes it,
53 * including doing any additional data transfers. When it is done, it
54 * DMAs the SRP response back to the same address as the request came from,
55 * and sends a CRQ message back to inform the client that the request has completed.
58 * Note that some of the underlying infrastructure is different between
59 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
60 * the older iSeries hypervisor models. To support both, some low level
61 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
62 * The Makefile should pick one, not two, not zero, of these.
64 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
65 * interfaces. It would be really nice to abstract this above an RDMA layer.
69 #include <linux/module.h>
70 #include <linux/moduleparam.h>
71 #include <linux/dma-mapping.h>
72 #include <linux/delay.h>
74 #include <scsi/scsi.h>
75 #include <scsi/scsi_cmnd.h>
76 #include <scsi/scsi_host.h>
77 #include <scsi/scsi_device.h>
80 /* The values below are somewhat arbitrary default values, but
81 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
82 * Note that there are 3 bits of channel value, 6 bits of id, and 5 bits of LUN.
85 static int max_id = 64;
86 static int max_channel = 3;
87 static int init_timeout = 5;
88 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
90 #define IBMVSCSI_VERSION "1.5.8"
92 MODULE_DESCRIPTION("IBM Virtual SCSI");
93 MODULE_AUTHOR("Dave Boutcher");
94 MODULE_LICENSE("GPL");
95 MODULE_VERSION(IBMVSCSI_VERSION);
97 module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
98 MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
99 module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
100 MODULE_PARM_DESC(max_channel, "Largest channel value");
101 module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
102 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
103 module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
104 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
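/* The four parameters above can be set at module load time, e.g. (values
 * here are purely illustrative, and the module name is whatever the
 * Makefile builds this file into):
 *
 *   modprobe <ibmvscsi-module> max_id=64 max_channel=3 init_timeout=10 max_requests=50
 *
 * Because they are declared S_IWUSR they also appear writable under
 * /sys/module/<ibmvscsi-module>/parameters/.
 */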
106 /* ------------------------------------------------------------
107 * Routines for the event pool and event structs
110 * initialize_event_pool: - Allocates and initializes the event pool for a host
111 * @pool: event_pool to be initialized
112 * @size: Number of events in pool
113 * @hostdata: ibmvscsi_host_data who owns the event pool
115 * Returns zero on success.
117 static int initialize_event_pool(struct event_pool *pool,
118 int size, struct ibmvscsi_host_data *hostdata)
124 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
129 dma_alloc_coherent(hostdata->dev,
130 pool->size * sizeof(*pool->iu_storage),
132 if (!pool->iu_storage) {
137 for (i = 0; i < pool->size; ++i) {
138 struct srp_event_struct *evt = &pool->events[i];
139 memset(&evt->crq, 0x00, sizeof(evt->crq));
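		/*
		 * Each event owns one slot of the coherent iu_storage area;
		 * the CRQ template built here already carries the DMA address
		 * (IU_data_ptr) of that slot, so queueing a request later only
		 * has to copy the IU into the slot and send the 16 byte CRQ
		 * entry (see ibmvscsi_send_srp_event()).
		 */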
140 atomic_set(&evt->free, 1);
141 evt->crq.valid = 0x80;
142 evt->crq.IU_length = sizeof(*evt->xfer_iu);
143 evt->crq.IU_data_ptr = pool->iu_token +
144 sizeof(*evt->xfer_iu) * i;
145 evt->xfer_iu = pool->iu_storage + i;
146 evt->hostdata = hostdata;
147 evt->ext_list = NULL;
148 evt->ext_list_token = 0;
155 * release_event_pool: - Frees memory of an event pool of a host
156 * @pool: event_pool to be released
157 * @hostdata: ibmvscsi_host_data who owns the event pool
159 * Returns zero on success.
161 static void release_event_pool(struct event_pool *pool,
162 struct ibmvscsi_host_data *hostdata)
165 for (i = 0; i < pool->size; ++i) {
166 if (atomic_read(&pool->events[i].free) != 1)
168 if (pool->events[i].ext_list) {
169 dma_free_coherent(hostdata->dev,
170 SG_ALL * sizeof(struct srp_direct_buf),
171 pool->events[i].ext_list,
172 pool->events[i].ext_list_token);
177 "ibmvscsi: releasing event pool with %d "
178 "events still in use?\n", in_use);
180 dma_free_coherent(hostdata->dev,
181 pool->size * sizeof(*pool->iu_storage),
182 pool->iu_storage, pool->iu_token);
186 * valid_event_struct: - Determines if event is valid.
187 * @pool: event_pool that contains the event
188 * @evt: srp_event_struct to be checked for validity
190 * Returns zero if event is invalid, one otherwise.
192 static int valid_event_struct(struct event_pool *pool,
193 struct srp_event_struct *evt)
195 int index = evt - pool->events;
196 if (index < 0 || index >= pool->size) /* outside of bounds */
198 if (evt != pool->events + index) /* unaligned */
204 * free_event_struct: - Changes status of event to "free"
205 * @pool: event_pool that contains the event
206 * @evt: srp_event_struct to be modified
209 static void free_event_struct(struct event_pool *pool,
210 struct srp_event_struct *evt)
212 if (!valid_event_struct(pool, evt)) {
214 "ibmvscsi: Freeing invalid event_struct %p "
215 "(not in pool %p)\n", evt, pool->events);
218 if (atomic_inc_return(&evt->free) != 1) {
220 "ibmvscsi: Freeing event_struct %p "
221 "which is not in use!\n", evt);
227 * get_event_struct: - Gets the next free event in pool
228 * @pool: event_pool that contains the events to be searched
230 * Returns the next event in "free" state, and NULL if none are free.
231 * Note that no synchronization is done here; we assume the host_lock
232 * will synchronize things.
234 static struct srp_event_struct *get_event_struct(struct event_pool *pool)
237 int poolsize = pool->size;
238 int offset = pool->next;
240 for (i = 0; i < poolsize; i++) {
241 offset = (offset + 1) % poolsize;
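		/*
		 * atomic_dec_if_positive() returns the decremented value and
		 * refuses to go negative: a result of 0 means the event was
		 * free (1) and has now been claimed; -1 means it was busy.
		 */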
242 if (!atomic_dec_if_positive(&pool->events[offset].free)) {
244 return &pool->events[offset];
248 printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
253 * init_event_struct: Initialize fields in an event struct that are always
256 * @done: Routine to call when the event is responded to
257 * @format: SRP or MAD format
258 * @timeout: timeout value set in the CRQ
260 static void init_event_struct(struct srp_event_struct *evt_struct,
261 void (*done) (struct srp_event_struct *),
265 evt_struct->cmnd = NULL;
266 evt_struct->cmnd_done = NULL;
267 evt_struct->sync_srp = NULL;
268 evt_struct->crq.format = format;
269 evt_struct->crq.timeout = timeout;
270 evt_struct->done = done;
273 /* ------------------------------------------------------------
274 * Routines for receiving SCSI responses from the hosting partition
278 * set_srp_direction: Set the fields in the srp related to data
279 * direction and number of buffers based on the direction in
280 * the scsi_cmnd and the number of buffers
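 *
 * The two SRP buffer format nibbles share one byte: the data-out format
 * lives in the high nibble of buf_fmt and the data-in format in the low
 * nibble, which is why the code below shifts by 4 for DMA_TO_DEVICE and
 * unmap_cmd_data() splits the byte the same way.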
282 static void set_srp_direction(struct scsi_cmnd *cmd,
283 struct srp_cmd *srp_cmd,
292 fmt = SRP_DATA_DESC_DIRECT;
294 fmt = SRP_DATA_DESC_INDIRECT;
295 numbuf = min(numbuf, MAX_INDIRECT_BUFS);
297 if (cmd->sc_data_direction == DMA_TO_DEVICE)
298 srp_cmd->data_out_desc_cnt = numbuf;
300 srp_cmd->data_in_desc_cnt = numbuf;
303 if (cmd->sc_data_direction == DMA_TO_DEVICE)
304 srp_cmd->buf_fmt = fmt << 4;
306 srp_cmd->buf_fmt = fmt;
309 static void unmap_sg_list(int num_entries,
311 struct srp_direct_buf *md)
315 for (i = 0; i < num_entries; ++i)
316 dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
320 * unmap_cmd_data: - Unmap data pointed to by srp_cmd based on the format
321 * @cmd: srp_cmd whose additional_data member will be unmapped
322 * @dev: device for which the memory is mapped
325 static void unmap_cmd_data(struct srp_cmd *cmd,
326 struct srp_event_struct *evt_struct,
331 out_fmt = cmd->buf_fmt >> 4;
332 in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
334 if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
336 else if (out_fmt == SRP_DATA_DESC_DIRECT ||
337 in_fmt == SRP_DATA_DESC_DIRECT) {
338 struct srp_direct_buf *data =
339 (struct srp_direct_buf *) cmd->add_data;
340 dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
342 struct srp_indirect_buf *indirect =
343 (struct srp_indirect_buf *) cmd->add_data;
344 int num_mapped = indirect->table_desc.len /
345 sizeof(struct srp_direct_buf);
347 if (num_mapped <= MAX_INDIRECT_BUFS) {
348 unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
352 unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
356 static int map_sg_list(int num_entries,
357 struct scatterlist *sg,
358 struct srp_direct_buf *md)
361 u64 total_length = 0;
363 for (i = 0; i < num_entries; ++i) {
364 struct srp_direct_buf *descr = md + i;
365 struct scatterlist *sg_entry = &sg[i];
366 descr->va = sg_dma_address(sg_entry);
367 descr->len = sg_dma_len(sg_entry);
369 total_length += sg_dma_len(sg_entry);
375 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
376 * @cmd: Scsi_Cmnd with the scatterlist
377 * @srp_cmd: srp_cmd that contains the memory descriptor
378 * @dev: device for which to map dma memory
380 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
381 * Returns 1 on success.
383 static int map_sg_data(struct scsi_cmnd *cmd,
384 struct srp_event_struct *evt_struct,
385 struct srp_cmd *srp_cmd, struct device *dev)
389 u64 total_length = 0;
390 struct scatterlist *sg = cmd->request_buffer;
391 struct srp_direct_buf *data =
392 (struct srp_direct_buf *) srp_cmd->add_data;
393 struct srp_indirect_buf *indirect =
394 (struct srp_indirect_buf *) data;
396 sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
401 set_srp_direction(cmd, srp_cmd, sg_mapped);
403 /* special case; we can use a single direct descriptor */
404 if (sg_mapped == 1) {
405 data->va = sg_dma_address(&sg[0]);
406 data->len = sg_dma_len(&sg[0]);
411 indirect->table_desc.va = 0;
412 indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
413 indirect->table_desc.key = 0;
415 if (sg_mapped <= MAX_INDIRECT_BUFS) {
416 total_length = map_sg_list(sg_mapped, sg,
417 &indirect->desc_list[0]);
418 indirect->len = total_length;
422 /* get indirect table */
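	/*
	 * The external descriptor table is allocated once per event and then
	 * cached on evt_struct->ext_list (it is finally freed in
	 * release_event_pool()), so only the first large command on an event
	 * pays for the dma_alloc_coherent() call.
	 */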
423 if (!evt_struct->ext_list) {
424 evt_struct->ext_list = (struct srp_direct_buf *)
425 dma_alloc_coherent(dev,
426 SG_ALL * sizeof(struct srp_direct_buf),
427 &evt_struct->ext_list_token, 0);
428 if (!evt_struct->ext_list) {
430 "ibmvscsi: Can't allocate memory for indirect table\n");
436 total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
438 indirect->len = total_length;
439 indirect->table_desc.va = evt_struct->ext_list_token;
440 indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
441 memcpy(indirect->desc_list, evt_struct->ext_list,
442 MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
448 * map_single_data: - Maps memory and initializes memory descriptor fields
449 * @cmd: struct scsi_cmnd with the memory to be mapped
450 * @srp_cmd: srp_cmd that contains the memory descriptor
451 * @dev: device for which to map dma memory
453 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
454 * Returns 1 on success.
456 static int map_single_data(struct scsi_cmnd *cmd,
457 struct srp_cmd *srp_cmd, struct device *dev)
459 struct srp_direct_buf *data =
460 (struct srp_direct_buf *) srp_cmd->add_data;
463 dma_map_single(dev, cmd->request_buffer,
464 cmd->request_bufflen,
466 if (dma_mapping_error(data->va)) {
468 "ibmvscsi: Unable to map request_buffer for command!\n");
471 data->len = cmd->request_bufflen;
474 set_srp_direction(cmd, srp_cmd, 1);
480 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
481 * @cmd: struct scsi_cmnd with the memory to be mapped
482 * @srp_cmd: srp_cmd that contains the memory descriptor
483 * @dev: dma device for which to map dma memory
485 * Called by ibmvscsi_queuecommand() when converting scsi cmds to srp cmds
486 * Returns 1 on success.
488 static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
489 struct srp_event_struct *evt_struct,
490 struct srp_cmd *srp_cmd, struct device *dev)
492 switch (cmd->sc_data_direction) {
493 case DMA_FROM_DEVICE:
498 case DMA_BIDIRECTIONAL:
500 "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
504 "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
505 cmd->sc_data_direction);
509 if (!cmd->request_buffer)
512 return map_sg_data(cmd, evt_struct, srp_cmd, dev);
513 return map_single_data(cmd, srp_cmd, dev);
516 /* ------------------------------------------------------------
517 * Routines for sending and receiving SRPs
520 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
521 * @evt_struct: evt_struct to be sent
522 * @hostdata: ibmvscsi_host_data of host
524 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
525 * Note that this routine assumes that host_lock is held for synchronization
527 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
528 struct ibmvscsi_host_data *hostdata)
530 u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
534 /* If we have exhausted our request limit, just fail this request,
535 * unless it is for a reset or abort.
536 * Note that there are rare cases involving driver generated requests
537 * (such as task management requests) that the mid layer may think we
538 * can handle more requests (can_queue) when we actually can't
540 if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
542 atomic_dec_if_positive(&hostdata->request_limit);
543 /* If request limit was -1 when we started, it is now even
546 if (request_status < -1)
548 /* Otherwise, we may have run out of requests. */
549 /* Abort and reset calls should make it through.
550 * Nothing except abort and reset should use the last two
551 * slots unless we had two or less to begin with.
553 else if (request_status < 2 &&
554 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
555 /* In the case that we have less than two requests
556 * available, check the server limit as a combination
557 * of the request limit and the number of requests
558 * in-flight (the size of the send list). If the
559 * server limit is greater than 2, return busy so
560 * that the last two are reserved for reset and abort.
562 int server_limit = request_status;
563 struct srp_event_struct *tmp_evt;
565 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
569 if (server_limit > 2)
574 /* Copy the IU into the transfer area */
575 *evt_struct->xfer_iu = evt_struct->iu;
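	/*
	 * The SRP tag is simply the kernel pointer to this event struct; the
	 * server echoes it back as the correlation token, and
	 * ibmvscsi_handle_crq() uses it to find the matching srp_event_struct.
	 */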
576 evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
578 /* Add this to the sent list. We need to do this
579 * before we actually send
580 * in case it comes back REALLY fast
582 list_add_tail(&evt_struct->list, &hostdata->sent);
585 ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
586 list_del(&evt_struct->list);
588 printk(KERN_ERR "ibmvscsi: send error %d\n",
590 atomic_inc(&hostdata->request_limit);
597 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
599 free_event_struct(&hostdata->pool, evt_struct);
600 atomic_inc(&hostdata->request_limit);
601 return SCSI_MLQUEUE_HOST_BUSY;
604 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
606 if (evt_struct->cmnd != NULL) {
607 evt_struct->cmnd->result = DID_ERROR << 16;
608 evt_struct->cmnd_done(evt_struct->cmnd);
609 } else if (evt_struct->done)
610 evt_struct->done(evt_struct);
612 free_event_struct(&hostdata->pool, evt_struct);
617 * handle_cmd_rsp: - Handle responses from commands
618 * @evt_struct: srp_event_struct to be handled
620 * Used as a callback when sending scsi cmds.
621 * Gets called by ibmvscsi_handle_crq()
623 static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
625 struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
626 struct scsi_cmnd *cmnd = evt_struct->cmnd;
628 if (unlikely(rsp->opcode != SRP_RSP)) {
629 if (printk_ratelimit())
631 "ibmvscsi: bad SRP RSP type %d\n",
636 cmnd->result = rsp->status;
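	/*
	 * ((result >> 1) & 0x1f) is the classic status_byte() extraction;
	 * sense data is only copied back when the target reported
	 * CHECK_CONDITION.
	 */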
637 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
638 memcpy(cmnd->sense_buffer,
640 rsp->sense_data_len);
641 unmap_cmd_data(&evt_struct->iu.srp.cmd,
643 evt_struct->hostdata->dev);
645 if (rsp->flags & SRP_RSP_FLAG_DOOVER)
646 cmnd->resid = rsp->data_out_res_cnt;
647 else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
648 cmnd->resid = rsp->data_in_res_cnt;
651 if (evt_struct->cmnd_done)
652 evt_struct->cmnd_done(cmnd);
656 * lun_from_dev: - Returns the lun of the scsi device
657 * @dev: struct scsi_device
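 *
 * This packs a 16 bit LUN: addressing method 0x2 in the top two bits,
 * then the 6 bit target id, the 3 bit channel (bus) and the 5 bit lun,
 * matching the field widths noted at the top of this file.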
660 static inline u16 lun_from_dev(struct scsi_device *dev)
662 return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
666 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
667 * @cmd: struct scsi_cmnd to be executed
668 * @done: Callback function to be called when cmd is completed
670 static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
671 void (*done) (struct scsi_cmnd *))
673 struct srp_cmd *srp_cmd;
674 struct srp_event_struct *evt_struct;
675 struct srp_indirect_buf *indirect;
676 struct ibmvscsi_host_data *hostdata =
677 (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
678 u16 lun = lun_from_dev(cmnd->device);
681 evt_struct = get_event_struct(&hostdata->pool);
683 return SCSI_MLQUEUE_HOST_BUSY;
685 /* Set up the actual SRP IU */
686 srp_cmd = &evt_struct->iu.srp.cmd;
687 memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
688 srp_cmd->opcode = SRP_CMD;
689 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
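	/*
	 * The 16 bit LUN computed by lun_from_dev() occupies the most
	 * significant two bytes of the 8 byte SRP LUN field, hence the
	 * shift by 48.
	 */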
690 srp_cmd->lun = ((u64) lun) << 48;
692 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
693 printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
694 free_event_struct(&hostdata->pool, evt_struct);
695 return SCSI_MLQUEUE_HOST_BUSY;
698 init_event_struct(evt_struct,
701 cmnd->timeout_per_command/HZ);
703 evt_struct->cmnd = cmnd;
704 evt_struct->cmnd_done = done;
706 /* Fix up dma address of the buffer itself */
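	/*
	 * When the descriptor list fits inline in the srp_cmd, map_sg_data()
	 * leaves table_desc.va at 0; point it at the desc_list inside the IU
	 * itself, using the IU's own DMA address from the CRQ template.
	 */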
707 indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
708 out_fmt = srp_cmd->buf_fmt >> 4;
709 in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
710 if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
711 out_fmt == SRP_DATA_DESC_INDIRECT) &&
712 indirect->table_desc.va == 0) {
713 indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
714 offsetof(struct srp_cmd, add_data) +
715 offsetof(struct srp_indirect_buf, desc_list);
718 return ibmvscsi_send_srp_event(evt_struct, hostdata);
721 /* ------------------------------------------------------------
722 * Routines for driver initialization
725 * adapter_info_rsp: - Handle response to MAD adapter info request
726 * @evt_struct: srp_event_struct with the response
728 * Used as a "done" callback when sending adapter_info. Gets called
729 * by ibmvscsi_handle_crq()
731 static void adapter_info_rsp(struct srp_event_struct *evt_struct)
733 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
734 dma_unmap_single(hostdata->dev,
735 evt_struct->iu.mad.adapter_info.buffer,
736 evt_struct->iu.mad.adapter_info.common.length,
739 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
740 printk("ibmvscsi: error %d getting adapter info\n",
741 evt_struct->xfer_iu->mad.adapter_info.common.status);
743 printk("ibmvscsi: host srp version: %s, "
744 "host partition %s (%d), OS %d, max io %u\n",
745 hostdata->madapter_info.srp_version,
746 hostdata->madapter_info.partition_name,
747 hostdata->madapter_info.partition_number,
748 hostdata->madapter_info.os_type,
749 hostdata->madapter_info.port_max_txu[0]);
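	/* port_max_txu is in bytes; max_sectors wants 512-byte sectors (>> 9) */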
751 if (hostdata->madapter_info.port_max_txu[0])
752 hostdata->host->max_sectors =
753 hostdata->madapter_info.port_max_txu[0] >> 9;
755 if (hostdata->madapter_info.os_type == 3 &&
756 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
757 printk("ibmvscsi: host (Ver. %s) doesn't support large"
759 hostdata->madapter_info.srp_version);
760 printk("ibmvscsi: limiting scatterlists to %d\n",
762 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
768 * send_mad_adapter_info: - Sends the mad adapter info request
769 * and stores the result so it can be retrieved with
770 * sysfs. We COULD consider causing a failure if the
771 * returned SRP version doesn't match ours.
772 * @hostdata: ibmvscsi_host_data of host
774 * Returns zero if successful.
776 static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
778 struct viosrp_adapter_info *req;
779 struct srp_event_struct *evt_struct;
782 evt_struct = get_event_struct(&hostdata->pool);
784 printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
785 "for ADAPTER_INFO_REQ!\n");
789 init_event_struct(evt_struct,
794 req = &evt_struct->iu.mad.adapter_info;
795 memset(req, 0x00, sizeof(*req));
797 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
798 req->common.length = sizeof(hostdata->madapter_info);
799 req->buffer = addr = dma_map_single(hostdata->dev,
800 &hostdata->madapter_info,
801 sizeof(hostdata->madapter_info),
804 if (dma_mapping_error(req->buffer)) {
806 "ibmvscsi: Unable to map request_buffer "
807 "for adapter_info!\n");
808 free_event_struct(&hostdata->pool, evt_struct);
812 if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
813 printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
814 dma_unmap_single(hostdata->dev,
816 sizeof(hostdata->madapter_info),
822 * login_rsp: - Handle response to SRP login request
823 * @evt_struct: srp_event_struct with the response
825 * Used as a "done" callback when sending srp_login. Gets called
826 * by ibmvscsi_handle_crq()
828 static void login_rsp(struct srp_event_struct *evt_struct)
830 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
831 switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
832 case SRP_LOGIN_RSP: /* it worked! */
834 case SRP_LOGIN_REJ: /* refused! */
835 printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
836 evt_struct->xfer_iu->srp.login_rej.reason);
838 atomic_set(&hostdata->request_limit, -1);
842 "ibmvscsi: Invalid login response typecode 0x%02x!\n",
843 evt_struct->xfer_iu->srp.login_rsp.opcode);
845 atomic_set(&hostdata->request_limit, -1);
849 printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
851 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
852 printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
854 /* Now we know what the real request-limit is.
855 * This value is set rather than added to request_limit because
856 * request_limit could have been set to -1 by this client.
858 atomic_set(&hostdata->request_limit,
859 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
861 /* If we had any pending I/Os, kick them */
862 scsi_unblock_requests(hostdata->host);
864 send_mad_adapter_info(hostdata);
869 * send_srp_login: - Sends the srp login
870 * @hostdata: ibmvscsi_host_data of host
872 * Returns zero if successful.
874 static int send_srp_login(struct ibmvscsi_host_data *hostdata)
878 struct srp_login_req *login;
879 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
882 "ibmvscsi: couldn't allocate an event for login req!\n");
886 init_event_struct(evt_struct,
891 login = &evt_struct->iu.srp.login_req;
892 memset(login, 0x00, sizeof(struct srp_login_req));
893 login->opcode = SRP_LOGIN_REQ;
894 login->req_it_iu_len = sizeof(union srp_iu);
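	/* advertise support for both direct and indirect data descriptors */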
895 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
897 spin_lock_irqsave(hostdata->host->host_lock, flags);
898 /* Start out with a request limit of 1, since this is negotiated in
899 * the login request we are just sending
901 atomic_set(&hostdata->request_limit, 1);
903 rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
904 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
905 printk(KERN_INFO "ibmvscsi: sent SRP login\n");
910 * sync_completion: Signal that a synchronous command has completed
911 * Note that after returning from this call, the evt_struct is freed;
912 * the caller waiting on this completion shouldn't touch the evt_struct again.
915 static void sync_completion(struct srp_event_struct *evt_struct)
917 /* copy the response back */
918 if (evt_struct->sync_srp)
919 *evt_struct->sync_srp = *evt_struct->xfer_iu;
921 complete(&evt_struct->comp);
925 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
926 * send this over to the server and wait synchronously for the response
928 static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
930 struct ibmvscsi_host_data *hostdata =
931 (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
932 struct srp_tsk_mgmt *tsk_mgmt;
933 struct srp_event_struct *evt;
934 struct srp_event_struct *tmp_evt, *found_evt;
935 union viosrp_iu srp_rsp;
938 u16 lun = lun_from_dev(cmd->device);
940 /* First, find this command in our sent list so we can figure
941 * out the correct tag
943 spin_lock_irqsave(hostdata->host->host_lock, flags);
945 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
946 if (tmp_evt->cmnd == cmd) {
953 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
957 evt = get_event_struct(&hostdata->pool);
959 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
960 printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
964 init_event_struct(evt,
969 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
971 /* Set up an abort SRP command */
972 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
973 tsk_mgmt->opcode = SRP_TSK_MGMT;
974 tsk_mgmt->lun = ((u64) lun) << 48;
975 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
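	/*
	 * The task tag must match the tag of the original request, which is
	 * the pointer to its srp_event_struct (see ibmvscsi_send_srp_event()).
	 */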
976 tsk_mgmt->task_tag = (u64) found_evt;
978 printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
979 tsk_mgmt->lun, tsk_mgmt->task_tag);
981 evt->sync_srp = &srp_rsp;
982 init_completion(&evt->comp);
983 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
984 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
986 printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
990 wait_for_completion(&evt->comp);
992 /* make sure we got a good response */
993 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
994 if (printk_ratelimit())
996 "ibmvscsi: abort bad SRP RSP type %d\n",
997 srp_rsp.srp.rsp.opcode);
1001 if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1002 rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1004 rsp_rc = srp_rsp.srp.rsp.status;
1007 if (printk_ratelimit())
1009 "ibmvscsi: abort code %d for task tag 0x%lx\n",
1011 tsk_mgmt->task_tag);
1015 /* Because we dropped the spinlock above, it's possible
1016 * The event is no longer in our list. Make sure it didn't
1017 * complete while we were aborting
1019 spin_lock_irqsave(hostdata->host->host_lock, flags);
1021 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
1022 if (tmp_evt->cmnd == cmd) {
1023 found_evt = tmp_evt;
1028 if (found_evt == NULL) {
1029 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1031 "ibmvscsi: aborted task tag 0x%lx completed\n",
1032 tsk_mgmt->task_tag);
1037 "ibmvscsi: successfully aborted task tag 0x%lx\n",
1038 tsk_mgmt->task_tag);
1040 cmd->result = (DID_ABORT << 16);
1041 list_del(&found_evt->list);
1042 unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
1043 found_evt->hostdata->dev);
1044 free_event_struct(&found_evt->hostdata->pool, found_evt);
1045 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1046 atomic_inc(&hostdata->request_limit);
1051 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
1052 * template send this over to the server and wait synchronously for the response.
1055 static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1057 struct ibmvscsi_host_data *hostdata =
1058 (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
1060 struct srp_tsk_mgmt *tsk_mgmt;
1061 struct srp_event_struct *evt;
1062 struct srp_event_struct *tmp_evt, *pos;
1063 union viosrp_iu srp_rsp;
1065 unsigned long flags;
1066 u16 lun = lun_from_dev(cmd->device);
1068 spin_lock_irqsave(hostdata->host->host_lock, flags);
1069 evt = get_event_struct(&hostdata->pool);
1071 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1072 printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
1076 init_event_struct(evt,
1081 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1083 /* Set up a lun reset SRP command */
1084 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1085 tsk_mgmt->opcode = SRP_TSK_MGMT;
1086 tsk_mgmt->lun = ((u64) lun) << 48;
1087 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1089 printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
1092 evt->sync_srp = &srp_rsp;
1093 init_completion(&evt->comp);
1094 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
1095 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1097 printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
1101 wait_for_completion(&evt->comp);
1103 /* make sure we got a good response */
1104 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1105 if (printk_ratelimit())
1107 "ibmvscsi: reset bad SRP RSP type %d\n",
1108 srp_rsp.srp.rsp.opcode);
1112 if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1113 rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1115 rsp_rc = srp_rsp.srp.rsp.status;
1118 if (printk_ratelimit())
1120 "ibmvscsi: reset code %d for task tag 0x%lx\n",
1121 rsp_rc, tsk_mgmt->task_tag);
1125 /* We need to find all commands for this LUN that have not yet been
1126 * responded to, and fail them with DID_RESET
1128 spin_lock_irqsave(hostdata->host->host_lock, flags);
1129 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1130 if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
1132 tmp_evt->cmnd->result = (DID_RESET << 16);
1133 list_del(&tmp_evt->list);
1134 unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
1135 tmp_evt->hostdata->dev);
1136 free_event_struct(&tmp_evt->hostdata->pool,
1138 atomic_inc(&hostdata->request_limit);
1139 if (tmp_evt->cmnd_done)
1140 tmp_evt->cmnd_done(tmp_evt->cmnd);
1141 else if (tmp_evt->done)
1142 tmp_evt->done(tmp_evt);
1145 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1150 * purge_requests: Our virtual adapter just shut down. Purge any sent requests
1151 * @hostdata: the adapter
1153 static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
1155 struct srp_event_struct *tmp_evt, *pos;
1156 unsigned long flags;
1158 spin_lock_irqsave(hostdata->host->host_lock, flags);
1159 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1160 list_del(&tmp_evt->list);
1161 if (tmp_evt->cmnd) {
1162 tmp_evt->cmnd->result = (error_code << 16);
1163 unmap_cmd_data(&tmp_evt->iu.srp.cmd,
1165 tmp_evt->hostdata->dev);
1166 if (tmp_evt->cmnd_done)
1167 tmp_evt->cmnd_done(tmp_evt->cmnd);
1169 if (tmp_evt->done) {
1170 tmp_evt->done(tmp_evt);
1173 free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
1175 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1179 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
1180 * @crq: Command/Response queue
1181 * @hostdata: ibmvscsi_host_data of host
1184 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1185 struct ibmvscsi_host_data *hostdata)
1187 unsigned long flags;
1188 struct srp_event_struct *evt_struct =
1189 (struct srp_event_struct *)crq->IU_data_ptr;
1190 switch (crq->valid) {
1191 case 0xC0: /* initialization */
1192 switch (crq->format) {
1193 case 0x01: /* Initialization message */
1194 printk(KERN_INFO "ibmvscsi: partner initialized\n");
1195 /* Send back a response */
1196 if (ibmvscsi_send_crq(hostdata,
1197 0xC002000000000000LL, 0) == 0) {
1199 send_srp_login(hostdata);
1202 "ibmvscsi: Unable to send init rsp\n");
1206 case 0x02: /* Initialization response */
1208 "ibmvscsi: partner initialization complete\n");
1211 send_srp_login(hostdata);
1214 printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
1217 case 0xFF: /* Hypervisor telling us the connection is closed */
1218 scsi_block_requests(hostdata->host);
1219 atomic_set(&hostdata->request_limit, 0);
1220 if (crq->format == 0x06) {
1221 /* We need to re-setup the interpartition connection */
1223 "ibmvscsi: Re-enabling adapter!\n");
1224 purge_requests(hostdata, DID_REQUEUE);
1225 if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
1227 (ibmvscsi_send_crq(hostdata,
1228 0xC001000000000000LL, 0))) {
1229 atomic_set(&hostdata->request_limit,
1232 "ibmvscsi: error after"
1237 "ibmvscsi: Virtual adapter failed rc %d!\n",
1240 purge_requests(hostdata, DID_ERROR);
1241 if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
1243 (ibmvscsi_send_crq(hostdata,
1244 0xC001000000000000LL, 0))) {
1245 atomic_set(&hostdata->request_limit,
1248 "ibmvscsi: error after reset\n");
1251 scsi_unblock_requests(hostdata->host);
1253 case 0x80: /* real payload */
1257 "ibmvscsi: got an invalid message type 0x%02x\n",
1262 /* The only kind of payload CRQs we should get are responses to
1263 * things we send. Make sure this response is to something we
1266 if (!valid_event_struct(&hostdata->pool, evt_struct)) {
1268 "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
1269 (void *)crq->IU_data_ptr);
1273 if (atomic_read(&evt_struct->free)) {
1275 "ibmvscsi: received duplicate correlation_token 0x%p!\n",
1276 (void *)crq->IU_data_ptr);
1280 if (crq->format == VIOSRP_SRP_FORMAT)
1281 atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
1282 &hostdata->request_limit);
1284 if (evt_struct->done)
1285 evt_struct->done(evt_struct);
1288 "ibmvscsi: returned done() is NULL; not running it!\n");
1291 * Lock the host_lock before messing with these structures, since we
1292 * are running in a task context
1294 spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
1295 list_del(&evt_struct->list);
1296 free_event_struct(&evt_struct->hostdata->pool, evt_struct);
1297 spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
1301 * ibmvscsi_do_host_config: Send the command to the server to get host
1302 * configuration data. The data is opaque to us.
1304 static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1305 unsigned char *buffer, int length)
1307 struct viosrp_host_config *host_config;
1308 struct srp_event_struct *evt_struct;
1312 evt_struct = get_event_struct(&hostdata->pool);
1315 "ibmvscsi: could't allocate event for HOST_CONFIG!\n");
1319 init_event_struct(evt_struct,
1324 host_config = &evt_struct->iu.mad.host_config;
1326 /* Set up a HOST_CONFIG MAD request */
1327 memset(host_config, 0x00, sizeof(*host_config));
1328 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
1329 host_config->common.length = length;
1330 host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
1334 if (dma_mapping_error(host_config->buffer)) {
1336 "ibmvscsi: dma_mapping error " "getting host config\n");
1337 free_event_struct(&hostdata->pool, evt_struct);
1341 init_completion(&evt_struct->comp);
1342 rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
1344 wait_for_completion(&evt_struct->comp);
1345 dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
1351 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
1352 * @sdev: struct scsi_device device to configure
1354 * Enable allow_restart for a device if it is a disk. Adjust the
1355 * queue_depth here also as is required by the documentation for
1356 * struct scsi_host_template.
1358 static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1360 struct Scsi_Host *shost = sdev->host;
1361 unsigned long lock_flags = 0;
1363 spin_lock_irqsave(shost->host_lock, lock_flags);
1364 if (sdev->type == TYPE_DISK)
1365 sdev->allow_restart = 1;
1366 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1367 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1372 * ibmvscsi_change_queue_depth - Change the device's queue depth
1373 * @sdev: scsi device struct
1374 * @qdepth: depth to set
1379 static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1381 if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1382 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1384 scsi_adjust_queue_depth(sdev, 0, qdepth);
1385 return sdev->queue_depth;
1388 /* ------------------------------------------------------------
1391 static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
1393 struct Scsi_Host *shost = class_to_shost(class_dev);
1394 struct ibmvscsi_host_data *hostdata =
1395 (struct ibmvscsi_host_data *)shost->hostdata;
1398 len = snprintf(buf, PAGE_SIZE, "%s\n",
1399 hostdata->madapter_info.srp_version);
1403 static struct class_device_attribute ibmvscsi_host_srp_version = {
1405 .name = "srp_version",
1408 .show = show_host_srp_version,
1411 static ssize_t show_host_partition_name(struct class_device *class_dev,
1414 struct Scsi_Host *shost = class_to_shost(class_dev);
1415 struct ibmvscsi_host_data *hostdata =
1416 (struct ibmvscsi_host_data *)shost->hostdata;
1419 len = snprintf(buf, PAGE_SIZE, "%s\n",
1420 hostdata->madapter_info.partition_name);
1424 static struct class_device_attribute ibmvscsi_host_partition_name = {
1426 .name = "partition_name",
1429 .show = show_host_partition_name,
1432 static ssize_t show_host_partition_number(struct class_device *class_dev,
1435 struct Scsi_Host *shost = class_to_shost(class_dev);
1436 struct ibmvscsi_host_data *hostdata =
1437 (struct ibmvscsi_host_data *)shost->hostdata;
1440 len = snprintf(buf, PAGE_SIZE, "%d\n",
1441 hostdata->madapter_info.partition_number);
1445 static struct class_device_attribute ibmvscsi_host_partition_number = {
1447 .name = "partition_number",
1450 .show = show_host_partition_number,
1453 static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
1455 struct Scsi_Host *shost = class_to_shost(class_dev);
1456 struct ibmvscsi_host_data *hostdata =
1457 (struct ibmvscsi_host_data *)shost->hostdata;
1460 len = snprintf(buf, PAGE_SIZE, "%d\n",
1461 hostdata->madapter_info.mad_version);
1465 static struct class_device_attribute ibmvscsi_host_mad_version = {
1467 .name = "mad_version",
1470 .show = show_host_mad_version,
1473 static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
1475 struct Scsi_Host *shost = class_to_shost(class_dev);
1476 struct ibmvscsi_host_data *hostdata =
1477 (struct ibmvscsi_host_data *)shost->hostdata;
1480 len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
1484 static struct class_device_attribute ibmvscsi_host_os_type = {
1489 .show = show_host_os_type,
1492 static ssize_t show_host_config(struct class_device *class_dev, char *buf)
1494 struct Scsi_Host *shost = class_to_shost(class_dev);
1495 struct ibmvscsi_host_data *hostdata =
1496 (struct ibmvscsi_host_data *)shost->hostdata;
1498 /* returns null-terminated host config data */
1499 if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
1505 static struct class_device_attribute ibmvscsi_host_config = {
1510 .show = show_host_config,
1513 static struct class_device_attribute *ibmvscsi_attrs[] = {
1514 &ibmvscsi_host_srp_version,
1515 &ibmvscsi_host_partition_name,
1516 &ibmvscsi_host_partition_number,
1517 &ibmvscsi_host_mad_version,
1518 &ibmvscsi_host_os_type,
1519 &ibmvscsi_host_config,
1523 /* ------------------------------------------------------------
1524 * SCSI driver registration
1526 static struct scsi_host_template driver_template = {
1527 .module = THIS_MODULE,
1528 .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
1529 .proc_name = "ibmvscsi",
1530 .queuecommand = ibmvscsi_queuecommand,
1531 .eh_abort_handler = ibmvscsi_eh_abort_handler,
1532 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
1533 .slave_configure = ibmvscsi_slave_configure,
1534 .change_queue_depth = ibmvscsi_change_queue_depth,
1536 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
1538 .sg_tablesize = SG_ALL,
1539 .use_clustering = ENABLE_CLUSTERING,
1540 .shost_attrs = ibmvscsi_attrs,
1544 * Called by bus code for each adapter
1546 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1548 struct ibmvscsi_host_data *hostdata;
1549 struct Scsi_Host *host;
1550 struct device *dev = &vdev->dev;
1551 unsigned long wait_switch = 0;
1554 vdev->dev.driver_data = NULL;
1556 driver_template.can_queue = max_requests;
1557 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1559 printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
1560 goto scsi_host_alloc_failed;
1563 hostdata = (struct ibmvscsi_host_data *)host->hostdata;
1564 memset(hostdata, 0x00, sizeof(*hostdata));
1565 INIT_LIST_HEAD(&hostdata->sent);
1566 hostdata->host = host;
1567 hostdata->dev = dev;
1568 atomic_set(&hostdata->request_limit, -1);
1569 hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
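	/* 32 * 8 sectors * 512 bytes/sector = 128 KB, i.e. 32 pages assuming 4 KB pages */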
1571 rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
1572 if (rc != 0 && rc != H_RESOURCE) {
1573 printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
1574 goto init_crq_failed;
1576 if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
1577 printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
1578 goto init_pool_failed;
1582 host->max_id = max_id;
1583 host->max_channel = max_channel;
1585 if (scsi_add_host(hostdata->host, hostdata->dev))
1586 goto add_host_failed;
1588 /* Try to send an initialization message. Note that this is allowed
1589 * to fail if the other end is not active. In that case we don't
1592 if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
1593 || rc == H_RESOURCE) {
1595 * Wait around max init_timeout secs for the adapter to finish
1596 * initializing. When we are done initializing, we will have a
1597 * valid request_limit. We don't want Linux scanning before
1600 for (wait_switch = jiffies + (init_timeout * HZ);
1601 time_before(jiffies, wait_switch) &&
1602 atomic_read(&hostdata->request_limit) < 2;) {
1607 /* if we now have a valid request_limit, initiate a scan */
1608 if (atomic_read(&hostdata->request_limit) > 0)
1609 scsi_scan_host(host);
1612 vdev->dev.driver_data = hostdata;
1616 release_event_pool(&hostdata->pool, hostdata);
1618 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
1620 scsi_host_put(host);
1621 scsi_host_alloc_failed:
1625 static int ibmvscsi_remove(struct vio_dev *vdev)
1627 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1628 release_event_pool(&hostdata->pool, hostdata);
1629 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
1632 scsi_remove_host(hostdata->host);
1633 scsi_host_put(hostdata->host);
1639 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we support.
1642 static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
1643 {"vscsi", "IBM,v-scsi"},
1646 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
1648 static struct vio_driver ibmvscsi_driver = {
1649 .id_table = ibmvscsi_device_table,
1650 .probe = ibmvscsi_probe,
1651 .remove = ibmvscsi_remove,
1654 .owner = THIS_MODULE,
1658 int __init ibmvscsi_module_init(void)
1660 return vio_register_driver(&ibmvscsi_driver);
1663 void __exit ibmvscsi_module_exit(void)
1665 vio_unregister_driver(&ibmvscsi_driver);
1668 module_init(ibmvscsi_module_init);
1669 module_exit(ibmvscsi_module_exit);