#include <linux/completion.h>
 #include <linux/compat.h>
 #include <linux/chio.h>                        /* here are all the ioctls */
+#include <linux/mutex.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
        u_int               counts[CH_TYPES];
        u_int               unit_attention;
        u_int               voltags;
-       struct semaphore    lock;
+       struct mutex        lock;
 } scsi_changer;
 
 static LIST_HEAD(ch_devlist);
        u_char data[16];
        unsigned int i;
        
-       down(&ch->lock);
+       mutex_lock(&ch->lock);
        for (i = 0; i < ch->counts[type]; i++) {
                if (0 != ch_read_element_status
                    (ch, ch->firsts[type]+i,data)) {
                if (0 != retval)
                        break;
        }
-       up(&ch->lock);
+       mutex_unlock(&ch->lock);
        return retval;
 }
 
                        dprintk("CHIOPOSITION: invalid parameter\n");
                        return -EBADSLT;
                }
-               down(&ch->lock);
+               mutex_lock(&ch->lock);
                retval = ch_position(ch,0,
                                     ch->firsts[pos.cp_type] + pos.cp_unit,
                                     pos.cp_flags & CP_INVERT);
-               up(&ch->lock);
+               mutex_unlock(&ch->lock);
                return retval;
        }
        
                        return -EBADSLT;
                }
                
-               down(&ch->lock);
+               mutex_lock(&ch->lock);
                retval = ch_move(ch,0,
                                 ch->firsts[mv.cm_fromtype] + mv.cm_fromunit,
                                 ch->firsts[mv.cm_totype]   + mv.cm_tounit,
                                 mv.cm_flags & CM_INVERT);
-               up(&ch->lock);
+               mutex_unlock(&ch->lock);
                return retval;
        }
 
                        return -EBADSLT;
                }
                
-               down(&ch->lock);
+               mutex_lock(&ch->lock);
                retval = ch_exchange
                        (ch,0,
                         ch->firsts[mv.ce_srctype]  + mv.ce_srcunit,
                         ch->firsts[mv.ce_fdsttype] + mv.ce_fdstunit,
                         ch->firsts[mv.ce_sdsttype] + mv.ce_sdstunit,
                         mv.ce_flags & CE_INVERT1, mv.ce_flags & CE_INVERT2);
-               up(&ch->lock);
+               mutex_unlock(&ch->lock);
                return retval;
        }
 
                buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
                if (!buffer)
                        return -ENOMEM;
-               down(&ch->lock);
+               mutex_lock(&ch->lock);
                
        voltag_retry:
                memset(cmd,0,sizeof(cmd));
                        goto voltag_retry;
                }
                kfree(buffer);
-               up(&ch->lock);
+               mutex_unlock(&ch->lock);
                
                if (copy_to_user(argp, &cge, sizeof (cge)))
                        return -EFAULT;
 
        case CHIOINITELEM:
        {
-               down(&ch->lock);
+               mutex_lock(&ch->lock);
                retval = ch_init_elem(ch);
-               up(&ch->lock);
+               mutex_unlock(&ch->lock);
                return retval;
        }
                
                        return -EBADSLT;
                }
                elem = ch->firsts[csv.csv_type] + csv.csv_unit;
-               down(&ch->lock);
+               mutex_lock(&ch->lock);
                retval = ch_set_voltag(ch, elem,
                                       csv.csv_flags & CSV_AVOLTAG,
                                       csv.csv_flags & CSV_CLEARTAG,
                                       csv.csv_voltag);
-               up(&ch->lock);
+               mutex_unlock(&ch->lock);
                return retval;
        }
 
        memset(ch,0,sizeof(*ch));
        ch->minor = ch_devcount;
        sprintf(ch->name,"ch%d",ch->minor);
-       init_MUTEX(&ch->lock);
+       mutex_init(&ch->lock);
        ch->device = sd;
        ch_readconfig(ch);
        if (init)
 
 #include <linux/timer.h>
 #include <linux/string.h>
 #include <linux/ioport.h>
+#include <linux/mutex.h>
 
 #include <asm/processor.h>     /* for boot_cpu_data */
 #include <asm/pgtable.h>
  *============================================================================
  */
 
-static DECLARE_MUTEX(adpt_configuration_lock);
+static DEFINE_MUTEX(adpt_configuration_lock);
 
 static struct i2o_sys_tbl *sys_tbl = NULL;
 static int sys_tbl_ind = 0;
         */
 
        // Find HBA (host bus adapter) we are looking for
-       down(&adpt_configuration_lock);
+       mutex_lock(&adpt_configuration_lock);
        for (pHba = hba_chain; pHba; pHba = pHba->next) {
                if (pHba->host == host) {
                        break;  /* found adapter */
                }
        }
-       up(&adpt_configuration_lock);
+       mutex_unlock(&adpt_configuration_lock);
        if (pHba == NULL) {
                return 0;
        }
        }
        memset(pHba, 0, sizeof(adpt_hba));
 
-       down(&adpt_configuration_lock);
+       mutex_lock(&adpt_configuration_lock);
 
        if(hba_chain != NULL){
                for(p = hba_chain; p->next; p = p->next);
        sprintf(pHba->name, "dpti%d", hba_count);
        hba_count++;
        
-       up(&adpt_configuration_lock);
+       mutex_unlock(&adpt_configuration_lock);
 
        pHba->pDev = pDev;
        pHba->base_addr_phys = base_addr0_phys;
        struct adpt_device* pNext;
 
 
-       down(&adpt_configuration_lock);
+       mutex_lock(&adpt_configuration_lock);
        // scsi_unregister calls our adpt_release which
-       // does a quiese
+       // does a quiesce
        if(pHba->host){
        }
 
        hba_count--;
-       up(&adpt_configuration_lock);
+       mutex_unlock(&adpt_configuration_lock);
 
        iounmap(pHba->base_addr_virt);
        pci_release_regions(pHba->pDev);
  
 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
 {
-       down(&adpt_configuration_lock);
+       mutex_lock(&adpt_configuration_lock);
        d->controller=pHba;
        d->owner=NULL;
        d->next=pHba->devices;
        pHba->devices=d;
        *d->dev_name = 0;
 
-       up(&adpt_configuration_lock);
+       mutex_unlock(&adpt_configuration_lock);
        return 0;
 }
 
        if (minor >= hba_count) {
                return -ENXIO;
        }
-       down(&adpt_configuration_lock);
+       mutex_lock(&adpt_configuration_lock);
        for (pHba = hba_chain; pHba; pHba = pHba->next) {
                if (pHba->unit == minor) {
                        break;  /* found adapter */
                }
        }
        if (pHba == NULL) {
-               up(&adpt_configuration_lock);
+               mutex_unlock(&adpt_configuration_lock);
                return -ENXIO;
        }
 
 //     if(pHba->in_use){
-       //      up(&adpt_configuration_lock);
+       //      mutex_unlock(&adpt_configuration_lock);
 //             return -EBUSY;
 //     }
 
        pHba->in_use = 1;
-       up(&adpt_configuration_lock);
+       mutex_unlock(&adpt_configuration_lock);
 
        return 0;
 }
        if (minor >= hba_count) {
                return -ENXIO;
        }
-       down(&adpt_configuration_lock);
+       mutex_lock(&adpt_configuration_lock);
        for (pHba = hba_chain; pHba; pHba = pHba->next) {
                if (pHba->unit == minor) {
                        break;  /* found adapter */
                }
        }
-       up(&adpt_configuration_lock);
+       mutex_unlock(&adpt_configuration_lock);
        if (pHba == NULL) {
                return -ENXIO;
        }
        if (minor >= DPTI_MAX_HBA){
                return -ENXIO;
        }
-       down(&adpt_configuration_lock);
+       mutex_lock(&adpt_configuration_lock);
        for (pHba = hba_chain; pHba; pHba = pHba->next) {
                if (pHba->unit == minor) {
                        break;  /* found adapter */
                }
        }
-       up(&adpt_configuration_lock);
+       mutex_unlock(&adpt_configuration_lock);
        if(pHba == NULL){
                return -ENXIO;
        }
 
 void scsi_remove_host(struct Scsi_Host *shost)
 {
        unsigned long flags;
-       down(&shost->scan_mutex);
+       mutex_lock(&shost->scan_mutex);
        spin_lock_irqsave(shost->host_lock, flags);
        if (scsi_host_set_state(shost, SHOST_CANCEL))
                if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
                        spin_unlock_irqrestore(shost->host_lock, flags);
-                       up(&shost->scan_mutex);
+                       mutex_unlock(&shost->scan_mutex);
                        return;
                }
        spin_unlock_irqrestore(shost->host_lock, flags);
-       up(&shost->scan_mutex);
+       mutex_unlock(&shost->scan_mutex);
        scsi_forget_host(shost);
        scsi_proc_host_rm(shost);
 
        INIT_LIST_HEAD(&shost->starved_list);
        init_waitqueue_head(&shost->host_wait);
 
-       init_MUTEX(&shost->scan_mutex);
+       mutex_init(&shost->scan_mutex);
 
        shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */
        shost->dma_channel = 0xff;
 
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
+#include <linux/mutex.h>
 #include <net/tcp.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
        /*
         * serialize Xmit worker on a per-connection basis.
         */
-       down(&conn->xmitsema);
+       mutex_lock(&conn->xmitmutex);
        if (iscsi_data_xmit(conn))
                schedule_work(&conn->xmitwork);
-       up(&conn->xmitsema);
+       mutex_unlock(&conn->xmitmutex);
 }
 
 #define FAILURE_BAD_HOST               1
                session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
        spin_unlock(&session->lock);
 
-        if (!in_interrupt() && !down_trylock(&conn->xmitsema)) {
+        if (!in_interrupt() && mutex_trylock(&conn->xmitmutex)) {
                spin_unlock_irq(host->host_lock);
                if (iscsi_data_xmit(conn))
                        schedule_work(&conn->xmitwork);
-               up(&conn->xmitsema);
+               mutex_unlock(&conn->xmitmutex);
                spin_lock_irq(host->host_lock);
        } else
                schedule_work(&conn->xmitwork);
                goto max_recv_dlenght_alloc_fail;
 
        init_timer(&conn->tmabort_timer);
-       init_MUTEX(&conn->xmitsema);
+       mutex_init(&conn->xmitmutex);
        init_waitqueue_head(&conn->ehwait);
 
        return iscsi_handle(conn);
        struct iscsi_conn *conn = iscsi_ptr(connh);
        struct iscsi_session *session = conn->session;
 
-       down(&conn->xmitsema);
+       mutex_lock(&conn->xmitmutex);
        set_bit(SUSPEND_BIT, &conn->suspend_tx);
        if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) {
                struct sock *sk = conn->sock->sk;
        }
        spin_unlock_bh(&session->lock);
 
-       up(&conn->xmitsema);
+       mutex_unlock(&conn->xmitmutex);
 
        /*
         * Block until all in-progress commands for this connection
        set_bit(SUSPEND_BIT, &conn->suspend_rx);
        write_unlock_bh(&sk->sk_callback_lock);
 
-       down(&conn->xmitsema);
+       mutex_lock(&conn->xmitmutex);
 
        spin_lock_irqsave(session->host->host_lock, flags);
        spin_lock(&session->lock);
                        conn->datadgst_en = 0;
                }
        }
-       up(&conn->xmitsema);
+       mutex_unlock(&conn->xmitmutex);
 }
 
 static int
         * 1) connection-level failure;
         * 2) recovery due protocol error;
         */
-       down(&conn->xmitsema);
+       mutex_lock(&conn->xmitmutex);
        spin_lock_bh(&session->lock);
        if (session->state != ISCSI_STATE_LOGGED_IN) {
                if (session->state == ISCSI_STATE_TERMINATE) {
                        spin_unlock_bh(&session->lock);
-                       up(&conn->xmitsema);
+                       mutex_unlock(&conn->xmitmutex);
                        goto failed;
                }
                spin_unlock_bh(&session->lock);
                         * 2) session was re-open during time out of ctask.
                         */
                        spin_unlock_bh(&session->lock);
-                       up(&conn->xmitsema);
+                       mutex_unlock(&conn->xmitmutex);
                        goto success;
                }
                conn->tmabort_state = TMABORT_INITIAL;
                                    conn->tmabort_state == TMABORT_SUCCESS) {
                                        conn->tmabort_state = TMABORT_INITIAL;
                                        spin_unlock_bh(&session->lock);
-                                       up(&conn->xmitsema);
+                                       mutex_unlock(&conn->xmitmutex);
                                        goto success;
                                }
                                conn->tmabort_state = TMABORT_INITIAL;
                        spin_unlock_bh(&session->lock);
                }
        }
-       up(&conn->xmitsema);
+       mutex_unlock(&conn->xmitmutex);
 
 
        /*
 exit:
        del_timer_sync(&conn->tmabort_timer);
 
-       down(&conn->xmitsema);
+       mutex_lock(&conn->xmitmutex);
        if (conn->sock) {
                struct sock *sk = conn->sock->sk;
 
                iscsi_ctask_cleanup(conn, ctask);
                write_unlock_bh(&sk->sk_callback_lock);
        }
-       up(&conn->xmitsema);
+       mutex_unlock(&conn->xmitmutex);
        return rc;
 }
 
        struct iscsi_conn *conn = iscsi_ptr(connh);
        int rc;
 
-       down(&conn->xmitsema);
+       mutex_lock(&conn->xmitmutex);
        rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
-       up(&conn->xmitsema);
+       mutex_unlock(&conn->xmitmutex);
 
        return rc;
 }
 
        struct kfifo            *mgmtqueue;     /* mgmt (control) xmit queue */
        struct kfifo            *xmitqueue;     /* data-path cmd queue */
        struct work_struct      xmitwork;       /* per-conn. xmit workqueue */
-       struct semaphore        xmitsema;       /* serializes connection xmit,
+       struct mutex            xmitmutex;      /* serializes connection xmit,
                                                 * access to kfifos:      *
                                                 * xmitqueue, writequeue, *
                                                 * immqueue, mgmtqueue    */
 
         * serialized. This is so because we want to reserve maximum number of
         * available command ids for the I/O commands.
         */
-       down(&adapter->int_mtx);
+       mutex_lock(&adapter->int_mtx);
 
        scb = &adapter->int_scb;
        memset(scb, 0, sizeof(scb_t));
                        mc->cmd, mc->opcode, mc->subopcode, scmd->result);
        }
 
-       up(&adapter->int_mtx);
+       mutex_unlock(&adapter->int_mtx);
 
        return rval;
 }
                adapter->has_64bit_addr = 0;
        }
                
-       init_MUTEX(&adapter->int_mtx);
+       mutex_init(&adapter->int_mtx);
        init_completion(&adapter->int_waitq);
 
        adapter->this_id = DEFAULT_INITIATOR_ID;
 
 #define __MEGARAID_H__
 
 #include <linux/spinlock.h>
-
+#include <linux/mutex.h>
 
 #define MEGARAID_VERSION       \
        "v2.00.3 (Release Date: Wed Feb 19 08:51:30 EST 2003)\n"
 
        scb_t                   int_scb;
        Scsi_Cmnd               int_scmd;
-       struct semaphore        int_mtx;        /* To synchronize the internal
+       struct mutex            int_mtx;        /* To synchronize the internal
                                                commands */
        struct completion       int_waitq;      /* wait queue for internal
                                                 cmds */
 
 #include <asm/uaccess.h>
 #include <linux/fs.h>
 #include <linux/compat.h>
+#include <linux/mutex.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 static int megasas_mgmt_majorno;
 static struct megasas_mgmt_info megasas_mgmt_info;
 static struct fasync_struct *megasas_async_queue;
-static DECLARE_MUTEX(megasas_async_queue_mutex);
+static DEFINE_MUTEX(megasas_async_queue_mutex);
 
 /**
  * megasas_get_cmd -   Get a command from the free pool
 {
        int rc;
 
-       down(&megasas_async_queue_mutex);
+       mutex_lock(&megasas_async_queue_mutex);
 
        rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
 
-       up(&megasas_async_queue_mutex);
+       mutex_unlock(&megasas_async_queue_mutex);
 
        if (rc >= 0) {
                /* For sanity check when we get ioctl */
 
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
+#include <linux/mutex.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
        .gfp_mask       = __GFP_DMA,
 };
 
-static DECLARE_MUTEX(host_cmd_pool_mutex);
+static DEFINE_MUTEX(host_cmd_pool_mutex);
 
 static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
                                            gfp_t gfp_mask)
         * Select a command slab for this host and create it if not
         * yet existant.
         */
-       down(&host_cmd_pool_mutex);
+       mutex_lock(&host_cmd_pool_mutex);
        pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
        if (!pool->users) {
                pool->slab = kmem_cache_create(pool->name,
 
        pool->users++;
        shost->cmd_pool = pool;
-       up(&host_cmd_pool_mutex);
+       mutex_unlock(&host_cmd_pool_mutex);
 
        /*
         * Get one backup command for this host.
                kmem_cache_destroy(pool->slab);
        return -ENOMEM;
  fail:
-       up(&host_cmd_pool_mutex);
+       mutex_unlock(&host_cmd_pool_mutex);
        return -ENOMEM;
 
 }
                kmem_cache_free(shost->cmd_pool->slab, cmd);
        }
 
-       down(&host_cmd_pool_mutex);
+       mutex_lock(&host_cmd_pool_mutex);
        if (!--shost->cmd_pool->users)
                kmem_cache_destroy(shost->cmd_pool->slab);
-       up(&host_cmd_pool_mutex);
+       mutex_unlock(&host_cmd_pool_mutex);
 }
 
 #ifdef CONFIG_SCSI_LOGGING
 
 #include <linux/errno.h>
 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 
 #include <scsi/scsi.h>
 static struct proc_dir_entry *proc_scsi;
 
 /* Protect sht->present and sht->proc_dir */
-static DECLARE_MUTEX(global_host_template_sem);
+static DEFINE_MUTEX(global_host_template_mutex);
 
 static int proc_scsi_read(char *buffer, char **start, off_t offset,
                          int length, int *eof, void *data)
        if (!sht->proc_info)
                return;
 
-       down(&global_host_template_sem);
+       mutex_lock(&global_host_template_mutex);
        if (!sht->present++) {
                sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi);
                if (!sht->proc_dir)
                else
                        sht->proc_dir->owner = sht->module;
        }
-       up(&global_host_template_sem);
+       mutex_unlock(&global_host_template_mutex);
 }
 
 void scsi_proc_hostdir_rm(struct scsi_host_template *sht)
        if (!sht->proc_info)
                return;
 
-       down(&global_host_template_sem);
+       mutex_lock(&global_host_template_mutex);
        if (!--sht->present && sht->proc_dir) {
                remove_proc_entry(sht->proc_name, proc_scsi);
                sht->proc_dir = NULL;
        }
-       up(&global_host_template_sem);
+       mutex_unlock(&global_host_template_mutex);
 }
 
 void scsi_proc_host_add(struct Scsi_Host *shost)
 
                return ERR_PTR(-ENOMEM);
 
        get_device(&starget->dev);
-       down(&shost->scan_mutex);
+       mutex_lock(&shost->scan_mutex);
        if (scsi_host_scan_allowed(shost)) {
                res = scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1,
                                             hostdata);
                if (res != SCSI_SCAN_LUN_PRESENT)
                        sdev = ERR_PTR(-ENODEV);
        }
-       up(&shost->scan_mutex);
+       mutex_unlock(&shost->scan_mutex);
        scsi_target_reap(starget);
        put_device(&starget->dev);
 
 {
        struct Scsi_Host *shost = dev_to_shost(parent);
 
-       down(&shost->scan_mutex);
+       mutex_lock(&shost->scan_mutex);
        if (scsi_host_scan_allowed(shost))
                __scsi_scan_target(parent, channel, id, lun, rescan);
-       up(&shost->scan_mutex);
+       mutex_unlock(&shost->scan_mutex);
 }
 EXPORT_SYMBOL(scsi_scan_target);
 
            ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
                return -EINVAL;
 
-       down(&shost->scan_mutex);
+       mutex_lock(&shost->scan_mutex);
        if (scsi_host_scan_allowed(shost)) {
                if (channel == SCAN_WILD_CARD)
                        for (channel = 0; channel <= shost->max_channel;
                else
                        scsi_scan_channel(shost, channel, id, lun, rescan);
        }
-       up(&shost->scan_mutex);
+       mutex_unlock(&shost->scan_mutex);
 
        return 0;
 }
        struct scsi_device *sdev = NULL;
        struct scsi_target *starget;
 
-       down(&shost->scan_mutex);
+       mutex_lock(&shost->scan_mutex);
        if (!scsi_host_scan_allowed(shost))
                goto out;
        starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
        }
        put_device(&starget->dev);
  out:
-       up(&shost->scan_mutex);
+       mutex_unlock(&shost->scan_mutex);
        return sdev;
 }
 EXPORT_SYMBOL(scsi_get_host_dev);
 
 {
        struct Scsi_Host *shost = sdev->host;
 
-       down(&shost->scan_mutex);
+       mutex_lock(&shost->scan_mutex);
        __scsi_remove_device(sdev);
-       up(&shost->scan_mutex);
+       mutex_unlock(&shost->scan_mutex);
 }
 EXPORT_SYMBOL(scsi_remove_device);
 
 
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/mempool.h>
+#include <linux/mutex.h>
 #include <net/tcp.h>
 
 #include <scsi/scsi.h>
        struct list_head sessions;
        /*
         * lock to serialize access to the sessions list which must
-        * be taken after the rx_queue_sema
+        * be taken after the rx_queue_mutex
         */
        spinlock_t session_lock;
        /*
 /*
  * list of registered transports and lock that must
  * be held while accessing list. The iscsi_transport_lock must
- * be acquired after the rx_queue_sema.
+ * be acquired after the rx_queue_mutex.
  */
 static LIST_HEAD(iscsi_transports);
 static DEFINE_SPINLOCK(iscsi_transport_lock);
 
 static struct sock *nls;
 static int daemon_pid;
-static DECLARE_MUTEX(rx_queue_sema);
+static DEFINE_MUTEX(rx_queue_mutex);
 
 struct mempool_zone {
        mempool_t *pool;
 {
        struct sk_buff *skb;
 
-       down(&rx_queue_sema);
+       mutex_lock(&rx_queue_mutex);
        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                while (skb->len >= NLMSG_SPACE(0)) {
                        int err;
                }
                kfree_skb(skb);
        }
-       up(&rx_queue_sema);
+       mutex_unlock(&rx_queue_mutex);
 }
 
 /*
 
        BUG_ON(!tt);
 
-       down(&rx_queue_sema);
+       mutex_lock(&rx_queue_mutex);
 
        priv = iscsi_if_transport_lookup(tt);
        BUG_ON (!priv);
        spin_lock_irqsave(&priv->session_lock, flags);
        if (!list_empty(&priv->sessions)) {
                spin_unlock_irqrestore(&priv->session_lock, flags);
-               up(&rx_queue_sema);
+               mutex_unlock(&rx_queue_mutex);
                return -EPERM;
        }
        spin_unlock_irqrestore(&priv->session_lock, flags);
 
        sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
        class_device_unregister(&priv->cdev);
-       up(&rx_queue_sema);
+       mutex_unlock(&rx_queue_mutex);
 
        return 0;
 }
 
 #include <linux/blkpg.h>
 #include <linux/kref.h>
 #include <linux/delay.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 
 #include <scsi/scsi.h>
-/* This semaphore is used to mediate the 0->1 reference get in the
+/* This mutex is used to mediate the 0->1 reference get in the
  * face of object destruction (i.e. we can't allow a get on an
  * object after last put) */
-static DECLARE_MUTEX(sd_ref_sem);
+static DEFINE_MUTEX(sd_ref_mutex);
 
 static int sd_revalidate_disk(struct gendisk *disk);
 static void sd_rw_intr(struct scsi_cmnd * SCpnt);
 {
        struct scsi_disk *sdkp;
 
-       down(&sd_ref_sem);
+       mutex_lock(&sd_ref_mutex);
        sdkp = __scsi_disk_get(disk);
-       up(&sd_ref_sem);
+       mutex_unlock(&sd_ref_mutex);
        return sdkp;
 }
 
 {
        struct scsi_disk *sdkp;
 
-       down(&sd_ref_sem);
+       mutex_lock(&sd_ref_mutex);
        sdkp = dev_get_drvdata(dev);
        if (sdkp)
                sdkp = __scsi_disk_get(sdkp->disk);
-       up(&sd_ref_sem);
+       mutex_unlock(&sd_ref_mutex);
        return sdkp;
 }
 
 {
        struct scsi_device *sdev = sdkp->device;
 
-       down(&sd_ref_sem);
+       mutex_lock(&sd_ref_mutex);
        kref_put(&sdkp->kref, scsi_disk_release);
        scsi_device_put(sdev);
-       up(&sd_ref_sem);
+       mutex_unlock(&sd_ref_mutex);
 }
 
 /**
        del_gendisk(sdkp->disk);
        sd_shutdown(dev);
 
-       down(&sd_ref_sem);
+       mutex_lock(&sd_ref_mutex);
        dev_set_drvdata(dev, NULL);
        kref_put(&sdkp->kref, scsi_disk_release);
-       up(&sd_ref_sem);
+       mutex_unlock(&sd_ref_mutex);
 
        return 0;
 }
  *     scsi_disk_release - Called to free the scsi_disk structure
  *     @kref: pointer to embedded kref
  *
- *     sd_ref_sem must be held entering this routine.  Because it is
+ *     sd_ref_mutex must be held entering this routine.  Because it is
  *     called on last put, you should always use the scsi_disk_get()
- *     scsi_disk_put() helpers which manipulate the semaphore directly
+ *     scsi_disk_put() helpers which manipulate the mutex directly
  *     and never do a direct kref_put().
 
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 
 #include <scsi/scsi.h>
-/* This semaphore is used to mediate the 0->1 reference get in the
+/* This mutex is used to mediate the 0->1 reference get in the
  * face of object destruction (i.e. we can't allow a get on an
  * object after last put) */
-static DECLARE_MUTEX(sr_ref_sem);
+static DEFINE_MUTEX(sr_ref_mutex);
 
 static int sr_open(struct cdrom_device_info *, int);
 static void sr_release(struct cdrom_device_info *);
 {
        struct scsi_cd *cd = NULL;
 
-       down(&sr_ref_sem);
+       mutex_lock(&sr_ref_mutex);
        if (disk->private_data == NULL)
                goto out;
        cd = scsi_cd(disk);
        kref_put(&cd->kref, sr_kref_release);
        cd = NULL;
  out:
-       up(&sr_ref_sem);
+       mutex_unlock(&sr_ref_mutex);
        return cd;
 }
 
 {
        struct scsi_device *sdev = cd->device;
 
-       down(&sr_ref_sem);
+       mutex_lock(&sr_ref_mutex);
        kref_put(&cd->kref, sr_kref_release);
        scsi_device_put(sdev);
-       up(&sr_ref_sem);
+       mutex_unlock(&sr_ref_mutex);
 }
 
 /*
  *     sr_kref_release - Called to free the scsi_cd structure
  *     @kref: pointer to embedded kref
  *
- *     sr_ref_sem must be held entering this routine.  Because it is
+ *     sr_ref_mutex must be held entering this routine.  Because it is
  *     called on last put, you should always use the scsi_cd_get()
- *     scsi_cd_put() helpers which manipulate the semaphore directly
+ *     scsi_cd_put() helpers which manipulate the mutex directly
  *     and never do a direct kref_put().
 
        del_gendisk(cd->disk);
 
-       down(&sr_ref_sem);
+       mutex_lock(&sr_ref_mutex);
        kref_put(&cd->kref, sr_kref_release);
-       up(&sr_ref_sem);
+       mutex_unlock(&sr_ref_mutex);
 
        return 0;
 }
 
 #include <linux/devfs_fs_kernel.h>
 #include <linux/cdev.h>
 #include <linux/delay.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 #include <asm/dma.h>
 
 #define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
 
-static DECLARE_MUTEX(st_ref_sem);
+static DEFINE_MUTEX(st_ref_mutex);
 
 \f
 #include "osst_detect.h"
 {
        struct scsi_tape *STp = NULL;
 
-       down(&st_ref_sem);
+       mutex_lock(&st_ref_mutex);
        write_lock(&st_dev_arr_lock);
 
        if (dev < st_dev_max && scsi_tapes != NULL)
        STp = NULL;
 out:
        write_unlock(&st_dev_arr_lock);
-       up(&st_ref_sem);
+       mutex_unlock(&st_ref_mutex);
        return STp;
 }
 
 {
        struct scsi_device *sdev = STp->device;
 
-       down(&st_ref_sem);
+       mutex_lock(&st_ref_mutex);
        kref_put(&STp->kref, scsi_tape_release);
        scsi_device_put(sdev);
-       up(&st_ref_sem);
+       mutex_unlock(&st_ref_mutex);
 }
 
 struct st_reject_data {
                                }
                        }
 
-                       down(&st_ref_sem);
+                       mutex_lock(&st_ref_mutex);
                        kref_put(&tpnt->kref, scsi_tape_release);
-                       up(&st_ref_sem);
+                       mutex_unlock(&st_ref_mutex);
                        return 0;
                }
        }
  *      scsi_tape_release - Called to free the Scsi_Tape structure
  *      @kref: pointer to embedded kref
  *
- *      st_ref_sem must be held entering this routine.  Because it is
+ *      st_ref_mutex must be held entering this routine.  Because it is
  *      called on last put, you should always use the scsi_tape_get()
- *      scsi_tape_put() helpers which manipulate the semaphore directly
+ *      scsi_tape_put() helpers which manipulate the mutex directly
  *      and never do a direct kref_put().
 
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
 
 struct block_device;
 struct completion;
        spinlock_t              default_lock;
        spinlock_t              *host_lock;
 
-       struct semaphore        scan_mutex;/* serialize scanning activity */
+       struct mutex            scan_mutex;/* serialize scanning activity */
 
        struct list_head        eh_cmd_q;
        struct task_struct    * ehandler;  /* Error recovery thread. */