Manual merge of for-linus to upstream (fix conflicts in drivers/infiniband/core/ucm.c)
author    Roland Dreier <rolandd@cisco.com>
          Mon, 24 Oct 2005 17:55:29 +0000 (10:55 -0700)
committer Roland Dreier <rolandd@cisco.com>
          Mon, 24 Oct 2005 17:55:29 +0000 (10:55 -0700)
drivers/infiniband/core/cm.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/mthca/mthca_eq.c

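The cm.c and ucm.c hunks below bind each CM ID to a specific ib_device: ib_create_cm_id() gains a device argument, cm_insert_listen()/cm_find_listen() key their red-black tree on (device, service_id), and the device field is dropped from the REQ/SIDR REQ event parameters. As a rough illustration only (not part of this merge), a kernel client of the reworked API might look like the sketch below; my_cm_handler, my_listen and my_ctx are invented names, and the service_mask comment is an assumption based on the matching logic visible in the cm_insert_listen hunk.

/*
 * Hedged sketch of a caller of the per-device CM API merged here.
 * Names prefixed with my_ are hypothetical, for illustration only.
 */
#include <linux/err.h>
#include <rdma/ib_cm.h>

static int my_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	/* Dispatch on event->event; returning 0 keeps the cm_id alive. */
	return 0;
}

static int my_listen(struct ib_device *device, __be64 service_id, void *my_ctx)
{
	struct ib_cm_id *cm_id;
	int ret;

	/* The CM ID is now tied to one device instead of being global. */
	cm_id = ib_create_cm_id(device, my_cm_handler, my_ctx);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	/* Assumption: a zero service_mask is treated as an exact match. */
	ret = ib_cm_listen(cm_id, service_id, 0);
	if (ret)
		ib_destroy_cm_id(cm_id);

	return ret;
}
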
index 6f747debca90e4e7538c0340ec9e8bdb1eaf08f1,151ef83cc14f32bcddede4f9708ebc3d42eb5ee8..3fe6f4754fa8fe81adda9ab85b477ed0bb071816
@@@ -366,15 -366,9 +366,15 @@@ static struct cm_id_private * cm_insert
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
 -                  (service_mask & cur_cm_id_priv->id.service_id))
 -                      return cm_id_priv;
 -              if (service_id < cur_cm_id_priv->id.service_id)
 +                  (service_mask & cur_cm_id_priv->id.service_id) &&
 +                  (cm_id_priv->id.device == cur_cm_id_priv->id.device))
 +                      return cur_cm_id_priv;
 +
 +              if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
 +                      link = &(*link)->rb_left;
 +              else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
 +                      link = &(*link)->rb_right;
 +              else if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        return NULL;
  }
  
 -static struct cm_id_private * cm_find_listen(__be64 service_id)
 +static struct cm_id_private * cm_find_listen(struct ib_device *device,
 +                                           __be64 service_id)
  {
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                if ((cm_id_priv->id.service_mask & service_id) ==
 -                  (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
 +                   cm_id_priv->id.service_id &&
 +                  (cm_id_priv->id.device == device))
                        return cm_id_priv;
 -              if (service_id < cm_id_priv->id.service_id)
 +
 +              if (device < cm_id_priv->id.device)
 +                      node = node->rb_left;
 +              else if (device > cm_id_priv->id.device)
 +                      node = node->rb_right;
 +              else if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else
                        node = node->rb_right;
@@@ -536,8 -523,7 +536,8 @@@ static void cm_reject_sidr_req(struct c
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
  }
  
 -struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
 +struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 +                               ib_cm_handler cm_handler,
                                 void *context)
  {
        struct cm_id_private *cm_id_priv;
  
        memset(cm_id_priv, 0, sizeof *cm_id_priv);
        cm_id_priv->id.state = IB_CM_IDLE;
 +      cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
@@@ -1062,6 -1047,7 +1062,6 @@@ static void cm_format_req_event(struct 
        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
 -      param->device = cm_id_priv->av.port->mad_agent->device;
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
@@@ -1240,8 -1226,7 +1240,8 @@@ static struct cm_id_private * cm_match_
        }
  
        /* Find matching listen request. */
 -      listen_cm_id_priv = cm_find_listen(req_msg->service_id);
 +      listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
 +                                         req_msg->service_id);
        if (!listen_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
@@@ -1269,7 -1254,7 +1269,7 @@@ static int cm_req_handler(struct cm_wor
  
        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  
 -      cm_id = ib_create_cm_id(NULL, NULL);
 +      cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
  
@@@ -2644,6 -2629,7 +2644,6 @@@ static void cm_format_sidr_req_event(st
        param = &work->cm_event.param.sidr_req_rcvd;
        param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
        param->listen_id = listen_id;
 -      param->device = work->port->mad_agent->device;
        param->port = work->port->port_num;
        work->cm_event.private_data = &sidr_req_msg->private_data;
  }
@@@ -2656,7 -2642,7 +2656,7 @@@ static int cm_sidr_req_handler(struct c
        struct ib_wc *wc;
        unsigned long flags;
  
 -      cm_id = ib_create_cm_id(NULL, NULL);
 +      cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out; /* Duplicate message. */
        }
 -      cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
 +      cur_cm_id_priv = cm_find_listen(cm_id->device,
 +                                      sidr_req_msg->service_id);
        if (!cur_cm_id_priv) {
                rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
                spin_unlock_irqrestore(&cm.lock, flags);
@@@ -3338,6 -3323,7 +3338,7 @@@ static void __exit ib_cm_cleanup(void
        flush_workqueue(cm.wq);
        destroy_workqueue(cm.wq);
        ib_unregister_client(&cm_client);
+       idr_destroy(&cm.local_id_table);
  }
  
  module_init(ib_cm_init);
index a2c4234ca8a36a903ac85613965e6378b85aeead,e215cf0478d666d66881f1145d9989d65076f3e2..0e5ef97f7637edc19df101b4afe21829f7f2f095
@@@ -583,16 -583,10 +583,16 @@@ int ib_sa_path_rec_get(struct ib_devic
  {
        struct ib_sa_path_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
 -      struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
 -      struct ib_mad_agent *agent  = port->agent;
 +      struct ib_sa_port   *port;
 +      struct ib_mad_agent *agent;
        int ret;
  
 +      if (!sa_dev)
 +              return -ENODEV;
 +
 +      port  = &sa_dev->port[port_num - sa_dev->start_port];
 +      agent = port->agent;
 +
        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;
@@@ -691,16 -685,10 +691,16 @@@ int ib_sa_service_rec_query(struct ib_d
  {
        struct ib_sa_service_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
 -      struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
 -      struct ib_mad_agent *agent  = port->agent;
 +      struct ib_sa_port   *port;
 +      struct ib_mad_agent *agent;
        int ret;
  
 +      if (!sa_dev)
 +              return -ENODEV;
 +
 +      port  = &sa_dev->port[port_num - sa_dev->start_port];
 +      agent = port->agent;
 +
        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE)
@@@ -780,16 -768,10 +780,16 @@@ int ib_sa_mcmember_rec_query(struct ib_
  {
        struct ib_sa_mcmember_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
 -      struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
 -      struct ib_mad_agent *agent  = port->agent;
 +      struct ib_sa_port   *port;
 +      struct ib_mad_agent *agent;
        int ret;
  
 +      if (!sa_dev)
 +              return -ENODEV;
 +
 +      port  = &sa_dev->port[port_num - sa_dev->start_port];
 +      agent = port->agent;
 +
        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;
@@@ -993,6 -975,7 +993,7 @@@ static int __init ib_sa_init(void
  static void __exit ib_sa_cleanup(void)
  {
        ib_unregister_client(&sa_client);
+       idr_destroy(&query_idr);
  }
  
  module_init(ib_sa_init);
index 02ca642089bdac7c760f532626cbadd3e25f5963,5a6ba4030d44f60869b9789a47ab2e61a9d0629d..28477565ecba8df54012a0562cee7a0eec78b6ef
  #include <linux/file.h>
  #include <linux/mount.h>
  #include <linux/cdev.h>
 +#include <linux/idr.h>
  
  #include <asm/uaccess.h>
  
 -#include "ucm.h"
 +#include <rdma/ib_cm.h>
 +#include <rdma/ib_user_cm.h>
  
  MODULE_AUTHOR("Libor Michalek");
  MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
  MODULE_LICENSE("Dual BSD/GPL");
  
 -static int ucm_debug_level;
 +struct ib_ucm_device {
 +      int                     devnum;
 +      struct cdev             dev;
 +      struct class_device     class_dev;
 +      struct ib_device        *ib_dev;
 +};
 +
 +struct ib_ucm_file {
 +      struct semaphore mutex;
 +      struct file *filp;
 +      struct ib_ucm_device *device;
 +
 +      struct list_head  ctxs;
 +      struct list_head  events;
 +      wait_queue_head_t poll_wait;
 +};
 +
 +struct ib_ucm_context {
 +      int                 id;
 +      wait_queue_head_t   wait;
 +      atomic_t            ref;
 +      int                 events_reported;
 +
 +      struct ib_ucm_file *file;
 +      struct ib_cm_id    *cm_id;
 +      __u64              uid;
 +
 +      struct list_head    events;    /* list of pending events. */
 +      struct list_head    file_list; /* member in file ctx list */
 +};
 +
 +struct ib_ucm_event {
 +      struct ib_ucm_context *ctx;
 +      struct list_head file_list; /* member in file event list */
 +      struct list_head ctx_list;  /* member in ctx event list */
  
 -module_param_named(debug_level, ucm_debug_level, int, 0644);
 -MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
 +      struct ib_cm_id *cm_id;
 +      struct ib_ucm_event_resp resp;
 +      void *data;
 +      void *info;
 +      int data_len;
 +      int info_len;
 +};
  
  enum {
        IB_UCM_MAJOR = 231,
 -      IB_UCM_MINOR = 255
 +      IB_UCM_BASE_MINOR = 224,
 +      IB_UCM_MAX_DEVICES = 32
  };
  
 -#define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)
 +#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
  
 -#define PFX "UCM: "
 +static void ib_ucm_add_one(struct ib_device *device);
 +static void ib_ucm_remove_one(struct ib_device *device);
  
 -#define ucm_dbg(format, arg...)                       \
 -      do {                                    \
 -              if (ucm_debug_level > 0)        \
 -                      printk(KERN_DEBUG PFX format, ## arg); \
 -      } while (0)
 +static struct ib_client ucm_client = {
 +      .name   = "ucm",
 +      .add    = ib_ucm_add_one,
 +      .remove = ib_ucm_remove_one
 +};
  
 -static struct semaphore ctx_id_mutex;
 -static struct idr       ctx_id_table;
 +static DECLARE_MUTEX(ctx_id_mutex);
 +static DEFINE_IDR(ctx_id_table);
 +static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
  
  static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
  {
@@@ -196,13 -152,17 +196,13 @@@ static struct ib_ucm_context *ib_ucm_ct
                goto error;
  
        list_add_tail(&ctx->file_list, &file->ctxs);
 -      ucm_dbg("Allocated CM ID <%d>\n", ctx->id);
        return ctx;
  
  error:
        kfree(ctx);
        return NULL;
  }
 -/*
 - * Event portion of the API, handle CM events
 - * and allow event polling.
 - */
 +
  static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
                                  struct ib_sa_path_rec  *kpath)
  {
@@@ -249,7 -209,6 +249,7 @@@ static void ib_ucm_event_req_get(struc
        ureq->retry_count                = kreq->retry_count;
        ureq->rnr_retry_count            = kreq->rnr_retry_count;
        ureq->srq                        = kreq->srq;
 +      ureq->port                       = kreq->port;
  
        ib_ucm_event_path_get(&ureq->primary_path, kreq->primary_path);
        ib_ucm_event_path_get(&ureq->alternate_path, kreq->alternate_path);
@@@ -336,8 -295,6 +336,8 @@@ static int ib_ucm_event_process(struct 
        case IB_CM_SIDR_REQ_RECEIVED:
                uvt->resp.u.sidr_req_resp.pkey = 
                                        evt->param.sidr_req_rcvd.pkey;
 +              uvt->resp.u.sidr_req_resp.port = 
 +                                      evt->param.sidr_req_rcvd.port;
                uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_SIDR_REP_RECEIVED:
@@@ -430,7 -387,9 +430,7 @@@ static ssize_t ib_ucm_event(struct ib_u
  
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 -      /*
 -       * wait
 -       */
 +
        down(&file->mutex);
        while (list_empty(&file->events)) {
  
@@@ -512,6 -471,7 +512,6 @@@ done
        return result;
  }
  
 -
  static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
                return -ENOMEM;
  
        ctx->uid = cmd.uid;
 -      ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
 +      ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
 +                                   ib_ucm_event_handler, ctx);
        if (IS_ERR(ctx->cm_id)) {
                result = PTR_ERR(ctx->cm_id);
 -              goto err;
 +              goto err1;
        }
  
        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                result = -EFAULT;
 -              goto err;
 +              goto err2;
        }
 -
        return 0;
  
 -err:
 +err2:
 +      ib_destroy_cm_id(ctx->cm_id);
 +err1:
        down(&ctx_id_mutex);
        idr_remove(&ctx_id_table, ctx->id);
        up(&ctx_id_mutex);
 -
 -      if (!IS_ERR(ctx->cm_id))
 -              ib_destroy_cm_id(ctx->cm_id);
 -
        kfree(ctx);
        return result;
  }
@@@ -1222,6 -1184,9 +1222,6 @@@ static ssize_t ib_ucm_write(struct fil
        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;
  
 -      ucm_dbg("Write. cmd <%d> in <%d> out <%d> len <%Zu>\n",
 -              hdr.cmd, hdr.in, hdr.out, len);
 -
        if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
                return -EINVAL;
  
@@@ -1266,7 -1231,8 +1266,7 @@@ static int ib_ucm_open(struct inode *in
  
        filp->private_data = file;
        file->filp = filp;
 -
 -      ucm_dbg("Created struct\n");
 +      file->device = container_of(inode->i_cdev, struct ib_ucm_device, dev);
  
        return 0;
  }
@@@ -1297,17 -1263,7 +1297,17 @@@ static int ib_ucm_close(struct inode *i
        return 0;
  }
  
 -static struct file_operations ib_ucm_fops = {
 +static void ib_ucm_release_class_dev(struct class_device *class_dev)
 +{
 +      struct ib_ucm_device *dev;
 +
 +      dev = container_of(class_dev, struct ib_ucm_device, class_dev);
 +      cdev_del(&dev->dev);
 +      clear_bit(dev->devnum, dev_map);
 +      kfree(dev);
 +}
 +
 +static struct file_operations ucm_fops = {
        .owner   = THIS_MODULE,
        .open    = ib_ucm_open,
        .release = ib_ucm_close,
        .poll    = ib_ucm_poll,
  };
  
 +static struct class ucm_class = {
 +      .name    = "infiniband_cm",
 +      .release = ib_ucm_release_class_dev
 +};
  
 -static struct class *ib_ucm_class;
 -static struct cdev      ib_ucm_cdev;
 +static ssize_t show_dev(struct class_device *class_dev, char *buf)
 +{
 +      struct ib_ucm_device *dev;
 +      
 +      dev = container_of(class_dev, struct ib_ucm_device, class_dev);
 +      return print_dev_t(buf, dev->dev.dev);
 +}
 +static CLASS_DEVICE_ATTR(dev, S_IRUGO, show_dev, NULL);
  
 -static int __init ib_ucm_init(void)
 +static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
  {
 -      int result;
 +      struct ib_ucm_device *dev;
 +      
 +      dev = container_of(class_dev, struct ib_ucm_device, class_dev);
 +      return sprintf(buf, "%s\n", dev->ib_dev->name);
 +}
 +static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
  
 -      result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
 -      if (result) {
 -              ucm_dbg("Error <%d> registering dev\n", result);
 -              goto err_chr;
 -      }
 +static void ib_ucm_add_one(struct ib_device *device)
 +{
 +      struct ib_ucm_device *ucm_dev;
 +
 +      if (!device->alloc_ucontext)
 +              return;
 +
 +      ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
 +      if (!ucm_dev)
 +              return;
  
 -      cdev_init(&ib_ucm_cdev, &ib_ucm_fops);
 +      memset(ucm_dev, 0, sizeof *ucm_dev);
 +      ucm_dev->ib_dev = device;
 +
 +      ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
 +      if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES)
 +              goto err;
 +
 +      set_bit(ucm_dev->devnum, dev_map);
 +
 +      cdev_init(&ucm_dev->dev, &ucm_fops);
 +      ucm_dev->dev.owner = THIS_MODULE;
 +      kobject_set_name(&ucm_dev->dev.kobj, "ucm%d", ucm_dev->devnum);
 +      if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
 +              goto err;
  
 -      result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
 -      if (result) {
 -              ucm_dbg("Error <%d> adding cdev\n", result);
 +      ucm_dev->class_dev.class = &ucm_class;
 +      ucm_dev->class_dev.dev = device->dma_device;
 +      snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
 +               ucm_dev->devnum);
 +      if (class_device_register(&ucm_dev->class_dev))
                goto err_cdev;
 -      }
  
 -      ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
 -      if (IS_ERR(ib_ucm_class)) {
 -              result = PTR_ERR(ib_ucm_class);
 -              ucm_dbg("Error <%d> creating class\n", result);
 +      if (class_device_create_file(&ucm_dev->class_dev,
 +                                   &class_device_attr_dev))
 +              goto err_class;
 +      if (class_device_create_file(&ucm_dev->class_dev,
 +                                   &class_device_attr_ibdev))
                goto err_class;
 +
 +      ib_set_client_data(device, &ucm_client, ucm_dev);
 +      return;
 +
 +err_class:
 +      class_device_unregister(&ucm_dev->class_dev);
 +err_cdev:
 +      cdev_del(&ucm_dev->dev);
 +      clear_bit(ucm_dev->devnum, dev_map);
 +err:
 +      kfree(ucm_dev);
 +      return;
 +}
 +
 +static void ib_ucm_remove_one(struct ib_device *device)
 +{
 +      struct ib_ucm_device *ucm_dev = ib_get_client_data(device, &ucm_client);
 +
 +      if (!ucm_dev)
 +              return;
 +
 +      class_device_unregister(&ucm_dev->class_dev);
 +}
 +
 +static ssize_t show_abi_version(struct class *class, char *buf)
 +{
 +      return sprintf(buf, "%d\n", IB_USER_CM_ABI_VERSION);
 +}
 +static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
 +
 +static int __init ib_ucm_init(void)
 +{
 +      int ret;
 +
 +      ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
 +                                   "infiniband_cm");
 +      if (ret) {
 +              printk(KERN_ERR "ucm: couldn't register device number\n");
 +              goto err;
        }
  
 -      class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");
 +      ret = class_register(&ucm_class);
 +      if (ret) {
 +              printk(KERN_ERR "ucm: couldn't create class infiniband_cm\n");
 +              goto err_chrdev;
 +      }
  
 -      idr_init(&ctx_id_table);
 -      init_MUTEX(&ctx_id_mutex);
 +      ret = class_create_file(&ucm_class, &class_attr_abi_version);
 +      if (ret) {
 +              printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
 +              goto err_class;
 +      }
  
 +      ret = ib_register_client(&ucm_client);
 +      if (ret) {
 +              printk(KERN_ERR "ucm: couldn't register client\n");
 +              goto err_class;
 +      }
        return 0;
 +
  err_class:
 -      cdev_del(&ib_ucm_cdev);
 -err_cdev:
 -      unregister_chrdev_region(IB_UCM_DEV, 1);
 -err_chr:
 -      return result;
 +      class_unregister(&ucm_class);
 +err_chrdev:
 +      unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
 +err:
 +      return ret;
  }
  
  static void __exit ib_ucm_cleanup(void)
  {
 -      class_device_destroy(ib_ucm_class, IB_UCM_DEV);
 -      class_destroy(ib_ucm_class);
 -      cdev_del(&ib_ucm_cdev);
 -      unregister_chrdev_region(IB_UCM_DEV, 1);
 +      ib_unregister_client(&ucm_client);
 +      class_unregister(&ucm_class);
 +      unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+       idr_destroy(&ctx_id_table);
  }
  
  module_init(ib_ucm_init);
index ef2312a9ea76f75f5335a634e656afbe75f56afb,add45f7faa5b6d3267eee920bf9a8d2e02e935e0..251c752a7ae6ec1a815d644826d654121795284a
@@@ -3,7 -3,6 +3,7 @@@
   * Copyright (c) 2005 Cisco Systems.  All rights reserved.
   * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
   * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 + * Copyright (c) 2005 PathScale, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
@@@ -78,31 -77,26 +78,31 @@@ static DECLARE_BITMAP(dev_map, IB_UVERB
  static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
                                     const char __user *buf, int in_len,
                                     int out_len) = {
 -      [IB_USER_VERBS_CMD_QUERY_PARAMS]  = ib_uverbs_query_params,
 -      [IB_USER_VERBS_CMD_GET_CONTEXT]   = ib_uverbs_get_context,
 -      [IB_USER_VERBS_CMD_QUERY_DEVICE]  = ib_uverbs_query_device,
 -      [IB_USER_VERBS_CMD_QUERY_PORT]    = ib_uverbs_query_port,
 -      [IB_USER_VERBS_CMD_QUERY_GID]     = ib_uverbs_query_gid,
 -      [IB_USER_VERBS_CMD_QUERY_PKEY]    = ib_uverbs_query_pkey,
 -      [IB_USER_VERBS_CMD_ALLOC_PD]      = ib_uverbs_alloc_pd,
 -      [IB_USER_VERBS_CMD_DEALLOC_PD]    = ib_uverbs_dealloc_pd,
 -      [IB_USER_VERBS_CMD_REG_MR]        = ib_uverbs_reg_mr,
 -      [IB_USER_VERBS_CMD_DEREG_MR]      = ib_uverbs_dereg_mr,
 -      [IB_USER_VERBS_CMD_CREATE_CQ]     = ib_uverbs_create_cq,
 -      [IB_USER_VERBS_CMD_DESTROY_CQ]    = ib_uverbs_destroy_cq,
 -      [IB_USER_VERBS_CMD_CREATE_QP]     = ib_uverbs_create_qp,
 -      [IB_USER_VERBS_CMD_MODIFY_QP]     = ib_uverbs_modify_qp,
 -      [IB_USER_VERBS_CMD_DESTROY_QP]    = ib_uverbs_destroy_qp,
 -      [IB_USER_VERBS_CMD_ATTACH_MCAST]  = ib_uverbs_attach_mcast,
 -      [IB_USER_VERBS_CMD_DETACH_MCAST]  = ib_uverbs_detach_mcast,
 -      [IB_USER_VERBS_CMD_CREATE_SRQ]    = ib_uverbs_create_srq,
 -      [IB_USER_VERBS_CMD_MODIFY_SRQ]    = ib_uverbs_modify_srq,
 -      [IB_USER_VERBS_CMD_DESTROY_SRQ]   = ib_uverbs_destroy_srq,
 +      [IB_USER_VERBS_CMD_GET_CONTEXT]         = ib_uverbs_get_context,
 +      [IB_USER_VERBS_CMD_QUERY_DEVICE]        = ib_uverbs_query_device,
 +      [IB_USER_VERBS_CMD_QUERY_PORT]          = ib_uverbs_query_port,
 +      [IB_USER_VERBS_CMD_ALLOC_PD]            = ib_uverbs_alloc_pd,
 +      [IB_USER_VERBS_CMD_DEALLOC_PD]          = ib_uverbs_dealloc_pd,
 +      [IB_USER_VERBS_CMD_REG_MR]              = ib_uverbs_reg_mr,
 +      [IB_USER_VERBS_CMD_DEREG_MR]            = ib_uverbs_dereg_mr,
 +      [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
 +      [IB_USER_VERBS_CMD_CREATE_CQ]           = ib_uverbs_create_cq,
 +      [IB_USER_VERBS_CMD_POLL_CQ]             = ib_uverbs_poll_cq,
 +      [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]       = ib_uverbs_req_notify_cq,
 +      [IB_USER_VERBS_CMD_DESTROY_CQ]          = ib_uverbs_destroy_cq,
 +      [IB_USER_VERBS_CMD_CREATE_QP]           = ib_uverbs_create_qp,
 +      [IB_USER_VERBS_CMD_MODIFY_QP]           = ib_uverbs_modify_qp,
 +      [IB_USER_VERBS_CMD_DESTROY_QP]          = ib_uverbs_destroy_qp,
 +      [IB_USER_VERBS_CMD_POST_SEND]           = ib_uverbs_post_send,
 +      [IB_USER_VERBS_CMD_POST_RECV]           = ib_uverbs_post_recv,
 +      [IB_USER_VERBS_CMD_POST_SRQ_RECV]       = ib_uverbs_post_srq_recv,
 +      [IB_USER_VERBS_CMD_CREATE_AH]           = ib_uverbs_create_ah,
 +      [IB_USER_VERBS_CMD_DESTROY_AH]          = ib_uverbs_destroy_ah,
 +      [IB_USER_VERBS_CMD_ATTACH_MCAST]        = ib_uverbs_attach_mcast,
 +      [IB_USER_VERBS_CMD_DETACH_MCAST]        = ib_uverbs_detach_mcast,
 +      [IB_USER_VERBS_CMD_CREATE_SRQ]          = ib_uverbs_create_srq,
 +      [IB_USER_VERBS_CMD_MODIFY_SRQ]          = ib_uverbs_modify_srq,
 +      [IB_USER_VERBS_CMD_DESTROY_SRQ]         = ib_uverbs_destroy_srq,
  };
  
  static struct vfsmount *uverbs_event_mnt;
@@@ -119,13 -113,7 +119,13 @@@ static int ib_dealloc_ucontext(struct i
  
        down(&ib_uverbs_idr_mutex);
  
 -      /* XXX Free AHs */
 +      list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
 +              struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id);
 +              idr_remove(&ib_uverbs_ah_idr, uobj->id);
 +              ib_destroy_ah(ah);
 +              list_del(&uobj->list);
 +              kfree(uobj);
 +      }
  
        list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
                struct ib_qp *qp = idr_find(&ib_uverbs_qp_idr, uobj->id);
@@@ -200,19 -188,25 +200,19 @@@ static ssize_t ib_uverbs_event_read(str
  
        spin_lock_irq(&file->lock);
  
 -      while (list_empty(&file->event_list) && file->fd >= 0) {
 +      while (list_empty(&file->event_list)) {
                spin_unlock_irq(&file->lock);
  
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
  
                if (wait_event_interruptible(file->poll_wait,
 -                                           !list_empty(&file->event_list) ||
 -                                           file->fd < 0))
 +                                           !list_empty(&file->event_list)))
                        return -ERESTARTSYS;
  
                spin_lock_irq(&file->lock);
        }
  
 -      if (file->fd < 0) {
 -              spin_unlock_irq(&file->lock);
 -              return -ENODEV;
 -      }
 -
        event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
  
        if (file->is_async)
@@@ -254,19 -248,26 +254,19 @@@ static unsigned int ib_uverbs_event_pol
        poll_wait(filp, &file->poll_wait, wait);
  
        spin_lock_irq(&file->lock);
 -      if (file->fd < 0)
 -              pollflags = POLLERR;
 -      else if (!list_empty(&file->event_list))
 +      if (!list_empty(&file->event_list))
                pollflags = POLLIN | POLLRDNORM;
        spin_unlock_irq(&file->lock);
  
        return pollflags;
  }
  
 -static void ib_uverbs_event_release(struct ib_uverbs_event_file *file)
 +void ib_uverbs_release_event_file(struct kref *ref)
  {
 -      struct ib_uverbs_event *entry, *tmp;
 +      struct ib_uverbs_event_file *file =
 +              container_of(ref, struct ib_uverbs_event_file, ref);
  
 -      spin_lock_irq(&file->lock);
 -      if (file->fd != -1) {
 -              file->fd = -1;
 -              list_for_each_entry_safe(entry, tmp, &file->event_list, list)
 -                      kfree(entry);
 -      }
 -      spin_unlock_irq(&file->lock);
 +      kfree(file);
  }
  
  static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
  static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
  {
        struct ib_uverbs_event_file *file = filp->private_data;
 +      struct ib_uverbs_event *entry, *tmp;
 +
 +      spin_lock_irq(&file->lock);
 +      file->file = NULL;
 +      list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
 +              if (entry->counter)
 +                      list_del(&entry->obj_list);
 +              kfree(entry);
 +      }
 +      spin_unlock_irq(&file->lock);
  
 -      ib_uverbs_event_release(file);
        ib_uverbs_event_fasync(-1, filp, 0);
 -      kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
 +
 +      if (file->is_async) {
 +              ib_unregister_event_handler(&file->uverbs_file->event_handler);
 +              kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
 +      }
 +      kref_put(&file->ref, ib_uverbs_release_event_file);
  
        return 0;
  }
  
  static struct file_operations uverbs_event_fops = {
 -      /*
 -       * No .owner field since we artificially create event files,
 -       * so there is no increment to the module reference count in
 -       * the open path.  All event files come from a uverbs command
 -       * file, which already takes a module reference, so this is OK.
 -       */
 +      .owner   = THIS_MODULE,
        .read    = ib_uverbs_event_read,
        .poll    = ib_uverbs_event_poll,
        .release = ib_uverbs_event_close,
  
  void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
  {
 -      struct ib_uverbs_file  *file = cq_context;
 -      struct ib_ucq_object   *uobj;
 -      struct ib_uverbs_event *entry;
 -      unsigned long           flags;
 +      struct ib_uverbs_event_file    *file = cq_context;
 +      struct ib_ucq_object           *uobj;
 +      struct ib_uverbs_event         *entry;
 +      unsigned long                   flags;
 +
 +      if (!file)
 +              return;
 +
 +      spin_lock_irqsave(&file->lock, flags);
 +      if (!file->file) {
 +              spin_unlock_irqrestore(&file->lock, flags);
 +              return;
 +      }
  
        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 -      if (!entry)
 +      if (!entry) {
 +              spin_unlock_irqrestore(&file->lock, flags);
                return;
 +      }
  
        uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
  
        entry->desc.comp.cq_handle = cq->uobject->user_handle;
        entry->counter             = &uobj->comp_events_reported;
  
 -      spin_lock_irqsave(&file->comp_file[0].lock, flags);
 -      list_add_tail(&entry->list, &file->comp_file[0].event_list);
 +      list_add_tail(&entry->list, &file->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
 -      spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
 +      spin_unlock_irqrestore(&file->lock, flags);
  
 -      wake_up_interruptible(&file->comp_file[0].poll_wait);
 -      kill_fasync(&file->comp_file[0].async_queue, SIGIO, POLL_IN);
 +      wake_up_interruptible(&file->poll_wait);
 +      kill_fasync(&file->async_queue, SIGIO, POLL_IN);
  }
  
  static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
        struct ib_uverbs_event *entry;
        unsigned long flags;
  
 +      spin_lock_irqsave(&file->async_file->lock, flags);
 +      if (!file->async_file->file) {
 +              spin_unlock_irqrestore(&file->async_file->lock, flags);
 +              return;
 +      }
 +
        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 -      if (!entry)
 +      if (!entry) {
 +              spin_unlock_irqrestore(&file->async_file->lock, flags);
                return;
 +      }
  
        entry->desc.async.element    = element;
        entry->desc.async.event_type = event;
        entry->counter               = counter;
  
 -      spin_lock_irqsave(&file->async_file.lock, flags);
 -      list_add_tail(&entry->list, &file->async_file.event_list);
 +      list_add_tail(&entry->list, &file->async_file->event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
 -      spin_unlock_irqrestore(&file->async_file.lock, flags);
 +      spin_unlock_irqrestore(&file->async_file->lock, flags);
  
 -      wake_up_interruptible(&file->async_file.poll_wait);
 -      kill_fasync(&file->async_file.async_queue, SIGIO, POLL_IN);
 +      wake_up_interruptible(&file->async_file->poll_wait);
 +      kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
  }
  
  void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
  {
 +      struct ib_uverbs_event_file *ev_file = context_ptr;
        struct ib_ucq_object *uobj;
  
        uobj = container_of(event->element.cq->uobject,
                            struct ib_ucq_object, uobject);
  
 -      ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
 +      ib_uverbs_async_handler(ev_file->uverbs_file, uobj->uobject.user_handle,
                                event->event, &uobj->async_list,
                                &uobj->async_events_reported);
                                
@@@ -415,8 -389,8 +415,8 @@@ void ib_uverbs_srq_event_handler(struc
                                &uobj->events_reported);
  }
  
 -static void ib_uverbs_event_handler(struct ib_event_handler *handler,
 -                                  struct ib_event *event)
 +void ib_uverbs_event_handler(struct ib_event_handler *handler,
 +                           struct ib_event *event)
  {
        struct ib_uverbs_file *file =
                container_of(handler, struct ib_uverbs_file, event_handler);
                                NULL, NULL);
  }
  
 -static int ib_uverbs_event_init(struct ib_uverbs_event_file *file,
 -                              struct ib_uverbs_file *uverbs_file)
 +struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
 +                                      int is_async, int *fd)
  {
 +      struct ib_uverbs_event_file *ev_file;
        struct file *filp;
 +      int ret;
  
 -      spin_lock_init(&file->lock);
 -      INIT_LIST_HEAD(&file->event_list);
 -      init_waitqueue_head(&file->poll_wait);
 -      file->uverbs_file = uverbs_file;
 -      file->async_queue = NULL;
 -
 -      file->fd = get_unused_fd();
 -      if (file->fd < 0)
 -              return file->fd;
 +      ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
 +      if (!ev_file)
 +              return ERR_PTR(-ENOMEM);
 +
 +      kref_init(&ev_file->ref);
 +      spin_lock_init(&ev_file->lock);
 +      INIT_LIST_HEAD(&ev_file->event_list);
 +      init_waitqueue_head(&ev_file->poll_wait);
 +      ev_file->uverbs_file = uverbs_file;
 +      ev_file->async_queue = NULL;
 +      ev_file->is_async    = is_async;
 +
 +      *fd = get_unused_fd();
 +      if (*fd < 0) {
 +              ret = *fd;
 +              goto err;
 +      }
  
        filp = get_empty_filp();
        if (!filp) {
 -              put_unused_fd(file->fd);
 -              return -ENFILE;
 +              ret = -ENFILE;
 +              goto err_fd;
        }
  
 -      filp->f_op         = &uverbs_event_fops;
 +      ev_file->file      = filp;
 +
 +      /*
 +       * fops_get() can't fail here, because we're coming from a
 +       * system call on a uverbs file, which will already have a
 +       * module reference.
 +       */
 +      filp->f_op         = fops_get(&uverbs_event_fops);
        filp->f_vfsmnt     = mntget(uverbs_event_mnt);
        filp->f_dentry     = dget(uverbs_event_mnt->mnt_root);
        filp->f_mapping    = filp->f_dentry->d_inode->i_mapping;
        filp->f_flags      = O_RDONLY;
        filp->f_mode       = FMODE_READ;
 -      filp->private_data = file;
 +      filp->private_data = ev_file;
  
 -      fd_install(file->fd, filp);
 +      return filp;
  
 -      return 0;
 +err_fd:
 +      put_unused_fd(*fd);
 +
 +err:
 +      kfree(ev_file);
 +      return ERR_PTR(ret);
 +}
 +
 +/*
 + * Look up a completion event file by FD.  If lookup is successful,
 + * takes a ref to the event file struct that it returns; if
 + * unsuccessful, returns NULL.
 + */
 +struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
 +{
 +      struct ib_uverbs_event_file *ev_file = NULL;
 +      struct file *filp;
 +
 +      filp = fget(fd);
 +      if (!filp)
 +              return NULL;
 +
 +      if (filp->f_op != &uverbs_event_fops)
 +              goto out;
 +
 +      ev_file = filp->private_data;
 +      if (ev_file->is_async) {
 +              ev_file = NULL;
 +              goto out;
 +      }
 +
 +      kref_get(&ev_file->ref);
 +
 +out:
 +      fput(filp);
 +      return ev_file;
  }
  
  static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
  
        if (hdr.command < 0                             ||
            hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
 -          !uverbs_cmd_table[hdr.command])
 +          !uverbs_cmd_table[hdr.command]              ||
 +          !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
                return -EINVAL;
  
 -      if (!file->ucontext                               &&
 -          hdr.command != IB_USER_VERBS_CMD_QUERY_PARAMS &&
 +      if (!file->ucontext &&
            hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
                return -EINVAL;
  
@@@ -555,33 -477,82 +555,33 @@@ static int ib_uverbs_open(struct inode 
        struct ib_uverbs_device *dev =
                container_of(inode->i_cdev, struct ib_uverbs_device, dev);
        struct ib_uverbs_file *file;
 -      int i = 0;
 -      int ret;
  
        if (!try_module_get(dev->ib_dev->owner))
                return -ENODEV;
  
 -      file = kmalloc(sizeof *file +
 -                     (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file),
 -                     GFP_KERNEL);
 +      file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file) {
 -              ret = -ENOMEM;
 -              goto err;
 +              module_put(dev->ib_dev->owner);
 +              return -ENOMEM;
        }
  
 -      file->device = dev;
 +      file->device   = dev;
 +      file->ucontext = NULL;
        kref_init(&file->ref);
        init_MUTEX(&file->mutex);
  
 -      file->ucontext = NULL;
 -
 -      kref_get(&file->ref);
 -      ret = ib_uverbs_event_init(&file->async_file, file);
 -      if (ret)
 -              goto err_kref;
 -
 -      file->async_file.is_async = 1;
 -
 -      for (i = 0; i < dev->num_comp; ++i) {
 -              kref_get(&file->ref);
 -              ret = ib_uverbs_event_init(&file->comp_file[i], file);
 -              if (ret)
 -                      goto err_async;
 -              file->comp_file[i].is_async = 0;
 -      }
 -
 -
        filp->private_data = file;
  
 -      INIT_IB_EVENT_HANDLER(&file->event_handler, dev->ib_dev,
 -                            ib_uverbs_event_handler);
 -      if (ib_register_event_handler(&file->event_handler))
 -              goto err_async;
 -
        return 0;
 -
 -err_async:
 -      while (i--)
 -              ib_uverbs_event_release(&file->comp_file[i]);
 -
 -      ib_uverbs_event_release(&file->async_file);
 -
 -err_kref:
 -      /*
 -       * One extra kref_put() because we took a reference before the
 -       * event file creation that failed and got us here.
 -       */
 -      kref_put(&file->ref, ib_uverbs_release_file);
 -      kref_put(&file->ref, ib_uverbs_release_file);
 -
 -err:
 -      module_put(dev->ib_dev->owner);
 -      return ret;
  }
  
  static int ib_uverbs_close(struct inode *inode, struct file *filp)
  {
        struct ib_uverbs_file *file = filp->private_data;
 -      int i;
  
 -      ib_unregister_event_handler(&file->event_handler);
 -      ib_uverbs_event_release(&file->async_file);
        ib_dealloc_ucontext(file->ucontext);
  
 -      for (i = 0; i < file->device->num_comp; ++i)
 -              ib_uverbs_event_release(&file->comp_file[i]);
 -
 +      kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
        kref_put(&file->ref, ib_uverbs_release_file);
  
        return 0;
@@@ -617,15 -588,6 +617,15 @@@ static ssize_t show_ibdev(struct class_
  }
  static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
  
 +static ssize_t show_dev_abi_version(struct class_device *class_dev, char *buf)
 +{
 +      struct ib_uverbs_device *dev =
 +              container_of(class_dev, struct ib_uverbs_device, class_dev);
 +
 +      return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver);
 +}
 +static CLASS_DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
 +
  static void ib_uverbs_release_class_dev(struct class_device *class_dev)
  {
        struct ib_uverbs_device *dev =
@@@ -669,8 -631,8 +669,8 @@@ static void ib_uverbs_add_one(struct ib
        set_bit(uverbs_dev->devnum, dev_map);
        spin_unlock(&map_lock);
  
 -      uverbs_dev->ib_dev   = device;
 -      uverbs_dev->num_comp = 1;
 +      uverbs_dev->ib_dev           = device;
 +      uverbs_dev->num_comp_vectors = 1;
  
        if (device->mmap)
                cdev_init(&uverbs_dev->dev, &uverbs_mmap_fops);
  
        if (class_device_create_file(&uverbs_dev->class_dev, &class_device_attr_ibdev))
                goto err_class;
 +      if (class_device_create_file(&uverbs_dev->class_dev, &class_device_attr_abi_version))
 +              goto err_class;
  
        ib_set_client_data(device, &uverbs_client, uverbs_dev);
  
@@@ -802,6 -762,13 +802,13 @@@ static void __exit ib_uverbs_cleanup(vo
        unregister_filesystem(&uverbs_event_fs);
        class_unregister(&uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+       idr_destroy(&ib_uverbs_pd_idr);
+       idr_destroy(&ib_uverbs_mr_idr);
+       idr_destroy(&ib_uverbs_mw_idr);
+       idr_destroy(&ib_uverbs_ah_idr);
+       idr_destroy(&ib_uverbs_cq_idr);
+       idr_destroy(&ib_uverbs_qp_idr);
+       idr_destroy(&ib_uverbs_srq_idr);
  }
  
  module_init(ib_uverbs_init);
index f2afdc6c7e60ecaa9e21db599d7a8005a8b49332,8dfafda5ed241c9c4269877704981f21f020673d..e5a047a6dbeb85fdd0a9c65dd37aad69ae04030c
@@@ -83,8 -83,7 +83,8 @@@ enum 
        MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
        MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
        MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
 -      MTHCA_EVENT_TYPE_SRQ_LAST_WQE       = 0x13,
 +      MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
 +      MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
        MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
        MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
        MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
                                (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
                                (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
                                (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
 -#define MTHCA_SRQ_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
 -                              (1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE)
 +#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
 +                              (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
 +                              (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
  #define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)
  
  #define MTHCA_EQ_DB_INC_CI     (1 << 24)
@@@ -143,9 -141,6 +143,9 @@@ struct mthca_eqe 
                struct {
                        __be32 qpn;
                } __attribute__((packed)) qp;
 +              struct {
 +                      __be32 srqn;
 +              } __attribute__((packed)) srq;
                struct {
                        __be32 cqn;
                        u32    reserved1;
@@@ -310,16 -305,6 +310,16 @@@ static int mthca_eq_int(struct mthca_de
                                       IB_EVENT_SQ_DRAINED);
                        break;
  
 +              case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
 +                      mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
 +                                     IB_EVENT_QP_LAST_WQE_REACHED);
 +                      break;
 +
 +              case MTHCA_EVENT_TYPE_SRQ_LIMIT:
 +                      mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
 +                                      IB_EVENT_SRQ_LIMIT_REACHED);
 +                      break;
 +
                case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_FATAL);
@@@ -411,20 -396,21 +411,21 @@@ static irqreturn_t mthca_tavor_interrup
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
  
        ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
-       if (ecr) {
-               writel(ecr, dev->eq_regs.tavor.ecr_base +
-                      MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+       if (!ecr)
+               return IRQ_NONE;
  
-               for (i = 0; i < MTHCA_NUM_EQ; ++i)
-                       if (ecr & dev->eq_table.eq[i].eqn_mask &&
-                           mthca_eq_int(dev, &dev->eq_table.eq[i])) {
+       writel(ecr, dev->eq_regs.tavor.ecr_base +
+              MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+       for (i = 0; i < MTHCA_NUM_EQ; ++i)
+               if (ecr & dev->eq_table.eq[i].eqn_mask) {
+                       if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
                                tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
                                                dev->eq_table.eq[i].cons_index);
-                               tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
-                       }
-       }
+                       tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
+               }
  
-       return IRQ_RETVAL(ecr);
+       return IRQ_HANDLED;
  }
  
  static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,