drm/i915: Don't return busy for buffers left on the flushing list.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 50c75327d5673728905346782ac4835e261d2316..24fe8c10b4b22c6bac1cfa17d79d55f02dd59179 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -435,6 +435,13 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 #endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+               /* Silently promote "you're not bound, there was nothing to do"
+                * to success, since the client was just asking us to
+                * make sure everything was done.
+                */
+               if (ret == -EINVAL)
+                       ret = 0;
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }
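
Worth noting: the -EINVAL promoted here is introduced by this same diff, in the "Not valid to be called on unbound objects" check added to i915_gem_object_set_to_gtt_domain() below. The ioctl boundary swallows it because an unbound object trivially satisfies "make sure everything is done", while internal callers still see it as a real error. A minimal userspace-side sketch of the call this keeps painless (hypothetical helper name, assuming libdrm's drmIoctl() and the standard DRM_IOCTL_I915_GEM_SET_DOMAIN interface):

```c
#include <stdint.h>
#include <xf86drm.h>	/* drmIoctl(), from libdrm */
#include <i915_drm.h>	/* DRM_IOCTL_I915_GEM_SET_DOMAIN; include path varies */

/* Hypothetical helper: make a buffer object coherent with the GTT domain.
 * With this patch, calling it on a BO that is not currently bound becomes a
 * successful no-op instead of -EINVAL, so callers need no bound-ness check. */
int bo_sync_to_gtt(int fd, uint32_t handle, int will_write)
{
	struct drm_i915_gem_set_domain sd = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		.write_domain = will_write ? I915_GEM_DOMAIN_GTT : 0,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}
```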
@@ -1087,6 +1094,21 @@ i915_gem_evict_something(struct drm_device *dev)
        return ret;
 }
 
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+       int ret;
+
+       for (;;) {
+               ret = i915_gem_evict_something(dev);
+               if (ret != 0)
+                       break;
+       }
+       if (ret == -ENOMEM)
+               return 0;
+       return ret;
+}
+
 static int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
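
The new helper empties the aperture by reusing the single-buffer evictor until it runs dry: -ENOMEM from i915_gem_evict_something() means "nothing evictable is left", which for "evict everything" is the success case, while real failures (an interrupted wait returning -ERESTARTSYS, say) still propagate. A standalone sketch of that drain-until-expected-error shape, with hypothetical names and toy state in place of the kernel machinery:

```c
#include <errno.h>
#include <stdio.h>

/* Stand-in for i915_gem_evict_something(): evicts one buffer and returns 0,
 * or -ENOMEM once nothing evictable is left. The counter is demo state. */
static int evictable = 3;

static int evict_one(void)
{
	if (evictable == 0)
		return -ENOMEM;		/* aperture is already empty */
	printf("evicted a buffer, %d left\n", --evictable);
	return 0;
}

/* Same shape as the new i915_gem_evict_everything(): loop until the
 * per-buffer evict fails, then map the expected terminal error ("nothing
 * left") to success while letting any other error reach the caller. */
static int evict_everything(void)
{
	int ret;

	for (;;) {
		ret = evict_one();
		if (ret != 0)
			break;
	}
	return ret == -ENOMEM ? 0 : ret;
}

int main(void)
{
	return evict_everything();
}
```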
@@ -1173,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
                ret = i915_gem_evict_something(dev);
                if (ret != 0) {
-                       DRM_ERROR("Failed to evict a buffer %d\n", ret);
+                       if (ret != -ERESTARTSYS)
+                               DRM_ERROR("Failed to evict a buffer %d\n", ret);
                        return ret;
                }
                goto search_free;
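
Why -ERESTARTSYS alone is exempt from logging: it is what interruptible kernel waits return when a signal arrives, and the syscall layer restarts the call transparently, so it is routine application behavior rather than a driver fault worth a DRM_ERROR(). A userspace analogue of the same idiom (hypothetical helper; EINTR plays the role -ERESTARTSYS plays in the kernel):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical logging helper mirroring the quieted path above: stay
 * silent for "a signal interrupted the wait", complain about anything
 * else. */
void log_unless_interrupted(int err, const char *what)
{
	if (err != 0 && err != EINTR)
		fprintf(stderr, "%s failed: %s\n", what, strerror(err));
}

int main(void)
{
	log_unless_interrupted(EINTR, "wait");	/* silent: routine signal */
	log_unless_interrupted(EIO, "wait");	/* logged: real failure */
	return 0;
}
```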
@@ -1290,6 +1313,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret;
 
+       /* Not valid to be called on unbound objects. */
+       if (obj_priv->gtt_space == NULL)
+               return -EINVAL;
+
        i915_gem_object_flush_gpu_write_domain(obj);
        /* Wait on any GPU rendering and flushing to occur. */
        ret = i915_gem_object_wait_rendering(obj);
@@ -1646,38 +1673,6 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
        return 0;
 }
 
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-       uint32_t flush_domains = dev->flush_domains;
-
-       /*
-        * Now that all the buffers are synced to the proper domains,
-        * flush and invalidate the collected domains
-        */
-       if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-                         __func__,
-                        dev->invalidate_domains,
-                        dev->flush_domains);
-#endif
-               i915_gem_flush(dev,
-                              dev->invalidate_domains,
-                              dev->flush_domains);
-               dev->invalidate_domains = 0;
-               dev->flush_domains = 0;
-       }
-
-       return flush_domains;
-}
-
 /**
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
@@ -1954,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        int ret, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t seqno, flush_domains;
+       int pin_tries;
 
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -2002,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                return -EBUSY;
        }
 
-       /* Zero the gloabl flush/invalidate flags. These
-        * will be modified as each object is bound to the
-        * gtt
-        */
-       dev->invalidate_domains = 0;
-       dev->flush_domains = 0;
-
-       /* Look up object handles and perform the relocations */
+       /* Look up object handles */
        for (i = 0; i < args->buffer_count; i++) {
                object_list[i] = drm_gem_object_lookup(dev, file_priv,
                                                       exec_list[i].handle);
@@ -2019,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                        ret = -EBADF;
                        goto err;
                }
+       }
 
-               object_list[i]->pending_read_domains = 0;
-               object_list[i]->pending_write_domain = 0;
-               ret = i915_gem_object_pin_and_relocate(object_list[i],
-                                                      file_priv,
-                                                      &exec_list[i]);
-               if (ret) {
-                       DRM_ERROR("object bind and relocate failed %d\n", ret);
+       /* Pin and relocate */
+       for (pin_tries = 0; ; pin_tries++) {
+               ret = 0;
+               for (i = 0; i < args->buffer_count; i++) {
+                       object_list[i]->pending_read_domains = 0;
+                       object_list[i]->pending_write_domain = 0;
+                       ret = i915_gem_object_pin_and_relocate(object_list[i],
+                                                              file_priv,
+                                                              &exec_list[i]);
+                       if (ret)
+                               break;
+                       pinned = i + 1;
+               }
+               /* success */
+               if (ret == 0)
+                       break;
+
+               /* error other than GTT full, or we've already tried again */
+               if (ret != -ENOMEM || pin_tries >= 1) {
+                       DRM_ERROR("Failed to pin buffers %d\n", ret);
                        goto err;
                }
-               pinned = i + 1;
+
+               /* unpin all of our buffers */
+               for (i = 0; i < pinned; i++)
+                       i915_gem_object_unpin(object_list[i]);
+
+               /* evict everyone we can from the aperture */
+               ret = i915_gem_evict_everything(dev);
+               if (ret)
+                       goto err;
        }
 
        /* Set the pending read domains for the batch buffer to COMMAND */
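
This is the core restructuring: handle lookup becomes its own pass, and pinning plus relocation moves into a retry loop. If the first pass fails with -ENOMEM (GTT full), everything pinned so far is released, i915_gem_evict_everything() from the hunk above empties the aperture, and the whole set is pinned again from scratch; -ENOMEM on the second pass means the set genuinely cannot fit. A compilable sketch of that shape (the helper names and the toy aperture accounting are all hypothetical):

```c
#include <errno.h>
#include <stdio.h>

#define BUF_COUNT 4

/* Toy state so the sketch runs: pretend there is room to pin only 2 of
 * the 4 buffers until an eviction frees the whole GTT. */
static int aperture_free = 2;

static int pin_one(int i)
{
	if (aperture_free == 0)
		return -ENOMEM;		/* GTT full, like the real pin path */
	aperture_free--;
	printf("pinned buffer %d\n", i);
	return 0;
}

static void unpin_one(int i)
{
	aperture_free++;
	printf("unpinned buffer %d\n", i);
}

static int evict_everything(void)
{
	aperture_free = BUF_COUNT;	/* demo: eviction empties the GTT */
	return 0;
}

/* The shape of the new execbuffer logic: pin the whole set, and on -ENOMEM
 * unpin whatever was pinned, evict everything, and try exactly once more.
 * A non-ENOMEM error, or -ENOMEM on the retry, fails the submission. */
static int pin_all(void)
{
	int i, ret, pinned = 0, pin_tries;

	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		for (i = 0; i < BUF_COUNT; i++) {
			ret = pin_one(i);
			if (ret)
				break;
			pinned = i + 1;
		}
		if (ret == 0)
			break;			/* whole set pinned */

		if (ret != -ENOMEM || pin_tries >= 1)
			return ret;		/* real error or already retried */

		while (pinned--)
			unpin_one(pinned);
		pinned = 0;

		ret = evict_everything();
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	return pin_all();
}
```

The unpin-everything step before retrying matters: buffers pinned by the failed pass would otherwise still occupy the aperture and defeat the eviction.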
@@ -2039,10 +2050,17 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
+       /* Zero the global flush/invalidate flags. These
+        * will be modified as new domains are computed
+        * for each object
+        */
+       dev->invalidate_domains = 0;
+       dev->flush_domains = 0;
+
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
 
-               /* Compute new gpu domains and update invalidate/flushing */
+               /* Compute new gpu domains and update invalidate/flush */
                i915_gem_object_set_to_gpu_domain(obj,
                                                  obj->pending_read_domains,
                                                  obj->pending_write_domain);
@@ -2050,8 +2068,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       /* Flush/invalidate caches and chipset buffer */
-       flush_domains = i915_gem_dev_set_domain(dev);
+       if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+                         __func__,
+                        dev->invalidate_domains,
+                        dev->flush_domains);
+#endif
+               i915_gem_flush(dev,
+                              dev->invalidate_domains,
+                              dev->flush_domains);
+               if (dev->flush_domains)
+                       (void)i915_add_request(dev, dev->flush_domains);
+       }
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -2071,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                              ~0);
 #endif
 
-       (void)i915_add_request(dev, flush_domains);
-
        /* Exec the batchbuffer */
        ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
        if (ret) {
@@ -2282,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        }
 
        obj_priv = obj->driver_private;
-       args->busy = obj_priv->active;
+       /* Don't count being on the flushing list against the object being
+        * done.  Otherwise, a buffer left on the flushing list but not getting
+        * flushed (because nobody's flushing that domain) won't ever return
+        * unbusy and get reused by libdrm's bo cache.  The other expected
+        * consumer of this interface, OpenGL's occlusion queries, also specs
+        * that the objects get unbusy "eventually" without any interference.
+        */
+       args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
 
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
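
This last hunk is the change the commit title describes. Objects parked on the flushing list are still marked active but have no request outstanding (their last_rendering_seqno is 0), so the new test reports them idle instead of busy-forever when nobody flushes their domain. A sketch of the consumer this behavior targets, a libdrm-style bo cache polling for reuse (hypothetical helper name, assuming libdrm's drmIoctl() and the standard DRM_IOCTL_I915_GEM_BUSY interface):

```c
#include <stdint.h>
#include <xf86drm.h>	/* drmIoctl(), from libdrm */
#include <i915_drm.h>	/* DRM_IOCTL_I915_GEM_BUSY; include path varies */

/* Hypothetical bo-cache helper: can this buffer be recycled yet? Before
 * this patch a BO stuck on the flushing list answered "busy" forever and
 * was never reused; occlusion-query consumers had the same problem. */
int bo_is_idle(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return 0;	/* on ioctl error, conservatively say busy */
	return busy.busy == 0;
}
```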