#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+ /* Silently promote "you're not bound, there was nothing to do"
+ * to success, since the client was just asking us to
+ * make sure everything was done.
+ */
+ if (ret == -EINVAL)
+ ret = 0;
} else {
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
return ret;
}
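+/* Evict every object we can from the GTT aperture, one at a time,
+ * to make as much room as possible for a subsequent pin attempt.
+ */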
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ int ret;
+
+ for (;;) {
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0)
+ break;
+ }
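+	/* -ENOMEM means there was nothing left to evict: the aperture
+	 * is now empty, which is exactly what the caller wanted.
+	 */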
+ if (ret == -ENOMEM)
+ return 0;
+ return ret;
+}
+
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
ret = i915_gem_evict_something(dev);
if (ret != 0) {
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
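+			/* -ERESTARTSYS means a signal arrived while
+			 * waiting; the ioctl will be restarted, so
+			 * don't log it as a failure.
+			 */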
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
return ret;
}
goto search_free;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
* MI_FLUSH
* drm_agp_chipset_flush
*/
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain)
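+/* This only computes the new object domains and accumulates the
+ * invalidate/flush bits they require in dev->invalidate_domains and
+ * dev->flush_domains; the flush itself is emitted later in a single
+ * batch, so nothing here can fail and the return type is void.
+ */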
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
- return 0;
}
/**
return 0;
}
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
- uint32_t flush_domains = dev->flush_domains;
-
- /*
- * Now that all the buffers are synced to the proper domains,
- * flush and invalidate the collected domains
- */
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev,
- dev->invalidate_domains,
- dev->flush_domains);
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- }
-
- return flush_domains;
-}
-
/**
* Pin an object to the GTT and evaluate the relocations landing in it.
*/
int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
+ int pin_tries;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
return -EBUSY;
}
- /* Zero the gloabl flush/invalidate flags. These
- * will be modified as each object is bound to the
- * gtt
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- /* Look up object handles and perform the relocations */
+ /* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
ret = -EBADF;
goto err;
}
+ }
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i]);
- if (ret) {
- DRM_ERROR("object bind and relocate failed %d\n", ret);
+	/* Pin and relocate.  If the first pass runs out of aperture
+	 * space (-ENOMEM), unpin, evict everything and try once more.
+	 */
+ for (pin_tries = 0; ; pin_tries++) {
+ ret = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret)
+ break;
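+			/* Track how many objects were pinned so they
+			 * can be unpinned if we have to evict and retry.
+			 */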
+ pinned = i + 1;
+ }
+ /* success */
+ if (ret == 0)
+ break;
+
+ /* error other than GTT full, or we've already tried again */
+ if (ret != -ENOMEM || pin_tries >= 1) {
+ DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
}
- pinned = i + 1;
+
+ /* unpin all of our buffers */
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
+ /* evict everyone we can from the aperture */
+ ret = i915_gem_evict_everything(dev);
+ if (ret)
+ goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
i915_verify_inactive(dev, __FILE__, __LINE__);
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as new domains are computed
+ * for each object
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- /* make sure all previous memory operations have passed */
- ret = i915_gem_object_set_domain(obj,
- obj->pending_read_domains,
- obj->pending_write_domain);
- if (ret) {
- /* As we've partially updated domains on our buffers,
- * we have to emit the flush we've accumulated
- * before exiting, or we'll have broken the
- * active/flushing/inactive invariants.
- *
- * We'll potentially have some things marked as
- * being in write domains that they actually aren't,
- * but that should be merely a minor performance loss.
- */
- flush_domains = i915_gem_dev_set_domain(dev);
- (void)i915_add_request(dev, flush_domains);
- goto err;
- }
+		/* Compute new GPU domains and accumulate the
+		 * invalidate/flush operations they require.
+		 */
+ i915_gem_object_set_to_gpu_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
- /* Flush/invalidate caches and chipset buffer */
- flush_domains = i915_gem_dev_set_domain(dev);
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
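+		/* Tie the flush to a request so that objects on the
+		 * flushing list can retire to inactive once it completes.
+		 */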
+ if (dev->flush_domains)
+ (void)i915_add_request(dev, dev->flush_domains);
+ }
i915_verify_inactive(dev, __FILE__, __LINE__);
~0);
#endif
- (void)i915_add_request(dev, flush_domains);
-
/* Exec the batchbuffer */
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
if (ret) {
}
obj_priv = obj->driver_private;
- args->busy = obj_priv->active;
+ /* Don't count being on the flushing list against the object being
+ * done. Otherwise, a buffer left on the flushing list but not getting
+ * flushed (because nobody's flushing that domain) won't ever return
+ * unbusy and get reused by libdrm's bo cache. The other expected
+ * consumer of this interface, OpenGL's occlusion queries, also specs
+ * that the objects get unbusy "eventually" without any interference.
+ */
+ args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);