2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
32 #include <linux/swap.h>
35 i915_gem_object_set_domain(struct drm_gem_object *obj,
36 uint32_t read_domains,
37 uint32_t write_domain);
39 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
42 uint32_t read_domains,
43 uint32_t write_domain);
45 i915_gem_set_domain(struct drm_gem_object *obj,
46 struct drm_file *file_priv,
47 uint32_t read_domains,
48 uint32_t write_domain);
49 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
54 i915_gem_init_ioctl(struct drm_device *dev, void *data,
55 struct drm_file *file_priv)
57 drm_i915_private_t *dev_priv = dev->dev_private;
58 struct drm_i915_gem_init *args = data;
60 mutex_lock(&dev->struct_mutex);
62 if (args->gtt_start >= args->gtt_end ||
63 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
64 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
65 mutex_unlock(&dev->struct_mutex);
69 drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
70 args->gtt_end - args->gtt_start);
72 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
74 mutex_unlock(&dev->struct_mutex);
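/* A worked example of the checks above (hypothetical values, not from the
 * source): gtt_start = 0x01000000 and gtt_end = 0x02000000 are both
 * page-aligned and ordered, so drm_mm_init() is handed a 16 MiB range and
 * dev->gtt_total becomes 0x01000000; a gtt_start of 0x01000800 would fail
 * the (gtt_start & (PAGE_SIZE - 1)) test with 4 KiB pages.
 */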
81 * Creates a new mm object and returns a handle to it.
84 i915_gem_create_ioctl(struct drm_device *dev, void *data,
85 struct drm_file *file_priv)
87 struct drm_i915_gem_create *args = data;
88 struct drm_gem_object *obj;
91 args->size = roundup(args->size, PAGE_SIZE);
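/* Illustration (assuming 4 KiB pages): a 5000-byte create request is
 * rounded up here to 8192 bytes, so object sizes are always whole pages.
 */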
93 /* Allocate the new object */
94 obj = drm_gem_object_alloc(dev, args->size);
98 ret = drm_gem_handle_create(file_priv, obj, &handle);
99 mutex_lock(&dev->struct_mutex);
100 drm_gem_object_handle_unreference(obj);
101 mutex_unlock(&dev->struct_mutex);
106 args->handle = handle;
112 * Reads data from the object referenced by handle.
114 * On error, the contents of *data are undefined.
117 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
118 struct drm_file *file_priv)
120 struct drm_i915_gem_pread *args = data;
121 struct drm_gem_object *obj;
122 struct drm_i915_gem_object *obj_priv;
127 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
130 obj_priv = obj->driver_private;
132 /* Bounds check source.
134 * XXX: This could use review for overflow issues...
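*
* A hedged sketch of an overflow-proof variant (not the check used below):
* with args->offset and args->size both u64, rearranging to a subtraction
* keeps every intermediate value within [0, obj->size], so the sum can
* never wrap before the comparison:
*
*	if (args->size > obj->size ||
*	    args->offset > obj->size - args->size)
*		return -EINVAL;
*/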
136 if (args->offset > obj->size || args->size > obj->size ||
137 args->offset + args->size > obj->size) {
138 drm_gem_object_unreference(obj);
142 mutex_lock(&dev->struct_mutex);
144 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
145 I915_GEM_DOMAIN_CPU, 0);
147 drm_gem_object_unreference(obj);
148 mutex_unlock(&dev->struct_mutex);
152 offset = args->offset;
154 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
155 args->size, &offset);
156 if (read != args->size) {
157 drm_gem_object_unreference(obj);
158 mutex_unlock(&dev->struct_mutex);
165 drm_gem_object_unreference(obj);
166 mutex_unlock(&dev->struct_mutex);
172 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
173 struct drm_i915_gem_pwrite *args,
174 struct drm_file *file_priv)
176 struct drm_i915_gem_object *obj_priv = obj->driver_private;
179 char __user *user_data;
185 unsigned long unwritten;
187 user_data = (char __user *) (uintptr_t) args->data_ptr;
189 if (!access_ok(VERIFY_READ, user_data, remain))
193 mutex_lock(&dev->struct_mutex);
194 ret = i915_gem_object_pin(obj, 0);
196 mutex_unlock(&dev->struct_mutex);
199 ret = i915_gem_set_domain(obj, file_priv,
200 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
204 obj_priv = obj->driver_private;
205 offset = obj_priv->gtt_offset + args->offset;
209 /* Operation in this page
212 * o = offset within page
215 i = offset >> PAGE_SHIFT;
216 o = offset & (PAGE_SIZE-1);
218 if ((o + l) > PAGE_SIZE)
221 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
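/* Worked example (assuming 4 KiB pages, purely illustrative): with
 * offset = 0x1010 this iteration handles page i = 1 at o = 0x10 within
 * that page, and the check above clamps l so that o + l never crosses a
 * page boundary; pfn is then that page's frame number inside the
 * aperture starting at dev->agp->base.
 */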
223 #ifdef CONFIG_HIGHMEM
224 /* This is a workaround for the low performance of iounmap
225 * (approximately 10% CPU cost on normal 3D workloads).
226 * kmap_atomic on HIGHMEM kernels happens to let us map card
227 * memory without taking IPIs. When the vmap rework lands
228 * we should be able to dump this hack.
230 vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
232 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
233 i, o, l, pfn, vaddr_atomic);
235 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
237 kunmap_atomic(vaddr_atomic, KM_USER0);
240 #endif /* CONFIG_HIGHMEM */
242 vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
244 DRM_INFO("pwrite slow i %d o %d l %d "
245 "pfn %ld vaddr %p\n",
246 i, o, l, pfn, vaddr);
252 unwritten = __copy_from_user(vaddr + o, user_data, l);
254 DRM_INFO("unwritten %ld\n", unwritten);
267 #if WATCH_PWRITE && 1
268 i915_gem_clflush_object(obj);
269 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
270 i915_gem_clflush_object(obj);
274 i915_gem_object_unpin(obj);
275 mutex_unlock(&dev->struct_mutex);
281 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
282 struct drm_i915_gem_pwrite *args,
283 struct drm_file *file_priv)
289 mutex_lock(&dev->struct_mutex);
291 ret = i915_gem_set_domain(obj, file_priv,
292 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
294 mutex_unlock(&dev->struct_mutex);
298 offset = args->offset;
300 written = vfs_write(obj->filp,
301 (char __user *)(uintptr_t) args->data_ptr,
302 args->size, &offset);
303 if (written != args->size) {
304 mutex_unlock(&dev->struct_mutex);
311 mutex_unlock(&dev->struct_mutex);
317 * Writes data to the object referenced by handle.
319 * On error, the contents of the buffer that were to be modified are undefined.
322 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
323 struct drm_file *file_priv)
325 struct drm_i915_gem_pwrite *args = data;
326 struct drm_gem_object *obj;
327 struct drm_i915_gem_object *obj_priv;
330 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
333 obj_priv = obj->driver_private;
335 /* Bounds check destination.
337 * XXX: This could use review for overflow issues...
339 if (args->offset > obj->size || args->size > obj->size ||
340 args->offset + args->size > obj->size) {
341 drm_gem_object_unreference(obj);
345 /* We can only do the GTT pwrite on untiled buffers, as otherwise
346 * it would end up going through the fenced access, and we'll get
347 * different detiling behavior between reading and writing.
348 * pread/pwrite currently are reading and writing from the CPU
349 * perspective, requiring manual detiling by the client.
351 if (obj_priv->tiling_mode == I915_TILING_NONE &&
353 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
355 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
359 DRM_INFO("pwrite failed %d\n", ret);
362 drm_gem_object_unreference(obj);
368 * Called when user space prepares to use an object
371 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
372 struct drm_file *file_priv)
374 struct drm_i915_gem_set_domain *args = data;
375 struct drm_gem_object *obj;
378 if (!(dev->driver->driver_features & DRIVER_GEM))
381 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
385 mutex_lock(&dev->struct_mutex);
387 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
388 obj, obj->size, args->read_domains, args->write_domain);
390 ret = i915_gem_set_domain(obj, file_priv,
391 args->read_domains, args->write_domain);
392 drm_gem_object_unreference(obj);
393 mutex_unlock(&dev->struct_mutex);
398 * Called when user space has done writes to this buffer
401 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
402 struct drm_file *file_priv)
404 struct drm_i915_gem_sw_finish *args = data;
405 struct drm_gem_object *obj;
406 struct drm_i915_gem_object *obj_priv;
409 if (!(dev->driver->driver_features & DRIVER_GEM))
412 mutex_lock(&dev->struct_mutex);
413 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
415 mutex_unlock(&dev->struct_mutex);
420 DRM_INFO("%s: sw_finish %d (%p %d)\n",
421 __func__, args->handle, obj, obj->size);
423 obj_priv = obj->driver_private;
425 /* Pinned buffers may be scanout, so flush the cache */
426 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
427 i915_gem_clflush_object(obj);
428 drm_agp_chipset_flush(dev);
430 drm_gem_object_unreference(obj);
431 mutex_unlock(&dev->struct_mutex);
436 * Maps the contents of an object, returning the address it is mapped
439 * While the mapping holds a reference on the contents of the object, it doesn't
440 * imply a ref on the object itself.
443 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
444 struct drm_file *file_priv)
446 struct drm_i915_gem_mmap *args = data;
447 struct drm_gem_object *obj;
451 if (!(dev->driver->driver_features & DRIVER_GEM))
454 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
458 offset = args->offset;
460 down_write(&current->mm->mmap_sem);
461 addr = do_mmap(obj->filp, 0, args->size,
462 PROT_READ | PROT_WRITE, MAP_SHARED,
464 up_write(&current->mm->mmap_sem);
465 mutex_lock(&dev->struct_mutex);
466 drm_gem_object_unreference(obj);
467 mutex_unlock(&dev->struct_mutex);
468 if (IS_ERR((void *)addr))
471 args->addr_ptr = (uint64_t) addr;
477 i915_gem_object_free_page_list(struct drm_gem_object *obj)
479 struct drm_i915_gem_object *obj_priv = obj->driver_private;
480 int page_count = obj->size / PAGE_SIZE;
483 if (obj_priv->page_list == NULL)
487 for (i = 0; i < page_count; i++)
488 if (obj_priv->page_list[i] != NULL) {
490 set_page_dirty(obj_priv->page_list[i]);
491 mark_page_accessed(obj_priv->page_list[i]);
492 page_cache_release(obj_priv->page_list[i]);
496 drm_free(obj_priv->page_list,
497 page_count * sizeof(struct page *),
499 obj_priv->page_list = NULL;
503 i915_gem_object_move_to_active(struct drm_gem_object *obj)
505 struct drm_device *dev = obj->dev;
506 drm_i915_private_t *dev_priv = dev->dev_private;
507 struct drm_i915_gem_object *obj_priv = obj->driver_private;
509 /* Add a reference if we're newly entering the active list. */
510 if (!obj_priv->active) {
511 drm_gem_object_reference(obj);
512 obj_priv->active = 1;
514 /* Move from whatever list we were on to the tail of execution. */
515 list_move_tail(&obj_priv->list,
516 &dev_priv->mm.active_list);
521 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
523 struct drm_device *dev = obj->dev;
524 drm_i915_private_t *dev_priv = dev->dev_private;
525 struct drm_i915_gem_object *obj_priv = obj->driver_private;
527 i915_verify_inactive(dev, __FILE__, __LINE__);
528 if (obj_priv->pin_count != 0)
529 list_del_init(&obj_priv->list);
531 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
533 if (obj_priv->active) {
534 obj_priv->active = 0;
535 drm_gem_object_unreference(obj);
537 i915_verify_inactive(dev, __FILE__, __LINE__);
541 * Creates a new sequence number, emitting a write of it to the status page
542 * plus an interrupt, which will trigger i915_user_interrupt_handler.
544 * Must be called with struct_lock held.
546 * Returned sequence numbers are nonzero on success.
549 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
551 drm_i915_private_t *dev_priv = dev->dev_private;
552 struct drm_i915_gem_request *request;
557 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
561 /* Grab the seqno we're going to make this request be, and bump the
562 * next (skipping 0 so it can be the reserved no-seqno value).
564 seqno = dev_priv->mm.next_gem_seqno;
565 dev_priv->mm.next_gem_seqno++;
566 if (dev_priv->mm.next_gem_seqno == 0)
567 dev_priv->mm.next_gem_seqno++;
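/* Illustration of the skip-zero rule (hypothetical values): if
 * next_gem_seqno is 0xffffffff, this request gets 0xffffffff and the
 * counter wraps to 0, which the test above bumps to 1, so 0 is never
 * handed out and can keep meaning "no seqno".
 */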
570 OUT_RING(MI_STORE_DWORD_INDEX);
571 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
574 OUT_RING(MI_USER_INTERRUPT);
577 DRM_DEBUG("%d\n", seqno);
579 request->seqno = seqno;
580 request->emitted_jiffies = jiffies;
581 request->flush_domains = flush_domains;
582 was_empty = list_empty(&dev_priv->mm.request_list);
583 list_add_tail(&request->list, &dev_priv->mm.request_list);
586 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
591 * Command execution barrier
593 * Ensures that all commands in the ring are finished
594 * before signalling the CPU
597 i915_retire_commands(struct drm_device *dev)
599 drm_i915_private_t *dev_priv = dev->dev_private;
600 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
601 uint32_t flush_domains = 0;
604 /* The sampler always gets flushed on i965 (sigh) */
606 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
609 OUT_RING(0); /* noop */
611 return flush_domains;
615 * Moves buffers associated only with the given active seqno from the active
616 * to inactive list, potentially freeing them.
619 i915_gem_retire_request(struct drm_device *dev,
620 struct drm_i915_gem_request *request)
622 drm_i915_private_t *dev_priv = dev->dev_private;
624 /* Move any buffers on the active list that are no longer referenced
625 * by the ringbuffer to the flushing/inactive lists as appropriate.
627 while (!list_empty(&dev_priv->mm.active_list)) {
628 struct drm_gem_object *obj;
629 struct drm_i915_gem_object *obj_priv;
631 obj_priv = list_first_entry(&dev_priv->mm.active_list,
632 struct drm_i915_gem_object,
636 /* If the seqno being retired doesn't match the oldest in the
637 * list, then the oldest in the list must still be newer than
640 if (obj_priv->last_rendering_seqno != request->seqno)
643 DRM_INFO("%s: retire %d moves to inactive list %p\n",
644 __func__, request->seqno, obj);
647 if (obj->write_domain != 0) {
648 list_move_tail(&obj_priv->list,
649 &dev_priv->mm.flushing_list);
651 i915_gem_object_move_to_inactive(obj);
655 if (request->flush_domains != 0) {
656 struct drm_i915_gem_object *obj_priv, *next;
658 /* Clear the write domain and activity from any buffers
659 * that are just waiting for a flush matching the one retired.
661 list_for_each_entry_safe(obj_priv, next,
662 &dev_priv->mm.flushing_list, list) {
663 struct drm_gem_object *obj = obj_priv->obj;
665 if (obj->write_domain & request->flush_domains) {
666 obj->write_domain = 0;
667 i915_gem_object_move_to_inactive(obj);
675 * Returns true if seq1 is later than seq2.
678 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
680 return (int32_t)(seq1 - seq2) >= 0;
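/* Worked example of the wraparound-safe comparison (hypothetical values):
 * seq1 = 0x00000002, seq2 = 0xfffffffd gives (int32_t)(seq1 - seq2) = 5,
 * which is >= 0, so seq1 is treated as later even though it is
 * numerically smaller; the comparison only misbehaves once the two
 * seqnos are more than 2^31 apart.
 */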
684 i915_get_gem_seqno(struct drm_device *dev)
686 drm_i915_private_t *dev_priv = dev->dev_private;
688 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
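/* The value read here is the one written by the MI_STORE_DWORD_INDEX
 * emitted in i915_add_request(): dword I915_GEM_HWS_INDEX of the
 * hardware status page. Assuming READ_HWSP is the usual status-page
 * accessor, this is roughly:
 *
 *	((volatile u32 *)dev_priv->hw_status_page)[I915_GEM_HWS_INDEX]
 */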
692 * This function clears the request list as sequence numbers are passed.
695 i915_gem_retire_requests(struct drm_device *dev)
697 drm_i915_private_t *dev_priv = dev->dev_private;
700 seqno = i915_get_gem_seqno(dev);
702 while (!list_empty(&dev_priv->mm.request_list)) {
703 struct drm_i915_gem_request *request;
704 uint32_t retiring_seqno;
706 request = list_first_entry(&dev_priv->mm.request_list,
707 struct drm_i915_gem_request,
709 retiring_seqno = request->seqno;
711 if (i915_seqno_passed(seqno, retiring_seqno) ||
712 dev_priv->mm.wedged) {
713 i915_gem_retire_request(dev, request);
715 list_del(&request->list);
716 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
723 i915_gem_retire_work_handler(struct work_struct *work)
725 drm_i915_private_t *dev_priv;
726 struct drm_device *dev;
728 dev_priv = container_of(work, drm_i915_private_t,
729 mm.retire_work.work);
732 mutex_lock(&dev->struct_mutex);
733 i915_gem_retire_requests(dev);
734 if (!list_empty(&dev_priv->mm.request_list))
735 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
736 mutex_unlock(&dev->struct_mutex);
740 * Waits for a sequence number to be signaled, and cleans up the
741 * request and object lists appropriately for that event.
744 i915_wait_request(struct drm_device *dev, uint32_t seqno)
746 drm_i915_private_t *dev_priv = dev->dev_private;
751 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
752 dev_priv->mm.waiting_gem_seqno = seqno;
753 i915_user_irq_get(dev);
754 ret = wait_event_interruptible(dev_priv->irq_queue,
755 i915_seqno_passed(i915_get_gem_seqno(dev),
757 dev_priv->mm.wedged);
758 i915_user_irq_put(dev);
759 dev_priv->mm.waiting_gem_seqno = 0;
761 if (dev_priv->mm.wedged)
764 if (ret && ret != -ERESTARTSYS)
765 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
766 __func__, ret, seqno, i915_get_gem_seqno(dev));
768 /* Directly dispatch request retiring. While we have the work queue
769 * to handle this, the waiter on a request often wants an associated
770 * buffer to have made it to the inactive list, and we would need
771 * a separate wait queue to handle that.
774 i915_gem_retire_requests(dev);
780 i915_gem_flush(struct drm_device *dev,
781 uint32_t invalidate_domains,
782 uint32_t flush_domains)
784 drm_i915_private_t *dev_priv = dev->dev_private;
789 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
790 invalidate_domains, flush_domains);
793 if (flush_domains & I915_GEM_DOMAIN_CPU)
794 drm_agp_chipset_flush(dev);
796 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
797 I915_GEM_DOMAIN_GTT)) {
801 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
802 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
803 * also flushed at 2d versus 3d pipeline switches.
807 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
808 * MI_READ_FLUSH is set, and is always flushed on 965.
810 * I915_GEM_DOMAIN_COMMAND may not exist?
812 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
813 * invalidated when MI_EXE_FLUSH is set.
815 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
816 * invalidated with every MI_FLUSH.
820 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
821 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
822 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
823 * are flushed at any MI_FLUSH.
826 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
827 if ((invalidate_domains|flush_domains) &
828 I915_GEM_DOMAIN_RENDER)
829 cmd &= ~MI_NO_WRITE_FLUSH;
830 if (!IS_I965G(dev)) {
832 * On the 965, the sampler cache always gets flushed
833 * and this bit is reserved.
835 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
836 cmd |= MI_READ_FLUSH;
838 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
842 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
846 OUT_RING(0); /* noop */
852 * Ensures that all rendering to the object has completed and the object is
853 * safe to unbind from the GTT or access from the CPU.
856 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
858 struct drm_device *dev = obj->dev;
859 struct drm_i915_gem_object *obj_priv = obj->driver_private;
862 /* If there are writes queued to the buffer, flush and
863 * create a new seqno to wait for.
865 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
866 uint32_t write_domain = obj->write_domain;
868 DRM_INFO("%s: flushing object %p from write domain %08x\n",
869 __func__, obj, write_domain);
871 i915_gem_flush(dev, 0, write_domain);
873 i915_gem_object_move_to_active(obj);
874 obj_priv->last_rendering_seqno = i915_add_request(dev,
876 BUG_ON(obj_priv->last_rendering_seqno == 0);
878 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
882 /* If there is rendering queued on the buffer being evicted, wait for
885 if (obj_priv->active) {
887 DRM_INFO("%s: object %p wait for seqno %08x\n",
888 __func__, obj, obj_priv->last_rendering_seqno);
890 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
899 * Unbinds an object from the GTT aperture.
902 i915_gem_object_unbind(struct drm_gem_object *obj)
904 struct drm_device *dev = obj->dev;
905 struct drm_i915_gem_object *obj_priv = obj->driver_private;
909 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
910 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
912 if (obj_priv->gtt_space == NULL)
915 if (obj_priv->pin_count != 0) {
916 DRM_ERROR("Attempting to unbind pinned buffer\n");
920 /* Wait for any rendering to complete
922 ret = i915_gem_object_wait_rendering(obj);
924 DRM_ERROR("wait_rendering failed: %d\n", ret);
928 /* Move the object to the CPU domain to ensure that
929 * any possible CPU writes while it's not in the GTT
930 * are flushed when we go to remap it. This will
931 * also ensure that all pending GPU writes are finished
934 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
935 I915_GEM_DOMAIN_CPU);
937 DRM_ERROR("set_domain failed: %d\n", ret);
941 if (obj_priv->agp_mem != NULL) {
942 drm_unbind_agp(obj_priv->agp_mem);
943 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
944 obj_priv->agp_mem = NULL;
947 BUG_ON(obj_priv->active);
949 i915_gem_object_free_page_list(obj);
951 if (obj_priv->gtt_space) {
952 atomic_dec(&dev->gtt_count);
953 atomic_sub(obj->size, &dev->gtt_memory);
955 drm_mm_put_block(obj_priv->gtt_space);
956 obj_priv->gtt_space = NULL;
959 /* Remove ourselves from the LRU list if present. */
960 if (!list_empty(&obj_priv->list))
961 list_del_init(&obj_priv->list);
967 i915_gem_evict_something(struct drm_device *dev)
969 drm_i915_private_t *dev_priv = dev->dev_private;
970 struct drm_gem_object *obj;
971 struct drm_i915_gem_object *obj_priv;
975 /* If there's an inactive buffer available now, grab it
978 if (!list_empty(&dev_priv->mm.inactive_list)) {
979 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
980 struct drm_i915_gem_object,
983 BUG_ON(obj_priv->pin_count != 0);
985 DRM_INFO("%s: evicting %p\n", __func__, obj);
987 BUG_ON(obj_priv->active);
989 /* Wait on the rendering and unbind the buffer. */
990 ret = i915_gem_object_unbind(obj);
994 /* If we didn't get anything, but the ring is still processing
995 * things, wait for one of those things to finish and hopefully
996 * leave us a buffer to evict.
998 if (!list_empty(&dev_priv->mm.request_list)) {
999 struct drm_i915_gem_request *request;
1001 request = list_first_entry(&dev_priv->mm.request_list,
1002 struct drm_i915_gem_request,
1005 ret = i915_wait_request(dev, request->seqno);
1009 /* if waiting caused an object to become inactive,
1010 * then loop around and wait for it. Otherwise, we
1011 * assume that waiting freed and unbound something,
1012 * so there should now be some space in the GTT
1014 if (!list_empty(&dev_priv->mm.inactive_list))
1019 /* If we didn't have anything on the request list but there
1020 * are buffers awaiting a flush, emit one and try again.
1021 * When we wait on it, those buffers waiting for that flush
1022 * will get moved to inactive.
1024 if (!list_empty(&dev_priv->mm.flushing_list)) {
1025 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1026 struct drm_i915_gem_object,
1028 obj = obj_priv->obj;
1033 i915_add_request(dev, obj->write_domain);
1039 DRM_ERROR("inactive empty %d request empty %d "
1040 "flushing empty %d\n",
1041 list_empty(&dev_priv->mm.inactive_list),
1042 list_empty(&dev_priv->mm.request_list),
1043 list_empty(&dev_priv->mm.flushing_list));
1044 /* If we didn't do any of the above, there's nothing to be done
1045 * and we just can't fit it in.
1053 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1055 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1057 struct address_space *mapping;
1058 struct inode *inode;
1062 if (obj_priv->page_list)
1065 /* Get the list of pages out of our struct file. They'll be pinned
1066 * at this point until we release them.
1068 page_count = obj->size / PAGE_SIZE;
1069 BUG_ON(obj_priv->page_list != NULL);
1070 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1072 if (obj_priv->page_list == NULL) {
1073 DRM_ERROR("Failed to allocate page list\n");
1077 inode = obj->filp->f_path.dentry->d_inode;
1078 mapping = inode->i_mapping;
1079 for (i = 0; i < page_count; i++) {
1080 page = read_mapping_page(mapping, i, NULL);
1082 ret = PTR_ERR(page);
1083 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1084 i915_gem_object_free_page_list(obj);
1087 obj_priv->page_list[i] = page;
1093 * Finds free space in the GTT aperture and binds the object there.
1096 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1098 struct drm_device *dev = obj->dev;
1099 drm_i915_private_t *dev_priv = dev->dev_private;
1100 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1101 struct drm_mm_node *free_space;
1102 int page_count, ret;
1105 alignment = PAGE_SIZE;
1106 if (alignment & (PAGE_SIZE - 1)) {
1107 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1112 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1113 obj->size, alignment, 0);
1114 if (free_space != NULL) {
1115 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1117 if (obj_priv->gtt_space != NULL) {
1118 obj_priv->gtt_space->private = obj;
1119 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1122 if (obj_priv->gtt_space == NULL) {
1123 /* If the gtt is empty and we're still having trouble
1124 * fitting our object in, we're out of memory.
1127 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1129 if (list_empty(&dev_priv->mm.inactive_list) &&
1130 list_empty(&dev_priv->mm.flushing_list) &&
1131 list_empty(&dev_priv->mm.active_list)) {
1132 DRM_ERROR("GTT full, but LRU list empty\n");
1136 ret = i915_gem_evict_something(dev);
1138 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1145 DRM_INFO("Binding object of size %d at 0x%08x\n",
1146 obj->size, obj_priv->gtt_offset);
1148 ret = i915_gem_object_get_page_list(obj);
1150 drm_mm_put_block(obj_priv->gtt_space);
1151 obj_priv->gtt_space = NULL;
1155 page_count = obj->size / PAGE_SIZE;
1156 /* Create an AGP memory structure pointing at our pages, and bind it
1159 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1160 obj_priv->page_list,
1162 obj_priv->gtt_offset,
1163 obj_priv->agp_type);
1164 if (obj_priv->agp_mem == NULL) {
1165 i915_gem_object_free_page_list(obj);
1166 drm_mm_put_block(obj_priv->gtt_space);
1167 obj_priv->gtt_space = NULL;
1170 atomic_inc(&dev->gtt_count);
1171 atomic_add(obj->size, &dev->gtt_memory);
1173 /* Assert that the object is not currently in any GPU domain. As it
1174 * wasn't in the GTT, there shouldn't be any way it could have been in
1177 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1178 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1184 i915_gem_clflush_object(struct drm_gem_object *obj)
1186 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1188 /* If we don't have a page list set up, then we're not pinned
1189 * to GPU, and we can ignore the cache flush because it'll happen
1190 * again at bind time.
1192 if (obj_priv->page_list == NULL)
1195 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1199 * Set the next domain for the specified object. This
1200 * may not actually perform the necessary flushing/invalidating though,
1201 * as that may want to be batched with other set_domain operations
1203 * This is (we hope) the only really tricky part of gem. The goal
1204 * is fairly simple -- track which caches hold bits of the object
1205 * and make sure they remain coherent. A few concrete examples may
1206 * help to explain how it works. For shorthand, we use the notation
1207 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1208 * a pair of read and write domain masks.
1210 * Case 1: the batch buffer
1216 * 5. Unmapped from GTT
1219 * Let's take these a step at a time
1222 * Pages allocated from the kernel may still have
1223 * cache contents, so we set them to (CPU, CPU) always.
1224 * 2. Written by CPU (using pwrite)
1225 * The pwrite function calls set_domain (CPU, CPU) and
1226 * this function does nothing (as nothing changes)
1228 * This function asserts that the object is not
1229 * currently in any GPU-based read or write domains
1231 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1232 * As write_domain is zero, this function adds in the
1233 * current read domains (CPU+COMMAND, 0).
1234 * flush_domains is set to CPU.
1235 * invalidate_domains is set to COMMAND
1236 * clflush is run to get data out of the CPU caches
1237 * then i915_dev_set_domain calls i915_gem_flush to
1238 * emit an MI_FLUSH and drm_agp_chipset_flush
1239 * 5. Unmapped from GTT
1240 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1241 * flush_domains and invalidate_domains end up both zero
1242 * so no flushing/invalidating happens
1246 * Case 2: The shared render buffer
1250 * 3. Read/written by GPU
1251 * 4. set_domain to (CPU,CPU)
1252 * 5. Read/written by CPU
1253 * 6. Read/written by GPU
1256 * Same as last example, (CPU, CPU)
1258 * Nothing changes (assertions find that it is not in the GPU)
1259 * 3. Read/written by GPU
1260 * execbuffer calls set_domain (RENDER, RENDER)
1261 * flush_domains gets CPU
1262 * invalidate_domains gets GPU
1264 * MI_FLUSH and drm_agp_chipset_flush
1265 * 4. set_domain (CPU, CPU)
1266 * flush_domains gets GPU
1267 * invalidate_domains gets CPU
1268 * wait_rendering (obj) to make sure all drawing is complete.
1269 * This will include an MI_FLUSH to get the data from GPU
1271 * clflush (obj) to invalidate the CPU cache
1272 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1273 * 5. Read/written by CPU
1274 * cache lines are loaded and dirtied
1275 * 6. Read/written by GPU
1276 * Same as last GPU access
1278 * Case 3: The constant buffer
1283 * 4. Updated (written) by CPU again
1292 * flush_domains = CPU
1293 * invalidate_domains = RENDER
1296 * drm_agp_chipset_flush
1297 * 4. Updated (written) by CPU again
1299 * flush_domains = 0 (no previous write domain)
1300 * invalidate_domains = 0 (no new read domains)
1303 * flush_domains = CPU
1304 * invalidate_domains = RENDER
1307 * drm_agp_chipset_flush
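*
* As a usage sketch of the notation above (hypothetical caller, mirroring
* Case 1 step 2): a CPU read/write of the object would be set up with
*
*	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
*					 I915_GEM_DOMAIN_CPU);
*
* i.e. read_domains and write_domain both CPU, after which the flushes
* accumulated in dev->invalidate_domains/flush_domains are emitted by
* i915_gem_dev_set_domain().
*/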
1310 i915_gem_object_set_domain(struct drm_gem_object *obj,
1311 uint32_t read_domains,
1312 uint32_t write_domain)
1314 struct drm_device *dev = obj->dev;
1315 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1316 uint32_t invalidate_domains = 0;
1317 uint32_t flush_domains = 0;
1321 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1323 obj->read_domains, read_domains,
1324 obj->write_domain, write_domain);
1327 * If the object isn't moving to a new write domain,
1328 * let the object stay in multiple read domains
1330 if (write_domain == 0)
1331 read_domains |= obj->read_domains;
1333 obj_priv->dirty = 1;
1336 * Flush the current write domain if
1337 * the new read domains don't match. Invalidate
1338 * any read domains which differ from the old
1341 if (obj->write_domain && obj->write_domain != read_domains) {
1342 flush_domains |= obj->write_domain;
1343 invalidate_domains |= read_domains & ~obj->write_domain;
1346 * Invalidate any read caches which may have
1347 * stale data. That is, any new read domains.
1349 invalidate_domains |= read_domains & ~obj->read_domains;
1350 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1352 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1353 __func__, flush_domains, invalidate_domains);
1356 * If we're invalidating the CPU cache and flushing a GPU cache,
1357 * then pause for rendering so that the GPU caches will be
1358 * flushed before the CPU cache is invalidated
1360 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1361 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1362 I915_GEM_DOMAIN_GTT))) {
1363 ret = i915_gem_object_wait_rendering(obj);
1367 i915_gem_clflush_object(obj);
1370 if ((write_domain | flush_domains) != 0)
1371 obj->write_domain = write_domain;
1373 /* If we're invalidating the CPU domain, clear the per-page CPU
1374 * domain list as well.
1376 if (obj_priv->page_cpu_valid != NULL &&
1377 (write_domain != 0 ||
1378 read_domains & I915_GEM_DOMAIN_CPU)) {
1379 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1381 obj_priv->page_cpu_valid = NULL;
1383 obj->read_domains = read_domains;
1385 dev->invalidate_domains |= invalidate_domains;
1386 dev->flush_domains |= flush_domains;
1388 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1390 obj->read_domains, obj->write_domain,
1391 dev->invalidate_domains, dev->flush_domains);
1397 * Set the read/write domain on a range of the object.
1399 * Currently only implemented for CPU reads, otherwise drops to normal
1400 * i915_gem_object_set_domain().
1403 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1406 uint32_t read_domains,
1407 uint32_t write_domain)
1409 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1412 if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1415 if (read_domains != I915_GEM_DOMAIN_CPU ||
1417 return i915_gem_object_set_domain(obj,
1418 read_domains, write_domain);
1420 /* Wait on any GPU rendering to the object to be flushed. */
1421 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1422 ret = i915_gem_object_wait_rendering(obj);
1427 if (obj_priv->page_cpu_valid == NULL) {
1428 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1432 /* Flush the cache on any pages that are still invalid from the CPU's
1435 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1436 if (obj_priv->page_cpu_valid[i])
1439 drm_clflush_pages(obj_priv->page_list + i, 1);
1441 obj_priv->page_cpu_valid[i] = 1;
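/* Worked example (assuming 4 KiB pages, hypothetical range): a request
 * with offset = 0x0ff0 and size = 0x20 spans pages 0 and 1, so the loop
 * runs from i = 0 through (0x0ff0 + 0x20 - 1) / PAGE_SIZE = 1 and both
 * pages are clflushed and marked valid.
 */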
1448 * Once all of the objects have been set in the proper domain,
1449 * perform the necessary flush and invalidate operations.
1451 * Returns the write domains flushed, for use in flush tracking.
1454 i915_gem_dev_set_domain(struct drm_device *dev)
1456 uint32_t flush_domains = dev->flush_domains;
1459 * Now that all the buffers are synced to the proper domains,
1460 * flush and invalidate the collected domains
1462 if (dev->invalidate_domains | dev->flush_domains) {
1464 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1466 dev->invalidate_domains,
1467 dev->flush_domains);
1470 dev->invalidate_domains,
1471 dev->flush_domains);
1472 dev->invalidate_domains = 0;
1473 dev->flush_domains = 0;
1476 return flush_domains;
1480 * Pin an object to the GTT and evaluate the relocations landing in it.
1483 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1484 struct drm_file *file_priv,
1485 struct drm_i915_gem_exec_object *entry)
1487 struct drm_device *dev = obj->dev;
1488 struct drm_i915_gem_relocation_entry reloc;
1489 struct drm_i915_gem_relocation_entry __user *relocs;
1490 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1492 uint32_t last_reloc_offset = -1;
1493 void __iomem *reloc_page = NULL;
1495 /* Choose the GTT offset for our buffer and put it there. */
1496 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1500 entry->offset = obj_priv->gtt_offset;
1502 relocs = (struct drm_i915_gem_relocation_entry __user *)
1503 (uintptr_t) entry->relocs_ptr;
1504 /* Apply the relocations, using the GTT aperture to avoid cache
1505 * flushing requirements.
1507 for (i = 0; i < entry->relocation_count; i++) {
1508 struct drm_gem_object *target_obj;
1509 struct drm_i915_gem_object *target_obj_priv;
1510 uint32_t reloc_val, reloc_offset;
1511 uint32_t __iomem *reloc_entry;
1513 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1515 i915_gem_object_unpin(obj);
1519 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1520 reloc.target_handle);
1521 if (target_obj == NULL) {
1522 i915_gem_object_unpin(obj);
1525 target_obj_priv = target_obj->driver_private;
1527 /* The target buffer should have appeared before us in the
1528 * exec_object list, so it should have a GTT space bound by now.
1530 if (target_obj_priv->gtt_space == NULL) {
1531 DRM_ERROR("No GTT space found for object %d\n",
1532 reloc.target_handle);
1533 drm_gem_object_unreference(target_obj);
1534 i915_gem_object_unpin(obj);
1538 if (reloc.offset > obj->size - 4) {
1539 DRM_ERROR("Relocation beyond object bounds: "
1540 "obj %p target %d offset %d size %d.\n",
1541 obj, reloc.target_handle,
1542 (int) reloc.offset, (int) obj->size);
1543 drm_gem_object_unreference(target_obj);
1544 i915_gem_object_unpin(obj);
1547 if (reloc.offset & 3) {
1548 DRM_ERROR("Relocation not 4-byte aligned: "
1549 "obj %p target %d offset %d.\n",
1550 obj, reloc.target_handle,
1551 (int) reloc.offset);
1552 drm_gem_object_unreference(target_obj);
1553 i915_gem_object_unpin(obj);
1557 if (reloc.write_domain && target_obj->pending_write_domain &&
1558 reloc.write_domain != target_obj->pending_write_domain) {
1559 DRM_ERROR("Write domain conflict: "
1560 "obj %p target %d offset %d "
1561 "new %08x old %08x\n",
1562 obj, reloc.target_handle,
1565 target_obj->pending_write_domain);
1566 drm_gem_object_unreference(target_obj);
1567 i915_gem_object_unpin(obj);
1572 DRM_INFO("%s: obj %p offset %08x target %d "
1573 "read %08x write %08x gtt %08x "
1574 "presumed %08x delta %08x\n",
1578 (int) reloc.target_handle,
1579 (int) reloc.read_domains,
1580 (int) reloc.write_domain,
1581 (int) target_obj_priv->gtt_offset,
1582 (int) reloc.presumed_offset,
1586 target_obj->pending_read_domains |= reloc.read_domains;
1587 target_obj->pending_write_domain |= reloc.write_domain;
1589 /* If the relocation already has the right value in it, no
1590 * more work needs to be done.
1592 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1593 drm_gem_object_unreference(target_obj);
1597 /* Now that we're going to actually write some data in,
1598 * make sure that any rendering using this buffer's contents
1601 i915_gem_object_wait_rendering(obj);
1603 /* As we're writing through the gtt, flush
1604 * any CPU writes before we write the relocations
1606 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1607 i915_gem_clflush_object(obj);
1608 drm_agp_chipset_flush(dev);
1609 obj->write_domain = 0;
1612 /* Map the page containing the relocation we're going to
1615 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1616 if (reloc_page == NULL ||
1617 (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1618 (reloc_offset & ~(PAGE_SIZE - 1))) {
1619 if (reloc_page != NULL)
1620 iounmap(reloc_page);
1622 reloc_page = ioremap_wc(dev->agp->base +
1626 last_reloc_offset = reloc_offset;
1627 if (reloc_page == NULL) {
1628 drm_gem_object_unreference(target_obj);
1629 i915_gem_object_unpin(obj);
1634 reloc_entry = (uint32_t __iomem *)(reloc_page +
1635 (reloc_offset & (PAGE_SIZE - 1)));
1636 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1639 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1640 obj, (unsigned int) reloc.offset,
1641 readl(reloc_entry), reloc_val);
1643 writel(reloc_val, reloc_entry);
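/* Worked example (hypothetical offsets): if the target object is bound
 * at gtt_offset 0x00100000 and reloc.delta is 0x40, reloc_val becomes
 * 0x00100040 and is written through the write-combined GTT mapping at
 * the page offset (reloc_offset & (PAGE_SIZE - 1)) computed above.
 */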
1645 /* Write the updated presumed offset for this entry back out
1648 reloc.presumed_offset = target_obj_priv->gtt_offset;
1649 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1651 drm_gem_object_unreference(target_obj);
1652 i915_gem_object_unpin(obj);
1656 drm_gem_object_unreference(target_obj);
1659 if (reloc_page != NULL)
1660 iounmap(reloc_page);
1664 i915_gem_dump_object(obj, 128, __func__, ~0);
1669 /** Dispatch a batchbuffer to the ring
1672 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1673 struct drm_i915_gem_execbuffer *exec,
1674 uint64_t exec_offset)
1676 drm_i915_private_t *dev_priv = dev->dev_private;
1677 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1678 (uintptr_t) exec->cliprects_ptr;
1679 int nbox = exec->num_cliprects;
1681 uint32_t exec_start, exec_len;
1684 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1685 exec_len = (uint32_t) exec->batch_len;
1687 if ((exec_start | exec_len) & 0x7) {
1688 DRM_ERROR("alignment\n");
1695 count = nbox ? nbox : 1;
1697 for (i = 0; i < count; i++) {
1699 int ret = i915_emit_box(dev, boxes, i,
1700 exec->DR1, exec->DR4);
1705 if (IS_I830(dev) || IS_845G(dev)) {
1707 OUT_RING(MI_BATCH_BUFFER);
1708 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1709 OUT_RING(exec_start + exec_len - 4);
1714 if (IS_I965G(dev)) {
1715 OUT_RING(MI_BATCH_BUFFER_START |
1717 MI_BATCH_NON_SECURE_I965);
1718 OUT_RING(exec_start);
1720 OUT_RING(MI_BATCH_BUFFER_START |
1722 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1728 /* XXX breadcrumb */
1732 /* Throttle our rendering by waiting until the ring has completed our requests
1733 * emitted over 20 msec ago.
1735 * This should get us reasonable parallelism between CPU and GPU but also
1736 * relatively low latency when blocking on a particular request to finish.
1739 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1741 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1745 mutex_lock(&dev->struct_mutex);
1746 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1747 i915_file_priv->mm.last_gem_throttle_seqno =
1748 i915_file_priv->mm.last_gem_seqno;
1750 ret = i915_wait_request(dev, seqno);
1751 mutex_unlock(&dev->struct_mutex);
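/* Sketch of the lag this creates (hypothetical seqnos): the first call
 * records the current last_gem_seqno, say 10, as the value a later call
 * will wait for; the second call waits for seqno 10 (the work that was
 * outstanding when the first call returned) and records, say, 25. So
 * userspace can queue at most one throttle interval of work ahead of
 * the GPU.
 */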
1756 i915_gem_execbuffer(struct drm_device *dev, void *data,
1757 struct drm_file *file_priv)
1759 drm_i915_private_t *dev_priv = dev->dev_private;
1760 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1761 struct drm_i915_gem_execbuffer *args = data;
1762 struct drm_i915_gem_exec_object *exec_list = NULL;
1763 struct drm_gem_object **object_list = NULL;
1764 struct drm_gem_object *batch_obj;
1765 int ret, i, pinned = 0;
1766 uint64_t exec_offset;
1767 uint32_t seqno, flush_domains;
1770 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1771 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1774 if (args->buffer_count < 1) {
1775 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1778 /* Copy in the exec list from userland */
1779 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1781 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1783 if (exec_list == NULL || object_list == NULL) {
1784 DRM_ERROR("Failed to allocate exec or object list "
1786 args->buffer_count);
1790 ret = copy_from_user(exec_list,
1791 (struct drm_i915_relocation_entry __user *)
1792 (uintptr_t) args->buffers_ptr,
1793 sizeof(*exec_list) * args->buffer_count);
1795 DRM_ERROR("copy %d exec entries failed %d\n",
1796 args->buffer_count, ret);
1800 mutex_lock(&dev->struct_mutex);
1802 i915_verify_inactive(dev, __FILE__, __LINE__);
1804 if (dev_priv->mm.wedged) {
1805 DRM_ERROR("Execbuf while wedged\n");
1806 mutex_unlock(&dev->struct_mutex);
1810 if (dev_priv->mm.suspended) {
1811 DRM_ERROR("Execbuf while VT-switched.\n");
1812 mutex_unlock(&dev->struct_mutex);
1816 /* Zero the global flush/invalidate flags. These
1817 * will be modified as each object is bound to the
1820 dev->invalidate_domains = 0;
1821 dev->flush_domains = 0;
1823 /* Look up object handles and perform the relocations */
1824 for (i = 0; i < args->buffer_count; i++) {
1825 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1826 exec_list[i].handle);
1827 if (object_list[i] == NULL) {
1828 DRM_ERROR("Invalid object handle %d at index %d\n",
1829 exec_list[i].handle, i);
1834 object_list[i]->pending_read_domains = 0;
1835 object_list[i]->pending_write_domain = 0;
1836 ret = i915_gem_object_pin_and_relocate(object_list[i],
1840 DRM_ERROR("object bind and relocate failed %d\n", ret);
1846 /* Set the pending read domains for the batch buffer to COMMAND */
1847 batch_obj = object_list[args->buffer_count-1];
1848 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1849 batch_obj->pending_write_domain = 0;
1851 i915_verify_inactive(dev, __FILE__, __LINE__);
1853 for (i = 0; i < args->buffer_count; i++) {
1854 struct drm_gem_object *obj = object_list[i];
1855 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1857 if (obj_priv->gtt_space == NULL) {
1858 /* We evicted the buffer in the process of validating
1859 * our set of buffers in. We could try to recover by
1860 * kicking everything out and trying again from
1867 /* make sure all previous memory operations have passed */
1868 ret = i915_gem_object_set_domain(obj,
1869 obj->pending_read_domains,
1870 obj->pending_write_domain);
1875 i915_verify_inactive(dev, __FILE__, __LINE__);
1877 /* Flush/invalidate caches and chipset buffer */
1878 flush_domains = i915_gem_dev_set_domain(dev);
1880 i915_verify_inactive(dev, __FILE__, __LINE__);
1883 for (i = 0; i < args->buffer_count; i++) {
1884 i915_gem_object_check_coherency(object_list[i],
1885 exec_list[i].handle);
1889 exec_offset = exec_list[args->buffer_count - 1].offset;
1892 i915_gem_dump_object(object_list[args->buffer_count - 1],
1898 (void)i915_add_request(dev, flush_domains);
1900 /* Exec the batchbuffer */
1901 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1903 DRM_ERROR("dispatch failed %d\n", ret);
1908 * Ensure that the commands in the batch buffer are
1909 * finished before the interrupt fires
1911 flush_domains = i915_retire_commands(dev);
1913 i915_verify_inactive(dev, __FILE__, __LINE__);
1916 * Get a seqno representing the execution of the current buffer,
1917 * which we can wait on. We would like to mitigate these interrupts,
1918 * likely by only creating seqnos occasionally (so that we have
1919 * *some* interrupts representing completion of buffers that we can
1920 * wait on when trying to clear up gtt space).
1922 seqno = i915_add_request(dev, flush_domains);
1924 i915_file_priv->mm.last_gem_seqno = seqno;
1925 for (i = 0; i < args->buffer_count; i++) {
1926 struct drm_gem_object *obj = object_list[i];
1927 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1929 i915_gem_object_move_to_active(obj);
1930 obj_priv->last_rendering_seqno = seqno;
1932 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1936 i915_dump_lru(dev, __func__);
1939 i915_verify_inactive(dev, __FILE__, __LINE__);
1941 /* Copy the new buffer offsets back to the user's exec list. */
1942 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1943 (uintptr_t) args->buffers_ptr,
1945 sizeof(*exec_list) * args->buffer_count);
1947 DRM_ERROR("failed to copy %d exec entries "
1948 "back to user (%d)\n",
1949 args->buffer_count, ret);
1951 if (object_list != NULL) {
1952 for (i = 0; i < pinned; i++)
1953 i915_gem_object_unpin(object_list[i]);
1955 for (i = 0; i < args->buffer_count; i++)
1956 drm_gem_object_unreference(object_list[i]);
1958 mutex_unlock(&dev->struct_mutex);
1961 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1963 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1970 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1972 struct drm_device *dev = obj->dev;
1973 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1976 i915_verify_inactive(dev, __FILE__, __LINE__);
1977 if (obj_priv->gtt_space == NULL) {
1978 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1980 DRM_ERROR("Failure to bind: %d\n", ret);
1984 obj_priv->pin_count++;
1986 /* If the object is not active and not pending a flush,
1987 * remove it from the inactive list
1989 if (obj_priv->pin_count == 1) {
1990 atomic_inc(&dev->pin_count);
1991 atomic_add(obj->size, &dev->pin_memory);
1992 if (!obj_priv->active &&
1993 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
1994 I915_GEM_DOMAIN_GTT)) == 0 &&
1995 !list_empty(&obj_priv->list))
1996 list_del_init(&obj_priv->list);
1998 i915_verify_inactive(dev, __FILE__, __LINE__);
2004 i915_gem_object_unpin(struct drm_gem_object *obj)
2006 struct drm_device *dev = obj->dev;
2007 drm_i915_private_t *dev_priv = dev->dev_private;
2008 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2010 i915_verify_inactive(dev, __FILE__, __LINE__);
2011 obj_priv->pin_count--;
2012 BUG_ON(obj_priv->pin_count < 0);
2013 BUG_ON(obj_priv->gtt_space == NULL);
2015 /* If the object is no longer pinned, and is
2016 * neither active nor being flushed, then stick it on
2019 if (obj_priv->pin_count == 0) {
2020 if (!obj_priv->active &&
2021 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2022 I915_GEM_DOMAIN_GTT)) == 0)
2023 list_move_tail(&obj_priv->list,
2024 &dev_priv->mm.inactive_list);
2025 atomic_dec(&dev->pin_count);
2026 atomic_sub(obj->size, &dev->pin_memory);
2028 i915_verify_inactive(dev, __FILE__, __LINE__);
2032 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2033 struct drm_file *file_priv)
2035 struct drm_i915_gem_pin *args = data;
2036 struct drm_gem_object *obj;
2037 struct drm_i915_gem_object *obj_priv;
2040 mutex_lock(&dev->struct_mutex);
2042 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2044 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2046 mutex_unlock(&dev->struct_mutex);
2049 obj_priv = obj->driver_private;
2051 ret = i915_gem_object_pin(obj, args->alignment);
2053 drm_gem_object_unreference(obj);
2054 mutex_unlock(&dev->struct_mutex);
2058 /* XXX - flush the CPU caches for pinned objects
2059 * as the X server doesn't manage domains yet
2061 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2062 i915_gem_clflush_object(obj);
2063 drm_agp_chipset_flush(dev);
2064 obj->write_domain = 0;
2066 args->offset = obj_priv->gtt_offset;
2067 drm_gem_object_unreference(obj);
2068 mutex_unlock(&dev->struct_mutex);
2074 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2075 struct drm_file *file_priv)
2077 struct drm_i915_gem_pin *args = data;
2078 struct drm_gem_object *obj;
2080 mutex_lock(&dev->struct_mutex);
2082 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2084 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2086 mutex_unlock(&dev->struct_mutex);
2090 i915_gem_object_unpin(obj);
2092 drm_gem_object_unreference(obj);
2093 mutex_unlock(&dev->struct_mutex);
2098 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2099 struct drm_file *file_priv)
2101 struct drm_i915_gem_busy *args = data;
2102 struct drm_gem_object *obj;
2103 struct drm_i915_gem_object *obj_priv;
2105 mutex_lock(&dev->struct_mutex);
2106 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2108 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2110 mutex_unlock(&dev->struct_mutex);
2114 obj_priv = obj->driver_private;
2115 args->busy = obj_priv->active;
2117 drm_gem_object_unreference(obj);
2118 mutex_unlock(&dev->struct_mutex);
2123 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2124 struct drm_file *file_priv)
2126 return i915_gem_ring_throttle(dev, file_priv);
2129 int i915_gem_init_object(struct drm_gem_object *obj)
2131 struct drm_i915_gem_object *obj_priv;
2133 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2134 if (obj_priv == NULL)
2138 * We've just allocated pages from the kernel,
2139 * so they've just been written by the CPU with
2140 * zeros. They'll need to be clflushed before we
2141 * use them with the GPU.
2143 obj->write_domain = I915_GEM_DOMAIN_CPU;
2144 obj->read_domains = I915_GEM_DOMAIN_CPU;
2146 obj_priv->agp_type = AGP_USER_MEMORY;
2148 obj->driver_private = obj_priv;
2149 obj_priv->obj = obj;
2150 INIT_LIST_HEAD(&obj_priv->list);
2154 void i915_gem_free_object(struct drm_gem_object *obj)
2156 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2158 while (obj_priv->pin_count > 0)
2159 i915_gem_object_unpin(obj);
2161 i915_gem_object_unbind(obj);
2163 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2164 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2168 i915_gem_set_domain(struct drm_gem_object *obj,
2169 struct drm_file *file_priv,
2170 uint32_t read_domains,
2171 uint32_t write_domain)
2173 struct drm_device *dev = obj->dev;
2175 uint32_t flush_domains;
2177 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2179 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2182 flush_domains = i915_gem_dev_set_domain(obj->dev);
2184 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2185 (void) i915_add_request(dev, flush_domains);
2190 /** Unbinds all objects that are on the given buffer list. */
2192 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2194 struct drm_gem_object *obj;
2195 struct drm_i915_gem_object *obj_priv;
2198 while (!list_empty(head)) {
2199 obj_priv = list_first_entry(head,
2200 struct drm_i915_gem_object,
2202 obj = obj_priv->obj;
2204 if (obj_priv->pin_count != 0) {
2205 DRM_ERROR("Pinned object in unbind list\n");
2206 mutex_unlock(&dev->struct_mutex);
2210 ret = i915_gem_object_unbind(obj);
2212 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2214 mutex_unlock(&dev->struct_mutex);
2224 i915_gem_idle(struct drm_device *dev)
2226 drm_i915_private_t *dev_priv = dev->dev_private;
2227 uint32_t seqno, cur_seqno, last_seqno;
2230 if (dev_priv->mm.suspended)
2233 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2234 * We need to replace this with a semaphore, or something.
2236 dev_priv->mm.suspended = 1;
2238 i915_kernel_lost_context(dev);
2240 /* Flush the GPU along with all non-CPU write domains
2242 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2243 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2244 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2245 I915_GEM_DOMAIN_GTT));
2248 mutex_unlock(&dev->struct_mutex);
2252 dev_priv->mm.waiting_gem_seqno = seqno;
2256 cur_seqno = i915_get_gem_seqno(dev);
2257 if (i915_seqno_passed(cur_seqno, seqno))
2259 if (last_seqno == cur_seqno) {
2260 if (stuck++ > 100) {
2261 DRM_ERROR("hardware wedged\n");
2262 dev_priv->mm.wedged = 1;
2263 DRM_WAKEUP(&dev_priv->irq_queue);
2268 last_seqno = cur_seqno;
2270 dev_priv->mm.waiting_gem_seqno = 0;
2272 i915_gem_retire_requests(dev);
2274 /* Active and flushing should now be empty as we've
2275 * waited for a sequence higher than any pending execbuffer
2277 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2278 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2280 /* Request should now be empty as we've also waited
2281 * for the last request in the list
2283 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2285 /* Move all buffers out of the GTT. */
2286 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2290 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2291 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2292 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2293 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2298 i915_gem_init_hws(struct drm_device *dev)
2300 drm_i915_private_t *dev_priv = dev->dev_private;
2301 struct drm_gem_object *obj;
2302 struct drm_i915_gem_object *obj_priv;
2305 /* If we need a physical address for the status page, it's already
2306 * initialized at driver load time.
2308 if (!I915_NEED_GFX_HWS(dev))
2311 obj = drm_gem_object_alloc(dev, 4096);
2313 DRM_ERROR("Failed to allocate status page\n");
2316 obj_priv = obj->driver_private;
2317 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
2319 ret = i915_gem_object_pin(obj, 4096);
2321 drm_gem_object_unreference(obj);
2325 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2327 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
2328 if (dev_priv->hw_status_page == NULL) {
2329 DRM_ERROR("Failed to map status page.\n");
2330 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2331 drm_gem_object_unreference(obj);
2334 dev_priv->hws_obj = obj;
2335 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2336 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2337 I915_READ(HWS_PGA); /* posting read */
2338 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2344 i915_gem_init_ringbuffer(struct drm_device *dev)
2346 drm_i915_private_t *dev_priv = dev->dev_private;
2347 struct drm_gem_object *obj;
2348 struct drm_i915_gem_object *obj_priv;
2352 ret = i915_gem_init_hws(dev);
2356 obj = drm_gem_object_alloc(dev, 128 * 1024);
2358 DRM_ERROR("Failed to allocate ringbuffer\n");
2361 obj_priv = obj->driver_private;
2363 ret = i915_gem_object_pin(obj, 4096);
2365 drm_gem_object_unreference(obj);
2369 /* Set up the kernel mapping for the ring. */
2370 dev_priv->ring.Size = obj->size;
2371 dev_priv->ring.tail_mask = obj->size - 1;
2373 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2374 dev_priv->ring.map.size = obj->size;
2375 dev_priv->ring.map.type = 0;
2376 dev_priv->ring.map.flags = 0;
2377 dev_priv->ring.map.mtrr = 0;
2379 drm_core_ioremap_wc(&dev_priv->ring.map, dev);
2380 if (dev_priv->ring.map.handle == NULL) {
2381 DRM_ERROR("Failed to map ringbuffer.\n");
2382 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2383 drm_gem_object_unreference(obj);
2386 dev_priv->ring.ring_obj = obj;
2387 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2389 /* Stop the ring if it's running. */
2390 I915_WRITE(PRB0_CTL, 0);
2391 I915_WRITE(PRB0_TAIL, 0);
2392 I915_WRITE(PRB0_HEAD, 0);
2394 /* Initialize the ring. */
2395 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2396 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2398 /* G45 ring initialization fails to reset head to zero */
2400 DRM_ERROR("Ring head not reset to zero "
2401 "ctl %08x head %08x tail %08x start %08x\n",
2402 I915_READ(PRB0_CTL),
2403 I915_READ(PRB0_HEAD),
2404 I915_READ(PRB0_TAIL),
2405 I915_READ(PRB0_START));
2406 I915_WRITE(PRB0_HEAD, 0);
2408 DRM_ERROR("Ring head forced to zero "
2409 "ctl %08x head %08x tail %08x start %08x\n",
2410 I915_READ(PRB0_CTL),
2411 I915_READ(PRB0_HEAD),
2412 I915_READ(PRB0_TAIL),
2413 I915_READ(PRB0_START));
2416 I915_WRITE(PRB0_CTL,
2417 ((obj->size - 4096) & RING_NR_PAGES) |
2421 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
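/* The PRB0_CTL value written above encodes the ring length as whole
 * pages minus one (assuming the usual i915 ring-length field): with the
 * 128 KiB object allocated in this function, obj->size - 4096 = 0x1f000,
 * i.e. 31 in the page-count field for a 32-page ring.
 */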
2423 /* If the head is still not zero, the ring is dead */
2425 DRM_ERROR("Ring initialization failed "
2426 "ctl %08x head %08x tail %08x start %08x\n",
2427 I915_READ(PRB0_CTL),
2428 I915_READ(PRB0_HEAD),
2429 I915_READ(PRB0_TAIL),
2430 I915_READ(PRB0_START));
2434 /* Update our cache of the ring state */
2435 i915_kernel_lost_context(dev);
2441 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2443 drm_i915_private_t *dev_priv = dev->dev_private;
2445 if (dev_priv->ring.ring_obj == NULL)
2448 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2450 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2451 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2452 dev_priv->ring.ring_obj = NULL;
2453 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2455 if (dev_priv->hws_obj != NULL) {
2456 struct drm_gem_object *obj = dev_priv->hws_obj;
2457 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2459 kunmap(obj_priv->page_list[0]);
2460 i915_gem_object_unpin(obj);
2461 drm_gem_object_unreference(obj);
2462 dev_priv->hws_obj = NULL;
2463 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2464 dev_priv->hw_status_page = NULL;
2466 /* Write high address into HWS_PGA when disabling. */
2467 I915_WRITE(HWS_PGA, 0x1ffff000);
2472 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2473 struct drm_file *file_priv)
2475 drm_i915_private_t *dev_priv = dev->dev_private;
2478 if (dev_priv->mm.wedged) {
2479 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2480 dev_priv->mm.wedged = 0;
2483 ret = i915_gem_init_ringbuffer(dev);
2487 mutex_lock(&dev->struct_mutex);
2488 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2489 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2490 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2491 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2492 dev_priv->mm.suspended = 0;
2493 mutex_unlock(&dev->struct_mutex);
2495 drm_irq_install(dev);
2501 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2502 struct drm_file *file_priv)
2506 mutex_lock(&dev->struct_mutex);
2507 ret = i915_gem_idle(dev);
2509 i915_gem_cleanup_ringbuffer(dev);
2510 mutex_unlock(&dev->struct_mutex);
2512 drm_irq_uninstall(dev);
2518 i915_gem_lastclose(struct drm_device *dev)
2521 drm_i915_private_t *dev_priv = dev->dev_private;
2523 mutex_lock(&dev->struct_mutex);
2525 if (dev_priv->ring.ring_obj != NULL) {
2526 ret = i915_gem_idle(dev);
2528 DRM_ERROR("failed to idle hardware: %d\n", ret);
2530 i915_gem_cleanup_ringbuffer(dev);
2533 mutex_unlock(&dev->struct_mutex);
2537 i915_gem_load(struct drm_device *dev)
2539 drm_i915_private_t *dev_priv = dev->dev_private;
2541 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2542 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2543 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2544 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2545 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2546 i915_gem_retire_work_handler);
2547 INIT_WORK(&dev_priv->mm.vblank_work,
2548 i915_gem_vblank_work_handler);
2549 dev_priv->mm.next_gem_seqno = 1;
2551 i915_gem_detect_bit_6_swizzle(dev);