2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
32 #include <linux/swap.h>
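/* Every domain bit other than CPU and GTT counts as a GPU cache domain. */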
34 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
37 i915_gem_object_set_domain(struct drm_gem_object *obj,
38 uint32_t read_domains,
39 uint32_t write_domain);
41 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
44 uint32_t read_domains,
45 uint32_t write_domain);
47 i915_gem_set_domain(struct drm_gem_object *obj,
48 struct drm_file *file_priv,
49 uint32_t read_domains,
50 uint32_t write_domain);
51 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
52 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
53 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
56 i915_gem_cleanup_ringbuffer(struct drm_device *dev);
59 i915_gem_init_ioctl(struct drm_device *dev, void *data,
60 struct drm_file *file_priv)
62 drm_i915_private_t *dev_priv = dev->dev_private;
63 struct drm_i915_gem_init *args = data;
65 mutex_lock(&dev->struct_mutex);
67 if (args->gtt_start >= args->gtt_end ||
68 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
69 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
70 mutex_unlock(&dev->struct_mutex);
74 drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
75 args->gtt_end - args->gtt_start);
77 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
79 mutex_unlock(&dev->struct_mutex);
85 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
86 struct drm_file *file_priv)
88 struct drm_i915_gem_get_aperture *args = data;
90 if (!(dev->driver->driver_features & DRIVER_GEM))
93 args->aper_size = dev->gtt_total;
94 args->aper_available_size = (args->aper_size -
95 atomic_read(&dev->pin_memory));
102 * Creates a new mm object and returns a handle to it.
105 i915_gem_create_ioctl(struct drm_device *dev, void *data,
106 struct drm_file *file_priv)
108 struct drm_i915_gem_create *args = data;
109 struct drm_gem_object *obj;
112 args->size = roundup(args->size, PAGE_SIZE);
114 /* Allocate the new object */
115 obj = drm_gem_object_alloc(dev, args->size);
119 ret = drm_gem_handle_create(file_priv, obj, &handle);
120 mutex_lock(&dev->struct_mutex);
121 drm_gem_object_handle_unreference(obj);
122 mutex_unlock(&dev->struct_mutex);
127 args->handle = handle;
133 * Reads data from the object referenced by handle.
135 * On error, the contents of *data are undefined.
138 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
139 struct drm_file *file_priv)
141 struct drm_i915_gem_pread *args = data;
142 struct drm_gem_object *obj;
143 struct drm_i915_gem_object *obj_priv;
148 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
151 obj_priv = obj->driver_private;
153 /* Bounds check source.
155 * XXX: This could use review for overflow issues...
157 if (args->offset > obj->size || args->size > obj->size ||
158 args->offset + args->size > obj->size) {
159 drm_gem_object_unreference(obj);
163 mutex_lock(&dev->struct_mutex);
165 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
166 I915_GEM_DOMAIN_CPU, 0);
168 drm_gem_object_unreference(obj);
169 mutex_unlock(&dev->struct_mutex);
173 offset = args->offset;
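/* Read through the object's shmem backing file now that the range is in the CPU read domain. */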
175 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
176 args->size, &offset);
177 if (read != args->size) {
178 drm_gem_object_unreference(obj);
179 mutex_unlock(&dev->struct_mutex);
186 drm_gem_object_unreference(obj);
187 mutex_unlock(&dev->struct_mutex);
192 /* This is the fast write path which cannot handle
193 * page faults in the source data
197 fast_user_write(struct io_mapping *mapping,
198 loff_t page_base, int page_offset,
199 char __user *user_data,
203 unsigned long unwritten;
205 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
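/* The atomic, non-caching copy cannot service page faults; a nonzero return is the number of bytes left unwritten. */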
206 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
208 io_mapping_unmap_atomic(vaddr_atomic);
214 /* Here's the write path which can sleep for page faults in the source data.
219 slow_user_write(struct io_mapping *mapping,
220 loff_t page_base, int page_offset,
221 char __user *user_data,
225 unsigned long unwritten;
227 vaddr = io_mapping_map_wc(mapping, page_base);
230 unwritten = __copy_from_user(vaddr + page_offset,
232 io_mapping_unmap(vaddr);
239 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
240 struct drm_i915_gem_pwrite *args,
241 struct drm_file *file_priv)
243 struct drm_i915_gem_object *obj_priv = obj->driver_private;
244 drm_i915_private_t *dev_priv = dev->dev_private;
246 loff_t offset, page_base;
247 char __user *user_data;
248 int page_offset, page_length;
251 user_data = (char __user *) (uintptr_t) args->data_ptr;
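/* Verify the whole user buffer is readable before taking the GTT write path. */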
253 if (!access_ok(VERIFY_READ, user_data, remain))
257 mutex_lock(&dev->struct_mutex);
258 ret = i915_gem_object_pin(obj, 0);
260 mutex_unlock(&dev->struct_mutex);
263 ret = i915_gem_set_domain(obj, file_priv,
264 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
268 obj_priv = obj->driver_private;
269 offset = obj_priv->gtt_offset + args->offset;
273 /* Operation in this page
275 * page_base = page offset within aperture
276 * page_offset = offset within page
277 * page_length = bytes to copy for this page
279 page_base = (offset & ~(PAGE_SIZE-1));
280 page_offset = offset & (PAGE_SIZE-1);
281 page_length = remain;
282 if ((page_offset + remain) > PAGE_SIZE)
283 page_length = PAGE_SIZE - page_offset;
285 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
286 page_offset, user_data, page_length);
288 /* If we get a fault while copying data, then (presumably) our
289 * source page isn't available. In this case, use the
290 * non-atomic function
293 ret = slow_user_write (dev_priv->mm.gtt_mapping,
294 page_base, page_offset,
295 user_data, page_length);
300 remain -= page_length;
301 user_data += page_length;
302 offset += page_length;
306 i915_gem_object_unpin(obj);
307 mutex_unlock(&dev->struct_mutex);
313 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
314 struct drm_i915_gem_pwrite *args,
315 struct drm_file *file_priv)
321 mutex_lock(&dev->struct_mutex);
323 ret = i915_gem_set_domain(obj, file_priv,
324 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
326 mutex_unlock(&dev->struct_mutex);
330 offset = args->offset;
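/* The object is now in the CPU domain, so write through its shmem backing file. */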
332 written = vfs_write(obj->filp,
333 (char __user *)(uintptr_t) args->data_ptr,
334 args->size, &offset);
335 if (written != args->size) {
336 mutex_unlock(&dev->struct_mutex);
343 mutex_unlock(&dev->struct_mutex);
349 * Writes data to the object referenced by handle.
351 * On error, the contents of the buffer that were to be modified are undefined.
354 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
355 struct drm_file *file_priv)
357 struct drm_i915_gem_pwrite *args = data;
358 struct drm_gem_object *obj;
359 struct drm_i915_gem_object *obj_priv;
362 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
365 obj_priv = obj->driver_private;
367 /* Bounds check destination.
369 * XXX: This could use review for overflow issues...
371 if (args->offset > obj->size || args->size > obj->size ||
372 args->offset + args->size > obj->size) {
373 drm_gem_object_unreference(obj);
377 /* We can only do the GTT pwrite on untiled buffers, as otherwise
378 * it would end up going through the fenced access, and we'll get
379 * different detiling behavior between reading and writing.
380 * pread/pwrite currently are reading and writing from the CPU
381 * perspective, requiring manual detiling by the client.
383 if (obj_priv->tiling_mode == I915_TILING_NONE &&
385 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
387 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
391 DRM_INFO("pwrite failed %d\n", ret);
394 drm_gem_object_unreference(obj);
400 * Called when user space prepares to use an object
403 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
404 struct drm_file *file_priv)
406 struct drm_i915_gem_set_domain *args = data;
407 struct drm_gem_object *obj;
410 if (!(dev->driver->driver_features & DRIVER_GEM))
413 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
417 mutex_lock(&dev->struct_mutex);
419 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
420 obj, obj->size, args->read_domains, args->write_domain);
422 ret = i915_gem_set_domain(obj, file_priv,
423 args->read_domains, args->write_domain);
424 drm_gem_object_unreference(obj);
425 mutex_unlock(&dev->struct_mutex);
430 * Called when user space has done writes to this buffer
433 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
434 struct drm_file *file_priv)
436 struct drm_i915_gem_sw_finish *args = data;
437 struct drm_gem_object *obj;
438 struct drm_i915_gem_object *obj_priv;
441 if (!(dev->driver->driver_features & DRIVER_GEM))
444 mutex_lock(&dev->struct_mutex);
445 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
447 mutex_unlock(&dev->struct_mutex);
452 DRM_INFO("%s: sw_finish %d (%p %d)\n",
453 __func__, args->handle, obj, obj->size);
455 obj_priv = obj->driver_private;
457 /* Pinned buffers may be scanout, so flush the cache */
458 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
459 i915_gem_clflush_object(obj);
460 drm_agp_chipset_flush(dev);
462 drm_gem_object_unreference(obj);
463 mutex_unlock(&dev->struct_mutex);
468 * Maps the contents of an object, returning the address it is mapped into.
471 * While the mapping holds a reference on the contents of the object, it doesn't
472 * imply a ref on the object itself.
475 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
476 struct drm_file *file_priv)
478 struct drm_i915_gem_mmap *args = data;
479 struct drm_gem_object *obj;
483 if (!(dev->driver->driver_features & DRIVER_GEM))
486 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
490 offset = args->offset;
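/* GEM objects are shmem-backed, so mmapping obj->filp maps the object's own pages. */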
492 down_write(&current->mm->mmap_sem);
493 addr = do_mmap(obj->filp, 0, args->size,
494 PROT_READ | PROT_WRITE, MAP_SHARED,
496 up_write(&current->mm->mmap_sem);
497 mutex_lock(&dev->struct_mutex);
498 drm_gem_object_unreference(obj);
499 mutex_unlock(&dev->struct_mutex);
500 if (IS_ERR((void *)addr))
503 args->addr_ptr = (uint64_t) addr;
509 i915_gem_object_free_page_list(struct drm_gem_object *obj)
511 struct drm_i915_gem_object *obj_priv = obj->driver_private;
512 int page_count = obj->size / PAGE_SIZE;
515 if (obj_priv->page_list == NULL)
519 for (i = 0; i < page_count; i++)
520 if (obj_priv->page_list[i] != NULL) {
522 set_page_dirty(obj_priv->page_list[i]);
523 mark_page_accessed(obj_priv->page_list[i]);
524 page_cache_release(obj_priv->page_list[i]);
528 drm_free(obj_priv->page_list,
529 page_count * sizeof(struct page *),
531 obj_priv->page_list = NULL;
535 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
537 struct drm_device *dev = obj->dev;
538 drm_i915_private_t *dev_priv = dev->dev_private;
539 struct drm_i915_gem_object *obj_priv = obj->driver_private;
541 /* Add a reference if we're newly entering the active list. */
542 if (!obj_priv->active) {
543 drm_gem_object_reference(obj);
544 obj_priv->active = 1;
546 /* Move from whatever list we were on to the tail of execution. */
547 list_move_tail(&obj_priv->list,
548 &dev_priv->mm.active_list);
549 obj_priv->last_rendering_seqno = seqno;
553 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
555 struct drm_device *dev = obj->dev;
556 drm_i915_private_t *dev_priv = dev->dev_private;
557 struct drm_i915_gem_object *obj_priv = obj->driver_private;
559 BUG_ON(!obj_priv->active);
560 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
561 obj_priv->last_rendering_seqno = 0;
565 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
567 struct drm_device *dev = obj->dev;
568 drm_i915_private_t *dev_priv = dev->dev_private;
569 struct drm_i915_gem_object *obj_priv = obj->driver_private;
571 i915_verify_inactive(dev, __FILE__, __LINE__);
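/* Pinned buffers are kept off the LRU lists entirely; everything else moves to the inactive tail. */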
572 if (obj_priv->pin_count != 0)
573 list_del_init(&obj_priv->list);
575 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
577 obj_priv->last_rendering_seqno = 0;
578 if (obj_priv->active) {
579 obj_priv->active = 0;
580 drm_gem_object_unreference(obj);
582 i915_verify_inactive(dev, __FILE__, __LINE__);
586 * Creates a new sequence number, emitting a write of it to the status page
587 * plus an interrupt, which will trigger i915_user_interrupt_handler.
589 * Must be called with struct_mutex held.
591 * Returned sequence numbers are nonzero on success.
594 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
596 drm_i915_private_t *dev_priv = dev->dev_private;
597 struct drm_i915_gem_request *request;
602 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
606 /* Grab the seqno we're going to make this request be, and bump the
607 * next (skipping 0 so it can be the reserved no-seqno value).
609 seqno = dev_priv->mm.next_gem_seqno;
610 dev_priv->mm.next_gem_seqno++;
611 if (dev_priv->mm.next_gem_seqno == 0)
612 dev_priv->mm.next_gem_seqno++;
615 OUT_RING(MI_STORE_DWORD_INDEX);
616 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
619 OUT_RING(MI_USER_INTERRUPT);
622 DRM_DEBUG("%d\n", seqno);
624 request->seqno = seqno;
625 request->emitted_jiffies = jiffies;
626 was_empty = list_empty(&dev_priv->mm.request_list);
627 list_add_tail(&request->list, &dev_priv->mm.request_list);
629 /* Associate any objects on the flushing list matching the write
630 * domain we're flushing with our flush.
632 if (flush_domains != 0) {
633 struct drm_i915_gem_object *obj_priv, *next;
635 list_for_each_entry_safe(obj_priv, next,
636 &dev_priv->mm.flushing_list, list) {
637 struct drm_gem_object *obj = obj_priv->obj;
639 if ((obj->write_domain & flush_domains) ==
641 obj->write_domain = 0;
642 i915_gem_object_move_to_active(obj, seqno);
648 if (was_empty && !dev_priv->mm.suspended)
649 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
654 * Command execution barrier
656 * Ensures that all commands in the ring are finished
657 * before signalling the CPU
660 i915_retire_commands(struct drm_device *dev)
662 drm_i915_private_t *dev_priv = dev->dev_private;
663 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
664 uint32_t flush_domains = 0;
667 /* The sampler always gets flushed on i965 (sigh) */
669 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
672 OUT_RING(0); /* noop */
674 return flush_domains;
678 * Moves buffers associated only with the given active seqno from the active
679 * to inactive list, potentially freeing them.
682 i915_gem_retire_request(struct drm_device *dev,
683 struct drm_i915_gem_request *request)
685 drm_i915_private_t *dev_priv = dev->dev_private;
687 /* Move any buffers on the active list that are no longer referenced
688 * by the ringbuffer to the flushing/inactive lists as appropriate.
690 while (!list_empty(&dev_priv->mm.active_list)) {
691 struct drm_gem_object *obj;
692 struct drm_i915_gem_object *obj_priv;
694 obj_priv = list_first_entry(&dev_priv->mm.active_list,
695 struct drm_i915_gem_object,
699 /* If the seqno being retired doesn't match the oldest in the
700 * list, then the oldest in the list must still be newer than
703 if (obj_priv->last_rendering_seqno != request->seqno)
706 DRM_INFO("%s: retire %d moves to inactive list %p\n",
707 __func__, request->seqno, obj);
710 if (obj->write_domain != 0)
711 i915_gem_object_move_to_flushing(obj);
713 i915_gem_object_move_to_inactive(obj);
718 * Returns true if seq1 is later than seq2.
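 * The signed 32-bit subtraction keeps the comparison correct across
 * sequence number wraparound.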
721 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
723 return (int32_t)(seq1 - seq2) >= 0;
727 i915_get_gem_seqno(struct drm_device *dev)
729 drm_i915_private_t *dev_priv = dev->dev_private;
731 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
735 * This function clears the request list as sequence numbers are passed.
738 i915_gem_retire_requests(struct drm_device *dev)
740 drm_i915_private_t *dev_priv = dev->dev_private;
743 seqno = i915_get_gem_seqno(dev);
745 while (!list_empty(&dev_priv->mm.request_list)) {
746 struct drm_i915_gem_request *request;
747 uint32_t retiring_seqno;
749 request = list_first_entry(&dev_priv->mm.request_list,
750 struct drm_i915_gem_request,
752 retiring_seqno = request->seqno;
754 if (i915_seqno_passed(seqno, retiring_seqno) ||
755 dev_priv->mm.wedged) {
756 i915_gem_retire_request(dev, request);
758 list_del(&request->list);
759 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
766 i915_gem_retire_work_handler(struct work_struct *work)
768 drm_i915_private_t *dev_priv;
769 struct drm_device *dev;
771 dev_priv = container_of(work, drm_i915_private_t,
772 mm.retire_work.work);
775 mutex_lock(&dev->struct_mutex);
776 i915_gem_retire_requests(dev);
777 if (!dev_priv->mm.suspended &&
778 !list_empty(&dev_priv->mm.request_list))
779 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
780 mutex_unlock(&dev->struct_mutex);
784 * Waits for a sequence number to be signaled, and cleans up the
785 * request and object lists appropriately for that event.
788 i915_wait_request(struct drm_device *dev, uint32_t seqno)
790 drm_i915_private_t *dev_priv = dev->dev_private;
795 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
796 dev_priv->mm.waiting_gem_seqno = seqno;
797 i915_user_irq_get(dev);
798 ret = wait_event_interruptible(dev_priv->irq_queue,
799 i915_seqno_passed(i915_get_gem_seqno(dev),
801 dev_priv->mm.wedged);
802 i915_user_irq_put(dev);
803 dev_priv->mm.waiting_gem_seqno = 0;
805 if (dev_priv->mm.wedged)
808 if (ret && ret != -ERESTARTSYS)
809 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
810 __func__, ret, seqno, i915_get_gem_seqno(dev));
812 /* Directly dispatch request retiring. While we have the work queue
813 * to handle this, the waiter on a request often wants an associated
814 * buffer to have made it to the inactive list, and we would need
815 * a separate wait queue to handle that.
818 i915_gem_retire_requests(dev);
824 i915_gem_flush(struct drm_device *dev,
825 uint32_t invalidate_domains,
826 uint32_t flush_domains)
828 drm_i915_private_t *dev_priv = dev->dev_private;
833 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
834 invalidate_domains, flush_domains);
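/* CPU-domain writes are flushed via the chipset rather than a ring command. */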
837 if (flush_domains & I915_GEM_DOMAIN_CPU)
838 drm_agp_chipset_flush(dev);
840 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
841 I915_GEM_DOMAIN_GTT)) {
845 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
846 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
847 * also flushed at 2d versus 3d pipeline switches.
851 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
852 * MI_READ_FLUSH is set, and is always flushed on 965.
854 * I915_GEM_DOMAIN_COMMAND may not exist?
856 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
857 * invalidated when MI_EXE_FLUSH is set.
859 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
860 * invalidated with every MI_FLUSH.
864 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
865 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
866 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
867 * are flushed at any MI_FLUSH.
870 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
871 if ((invalidate_domains|flush_domains) &
872 I915_GEM_DOMAIN_RENDER)
873 cmd &= ~MI_NO_WRITE_FLUSH;
874 if (!IS_I965G(dev)) {
876 * On the 965, the sampler cache always gets flushed
877 * and this bit is reserved.
879 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
880 cmd |= MI_READ_FLUSH;
882 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
886 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
890 OUT_RING(0); /* noop */
896 * Ensures that all rendering to the object has completed and the object is
897 * safe to unbind from the GTT or access from the CPU.
900 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
902 struct drm_device *dev = obj->dev;
903 struct drm_i915_gem_object *obj_priv = obj->driver_private;
906 /* If there are writes queued to the buffer, flush and
907 * create a new seqno to wait for.
909 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
910 uint32_t seqno, write_domain = obj->write_domain;
912 DRM_INFO("%s: flushing object %p from write domain %08x\n",
913 __func__, obj, write_domain);
915 i915_gem_flush(dev, 0, write_domain);
917 seqno = i915_add_request(dev, write_domain);
918 i915_gem_object_move_to_active(obj, seqno);
920 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
924 /* If there is rendering queued on the buffer being evicted, wait for
927 if (obj_priv->active) {
929 DRM_INFO("%s: object %p wait for seqno %08x\n",
930 __func__, obj, obj_priv->last_rendering_seqno);
932 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
941 * Unbinds an object from the GTT aperture.
944 i915_gem_object_unbind(struct drm_gem_object *obj)
946 struct drm_device *dev = obj->dev;
947 struct drm_i915_gem_object *obj_priv = obj->driver_private;
951 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
952 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
954 if (obj_priv->gtt_space == NULL)
957 if (obj_priv->pin_count != 0) {
958 DRM_ERROR("Attempting to unbind pinned buffer\n");
962 /* Wait for any rendering to complete
964 ret = i915_gem_object_wait_rendering(obj);
966 DRM_ERROR("wait_rendering failed: %d\n", ret);
970 /* Move the object to the CPU domain to ensure that
971 * any possible CPU writes while it's not in the GTT
972 * are flushed when we go to remap it. This will
973 * also ensure that all pending GPU writes are finished
976 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
977 I915_GEM_DOMAIN_CPU);
979 DRM_ERROR("set_domain failed: %d\n", ret);
983 if (obj_priv->agp_mem != NULL) {
984 drm_unbind_agp(obj_priv->agp_mem);
985 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
986 obj_priv->agp_mem = NULL;
989 BUG_ON(obj_priv->active);
991 i915_gem_object_free_page_list(obj);
993 if (obj_priv->gtt_space) {
994 atomic_dec(&dev->gtt_count);
995 atomic_sub(obj->size, &dev->gtt_memory);
997 drm_mm_put_block(obj_priv->gtt_space);
998 obj_priv->gtt_space = NULL;
1001 /* Remove ourselves from the LRU list if present. */
1002 if (!list_empty(&obj_priv->list))
1003 list_del_init(&obj_priv->list);
1009 i915_gem_evict_something(struct drm_device *dev)
1011 drm_i915_private_t *dev_priv = dev->dev_private;
1012 struct drm_gem_object *obj;
1013 struct drm_i915_gem_object *obj_priv;
1017 /* If there's an inactive buffer available now, grab it
1020 if (!list_empty(&dev_priv->mm.inactive_list)) {
1021 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1022 struct drm_i915_gem_object,
1024 obj = obj_priv->obj;
1025 BUG_ON(obj_priv->pin_count != 0);
1027 DRM_INFO("%s: evicting %p\n", __func__, obj);
1029 BUG_ON(obj_priv->active);
1031 /* Wait on the rendering and unbind the buffer. */
1032 ret = i915_gem_object_unbind(obj);
1036 /* If we didn't get anything, but the ring is still processing
1037 * things, wait for one of those things to finish and hopefully
1038 * leave us a buffer to evict.
1040 if (!list_empty(&dev_priv->mm.request_list)) {
1041 struct drm_i915_gem_request *request;
1043 request = list_first_entry(&dev_priv->mm.request_list,
1044 struct drm_i915_gem_request,
1047 ret = i915_wait_request(dev, request->seqno);
1051 /* if waiting caused an object to become inactive,
1052 * then loop around and wait for it. Otherwise, we
1053 * assume that waiting freed and unbound something,
1054 * so there should now be some space in the GTT
1056 if (!list_empty(&dev_priv->mm.inactive_list))
1061 /* If we didn't have anything on the request list but there
1062 * are buffers awaiting a flush, emit one and try again.
1063 * When we wait on it, those buffers waiting for that flush
1064 * will get moved to inactive.
1066 if (!list_empty(&dev_priv->mm.flushing_list)) {
1067 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1068 struct drm_i915_gem_object,
1070 obj = obj_priv->obj;
1075 i915_add_request(dev, obj->write_domain);
1081 DRM_ERROR("inactive empty %d request empty %d "
1082 "flushing empty %d\n",
1083 list_empty(&dev_priv->mm.inactive_list),
1084 list_empty(&dev_priv->mm.request_list),
1085 list_empty(&dev_priv->mm.flushing_list));
1086 /* If we didn't do any of the above, there's nothing to be done
1087 * and we just can't fit it in.
1095 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1097 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1099 struct address_space *mapping;
1100 struct inode *inode;
1104 if (obj_priv->page_list)
1107 /* Get the list of pages out of our struct file. They'll be pinned
1108 * at this point until we release them.
1110 page_count = obj->size / PAGE_SIZE;
1111 BUG_ON(obj_priv->page_list != NULL);
1112 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1114 if (obj_priv->page_list == NULL) {
1115 DRM_ERROR("Failed to allocate page list\n");
1119 inode = obj->filp->f_path.dentry->d_inode;
1120 mapping = inode->i_mapping;
1121 for (i = 0; i < page_count; i++) {
1122 page = read_mapping_page(mapping, i, NULL);
1124 ret = PTR_ERR(page);
1125 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1126 i915_gem_object_free_page_list(obj);
1129 obj_priv->page_list[i] = page;
1135 * Finds free space in the GTT aperture and binds the object there.
1138 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1140 struct drm_device *dev = obj->dev;
1141 drm_i915_private_t *dev_priv = dev->dev_private;
1142 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1143 struct drm_mm_node *free_space;
1144 int page_count, ret;
1147 alignment = PAGE_SIZE;
1148 if (alignment & (PAGE_SIZE - 1)) {
1149 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1154 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1155 obj->size, alignment, 0);
1156 if (free_space != NULL) {
1157 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1159 if (obj_priv->gtt_space != NULL) {
1160 obj_priv->gtt_space->private = obj;
1161 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1164 if (obj_priv->gtt_space == NULL) {
1165 /* If the gtt is empty and we're still having trouble
1166 * fitting our object in, we're out of memory.
1169 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1171 if (list_empty(&dev_priv->mm.inactive_list) &&
1172 list_empty(&dev_priv->mm.flushing_list) &&
1173 list_empty(&dev_priv->mm.active_list)) {
1174 DRM_ERROR("GTT full, but LRU list empty\n");
1178 ret = i915_gem_evict_something(dev);
1180 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1187 DRM_INFO("Binding object of size %d at 0x%08x\n",
1188 obj->size, obj_priv->gtt_offset);
1190 ret = i915_gem_object_get_page_list(obj);
1192 drm_mm_put_block(obj_priv->gtt_space);
1193 obj_priv->gtt_space = NULL;
1197 page_count = obj->size / PAGE_SIZE;
1198 /* Create an AGP memory structure pointing at our pages, and bind it
1201 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1202 obj_priv->page_list,
1204 obj_priv->gtt_offset,
1205 obj_priv->agp_type);
1206 if (obj_priv->agp_mem == NULL) {
1207 i915_gem_object_free_page_list(obj);
1208 drm_mm_put_block(obj_priv->gtt_space);
1209 obj_priv->gtt_space = NULL;
1212 atomic_inc(&dev->gtt_count);
1213 atomic_add(obj->size, &dev->gtt_memory);
1215 /* Assert that the object is not currently in any GPU domain. As it
1216 * wasn't in the GTT, there shouldn't be any way it could have been in
1219 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1220 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1226 i915_gem_clflush_object(struct drm_gem_object *obj)
1228 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1230 /* If we don't have a page list set up, then we're not pinned
1231 * to GPU, and we can ignore the cache flush because it'll happen
1232 * again at bind time.
1234 if (obj_priv->page_list == NULL)
1237 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1241 * Set the next domain for the specified object. This
1242 * may not actually perform the necessary flushing/invalidating though,
1243 * as that may want to be batched with other set_domain operations
1245 * This is (we hope) the only really tricky part of gem. The goal
1246 * is fairly simple -- track which caches hold bits of the object
1247 * and make sure they remain coherent. A few concrete examples may
1248 * help to explain how it works. For shorthand, we use the notation
1249 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1250 * a pair of read and write domain masks.
1252 * Case 1: the batch buffer
1258 * 5. Unmapped from GTT
1261 * Let's take these a step at a time
1264 * Pages allocated from the kernel may still have
1265 * cache contents, so we set them to (CPU, CPU) always.
1266 * 2. Written by CPU (using pwrite)
1267 * The pwrite function calls set_domain (CPU, CPU) and
1268 * this function does nothing (as nothing changes)
1270 * This function asserts that the object is not
1271 * currently in any GPU-based read or write domains
1273 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1274 * As write_domain is zero, this function adds in the
1275 * current read domains (CPU+COMMAND, 0).
1276 * flush_domains is set to CPU.
1277 * invalidate_domains is set to COMMAND
1278 * clflush is run to get data out of the CPU caches
1279 * then i915_dev_set_domain calls i915_gem_flush to
1280 * emit an MI_FLUSH and drm_agp_chipset_flush
1281 * 5. Unmapped from GTT
1282 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1283 * flush_domains and invalidate_domains end up both zero
1284 * so no flushing/invalidating happens
1288 * Case 2: The shared render buffer
1292 * 3. Read/written by GPU
1293 * 4. set_domain to (CPU,CPU)
1294 * 5. Read/written by CPU
1295 * 6. Read/written by GPU
1298 * Same as last example, (CPU, CPU)
1300 * Nothing changes (assertions find that it is not in the GPU)
1301 * 3. Read/written by GPU
1302 * execbuffer calls set_domain (RENDER, RENDER)
1303 * flush_domains gets CPU
1304 * invalidate_domains gets GPU
1306 * MI_FLUSH and drm_agp_chipset_flush
1307 * 4. set_domain (CPU, CPU)
1308 * flush_domains gets GPU
1309 * invalidate_domains gets CPU
1310 * wait_rendering (obj) to make sure all drawing is complete.
1311 * This will include an MI_FLUSH to get the data from GPU
1313 * clflush (obj) to invalidate the CPU cache
1314 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1315 * 5. Read/written by CPU
1316 * cache lines are loaded and dirtied
1317 * 6. Read/written by GPU
1318 * Same as last GPU access
1320 * Case 3: The constant buffer
1325 * 4. Updated (written) by CPU again
1334 * flush_domains = CPU
1335 * invalidate_domains = RENDER
1338 * drm_agp_chipset_flush
1339 * 4. Updated (written) by CPU again
1341 * flush_domains = 0 (no previous write domain)
1342 * invalidate_domains = 0 (no new read domains)
1345 * flush_domains = CPU
1346 * invalidate_domains = RENDER
1349 * drm_agp_chipset_flush
1352 i915_gem_object_set_domain(struct drm_gem_object *obj,
1353 uint32_t read_domains,
1354 uint32_t write_domain)
1356 struct drm_device *dev = obj->dev;
1357 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1358 uint32_t invalidate_domains = 0;
1359 uint32_t flush_domains = 0;
1363 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1365 obj->read_domains, read_domains,
1366 obj->write_domain, write_domain);
1369 * If the object isn't moving to a new write domain,
1370 * let the object stay in multiple read domains
1372 if (write_domain == 0)
1373 read_domains |= obj->read_domains;
1375 obj_priv->dirty = 1;
1378 * Flush the current write domain if
1379 * the new read domains don't match. Invalidate
1380 * any read domains which differ from the old
1383 if (obj->write_domain && obj->write_domain != read_domains) {
1384 flush_domains |= obj->write_domain;
1385 invalidate_domains |= read_domains & ~obj->write_domain;
1388 * Invalidate any read caches which may have
1389 * stale data. That is, any new read domains.
1391 invalidate_domains |= read_domains & ~obj->read_domains;
1392 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1394 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1395 __func__, flush_domains, invalidate_domains);
1398 * If we're invalidating the CPU cache and flushing a GPU cache,
1399 * then pause for rendering so that the GPU caches will be
1400 * flushed before the cpu cache is invalidated
1402 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1403 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1404 I915_GEM_DOMAIN_GTT))) {
1405 ret = i915_gem_object_wait_rendering(obj);
1409 i915_gem_clflush_object(obj);
1412 if ((write_domain | flush_domains) != 0)
1413 obj->write_domain = write_domain;
1415 /* If we're invalidating the CPU domain, clear the per-page CPU
1416 * domain list as well.
1418 if (obj_priv->page_cpu_valid != NULL &&
1419 (write_domain != 0 ||
1420 read_domains & I915_GEM_DOMAIN_CPU)) {
1421 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1423 obj_priv->page_cpu_valid = NULL;
1425 obj->read_domains = read_domains;
1427 dev->invalidate_domains |= invalidate_domains;
1428 dev->flush_domains |= flush_domains;
1430 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1432 obj->read_domains, obj->write_domain,
1433 dev->invalidate_domains, dev->flush_domains);
1439 * Set the read/write domain on a range of the object.
1441 * Currently only implemented for CPU reads, otherwise drops to normal
1442 * i915_gem_object_set_domain().
1445 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1448 uint32_t read_domains,
1449 uint32_t write_domain)
1451 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1454 if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1457 if (read_domains != I915_GEM_DOMAIN_CPU ||
1459 return i915_gem_object_set_domain(obj,
1460 read_domains, write_domain);
1462 /* Wait on any GPU rendering to the object to be flushed. */
1463 ret = i915_gem_object_wait_rendering(obj);
1467 if (obj_priv->page_cpu_valid == NULL) {
1468 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1472 /* Flush the cache on any pages that are still invalid from the CPU's
1475 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1476 if (obj_priv->page_cpu_valid[i])
1479 drm_clflush_pages(obj_priv->page_list + i, 1);
1481 obj_priv->page_cpu_valid[i] = 1;
1488 * Once all of the objects have been set in the proper domain,
1489 * perform the necessary flush and invalidate operations.
1491 * Returns the write domains flushed, for use in flush tracking.
1494 i915_gem_dev_set_domain(struct drm_device *dev)
1496 uint32_t flush_domains = dev->flush_domains;
1499 * Now that all the buffers are synced to the proper domains,
1500 * flush and invalidate the collected domains
1502 if (dev->invalidate_domains | dev->flush_domains) {
1504 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1506 dev->invalidate_domains,
1507 dev->flush_domains);
1510 dev->invalidate_domains,
1511 dev->flush_domains);
1512 dev->invalidate_domains = 0;
1513 dev->flush_domains = 0;
1516 return flush_domains;
1520 * Pin an object to the GTT and evaluate the relocations landing in it.
1523 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1524 struct drm_file *file_priv,
1525 struct drm_i915_gem_exec_object *entry)
1527 struct drm_device *dev = obj->dev;
1528 drm_i915_private_t *dev_priv = dev->dev_private;
1529 struct drm_i915_gem_relocation_entry reloc;
1530 struct drm_i915_gem_relocation_entry __user *relocs;
1531 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1533 void __iomem *reloc_page;
1535 /* Choose the GTT offset for our buffer and put it there. */
1536 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1540 entry->offset = obj_priv->gtt_offset;
1542 relocs = (struct drm_i915_gem_relocation_entry __user *)
1543 (uintptr_t) entry->relocs_ptr;
1544 /* Apply the relocations, using the GTT aperture to avoid cache
1545 * flushing requirements.
1547 for (i = 0; i < entry->relocation_count; i++) {
1548 struct drm_gem_object *target_obj;
1549 struct drm_i915_gem_object *target_obj_priv;
1550 uint32_t reloc_val, reloc_offset;
1551 uint32_t __iomem *reloc_entry;
1553 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1555 i915_gem_object_unpin(obj);
1559 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1560 reloc.target_handle);
1561 if (target_obj == NULL) {
1562 i915_gem_object_unpin(obj);
1565 target_obj_priv = target_obj->driver_private;
1567 /* The target buffer should have appeared before us in the
1568 * exec_object list, so it should have a GTT space bound by now.
1570 if (target_obj_priv->gtt_space == NULL) {
1571 DRM_ERROR("No GTT space found for object %d\n",
1572 reloc.target_handle);
1573 drm_gem_object_unreference(target_obj);
1574 i915_gem_object_unpin(obj);
1578 if (reloc.offset > obj->size - 4) {
1579 DRM_ERROR("Relocation beyond object bounds: "
1580 "obj %p target %d offset %d size %d.\n",
1581 obj, reloc.target_handle,
1582 (int) reloc.offset, (int) obj->size);
1583 drm_gem_object_unreference(target_obj);
1584 i915_gem_object_unpin(obj);
1587 if (reloc.offset & 3) {
1588 DRM_ERROR("Relocation not 4-byte aligned: "
1589 "obj %p target %d offset %d.\n",
1590 obj, reloc.target_handle,
1591 (int) reloc.offset);
1592 drm_gem_object_unreference(target_obj);
1593 i915_gem_object_unpin(obj);
1597 if (reloc.write_domain && target_obj->pending_write_domain &&
1598 reloc.write_domain != target_obj->pending_write_domain) {
1599 DRM_ERROR("Write domain conflict: "
1600 "obj %p target %d offset %d "
1601 "new %08x old %08x\n",
1602 obj, reloc.target_handle,
1605 target_obj->pending_write_domain);
1606 drm_gem_object_unreference(target_obj);
1607 i915_gem_object_unpin(obj);
1612 DRM_INFO("%s: obj %p offset %08x target %d "
1613 "read %08x write %08x gtt %08x "
1614 "presumed %08x delta %08x\n",
1618 (int) reloc.target_handle,
1619 (int) reloc.read_domains,
1620 (int) reloc.write_domain,
1621 (int) target_obj_priv->gtt_offset,
1622 (int) reloc.presumed_offset,
1626 target_obj->pending_read_domains |= reloc.read_domains;
1627 target_obj->pending_write_domain |= reloc.write_domain;
1629 /* If the relocation already has the right value in it, no
1630 * more work needs to be done.
1632 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1633 drm_gem_object_unreference(target_obj);
1637 /* Now that we're going to actually write some data in,
1638 * make sure that any rendering using this buffer's contents
1641 i915_gem_object_wait_rendering(obj);
1643 /* As we're writing through the gtt, flush
1644 * any CPU writes before we write the relocations
1646 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1647 i915_gem_clflush_object(obj);
1648 drm_agp_chipset_flush(dev);
1649 obj->write_domain = 0;
1652 /* Map the page containing the relocation we're going to
1655 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1656 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1659 reloc_entry = (uint32_t __iomem *)(reloc_page +
1660 (reloc_offset & (PAGE_SIZE - 1)));
1661 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1664 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1665 obj, (unsigned int) reloc.offset,
1666 readl(reloc_entry), reloc_val);
1668 writel(reloc_val, reloc_entry);
1669 io_mapping_unmap_atomic(reloc_page);
1671 /* Write the updated presumed offset for this entry back out
1674 reloc.presumed_offset = target_obj_priv->gtt_offset;
1675 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1677 drm_gem_object_unreference(target_obj);
1678 i915_gem_object_unpin(obj);
1682 drm_gem_object_unreference(target_obj);
1687 i915_gem_dump_object(obj, 128, __func__, ~0);
1692 /** Dispatch a batchbuffer to the ring
1695 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1696 struct drm_i915_gem_execbuffer *exec,
1697 uint64_t exec_offset)
1699 drm_i915_private_t *dev_priv = dev->dev_private;
1700 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1701 (uintptr_t) exec->cliprects_ptr;
1702 int nbox = exec->num_cliprects;
1704 uint32_t exec_start, exec_len;
1707 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1708 exec_len = (uint32_t) exec->batch_len;
1710 if ((exec_start | exec_len) & 0x7) {
1711 DRM_ERROR("alignment\n");
1718 count = nbox ? nbox : 1;
1720 for (i = 0; i < count; i++) {
1722 int ret = i915_emit_box(dev, boxes, i,
1723 exec->DR1, exec->DR4);
1728 if (IS_I830(dev) || IS_845G(dev)) {
1730 OUT_RING(MI_BATCH_BUFFER);
1731 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1732 OUT_RING(exec_start + exec_len - 4);
1737 if (IS_I965G(dev)) {
1738 OUT_RING(MI_BATCH_BUFFER_START |
1740 MI_BATCH_NON_SECURE_I965);
1741 OUT_RING(exec_start);
1743 OUT_RING(MI_BATCH_BUFFER_START |
1745 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1751 /* XXX breadcrumb */
1755 /* Throttle our rendering by waiting until the ring has completed our requests
1756 * emitted over 20 msec ago.
1758 * This should get us reasonable parallelism between CPU and GPU but also
1759 * relatively low latency when blocking on a particular request to finish.
1762 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1764 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1768 mutex_lock(&dev->struct_mutex);
1769 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1770 i915_file_priv->mm.last_gem_throttle_seqno =
1771 i915_file_priv->mm.last_gem_seqno;
1773 ret = i915_wait_request(dev, seqno);
1774 mutex_unlock(&dev->struct_mutex);
1779 i915_gem_execbuffer(struct drm_device *dev, void *data,
1780 struct drm_file *file_priv)
1782 drm_i915_private_t *dev_priv = dev->dev_private;
1783 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1784 struct drm_i915_gem_execbuffer *args = data;
1785 struct drm_i915_gem_exec_object *exec_list = NULL;
1786 struct drm_gem_object **object_list = NULL;
1787 struct drm_gem_object *batch_obj;
1788 int ret, i, pinned = 0;
1789 uint64_t exec_offset;
1790 uint32_t seqno, flush_domains;
1793 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1794 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1797 if (args->buffer_count < 1) {
1798 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1801 /* Copy in the exec list from userland */
1802 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1804 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1806 if (exec_list == NULL || object_list == NULL) {
1807 DRM_ERROR("Failed to allocate exec or object list "
1809 args->buffer_count);
1813 ret = copy_from_user(exec_list,
1814 (struct drm_i915_relocation_entry __user *)
1815 (uintptr_t) args->buffers_ptr,
1816 sizeof(*exec_list) * args->buffer_count);
1818 DRM_ERROR("copy %d exec entries failed %d\n",
1819 args->buffer_count, ret);
1823 mutex_lock(&dev->struct_mutex);
1825 i915_verify_inactive(dev, __FILE__, __LINE__);
1827 if (dev_priv->mm.wedged) {
1828 DRM_ERROR("Execbuf while wedged\n");
1829 mutex_unlock(&dev->struct_mutex);
1833 if (dev_priv->mm.suspended) {
1834 DRM_ERROR("Execbuf while VT-switched.\n");
1835 mutex_unlock(&dev->struct_mutex);
1839 /* Zero the global flush/invalidate flags. These
1840 * will be modified as each object is bound to the
1843 dev->invalidate_domains = 0;
1844 dev->flush_domains = 0;
1846 /* Look up object handles and perform the relocations */
1847 for (i = 0; i < args->buffer_count; i++) {
1848 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1849 exec_list[i].handle);
1850 if (object_list[i] == NULL) {
1851 DRM_ERROR("Invalid object handle %d at index %d\n",
1852 exec_list[i].handle, i);
1857 object_list[i]->pending_read_domains = 0;
1858 object_list[i]->pending_write_domain = 0;
1859 ret = i915_gem_object_pin_and_relocate(object_list[i],
1863 DRM_ERROR("object bind and relocate failed %d\n", ret);
1869 /* Set the pending read domains for the batch buffer to COMMAND */
1870 batch_obj = object_list[args->buffer_count-1];
1871 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1872 batch_obj->pending_write_domain = 0;
1874 i915_verify_inactive(dev, __FILE__, __LINE__);
1876 for (i = 0; i < args->buffer_count; i++) {
1877 struct drm_gem_object *obj = object_list[i];
1879 /* make sure all previous memory operations have passed */
1880 ret = i915_gem_object_set_domain(obj,
1881 obj->pending_read_domains,
1882 obj->pending_write_domain);
1884 /* As we've partially updated domains on our buffers,
1885 * we have to emit the flush we've accumulated
1886 * before exiting, or we'll have broken the
1887 * active/flushing/inactive invariants.
1889 * We'll potentially have some things marked as
1890 * being in write domains that they actually aren't,
1891 * but that should be merely a minor performance loss.
1893 flush_domains = i915_gem_dev_set_domain(dev);
1894 (void)i915_add_request(dev, flush_domains);
1899 i915_verify_inactive(dev, __FILE__, __LINE__);
1901 /* Flush/invalidate caches and chipset buffer */
1902 flush_domains = i915_gem_dev_set_domain(dev);
1904 i915_verify_inactive(dev, __FILE__, __LINE__);
1907 for (i = 0; i < args->buffer_count; i++) {
1908 i915_gem_object_check_coherency(object_list[i],
1909 exec_list[i].handle);
1913 exec_offset = exec_list[args->buffer_count - 1].offset;
1916 i915_gem_dump_object(object_list[args->buffer_count - 1],
1922 (void)i915_add_request(dev, flush_domains);
1924 /* Exec the batchbuffer */
1925 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1927 DRM_ERROR("dispatch failed %d\n", ret);
1932 * Ensure that the commands in the batch buffer are
1933 * finished before the interrupt fires
1935 flush_domains = i915_retire_commands(dev);
1937 i915_verify_inactive(dev, __FILE__, __LINE__);
1940 * Get a seqno representing the execution of the current buffer,
1941 * which we can wait on. We would like to mitigate these interrupts,
1942 * likely by only creating seqnos occasionally (so that we have
1943 * *some* interrupts representing completion of buffers that we can
1944 * wait on when trying to clear up gtt space).
1946 seqno = i915_add_request(dev, flush_domains);
1948 i915_file_priv->mm.last_gem_seqno = seqno;
1949 for (i = 0; i < args->buffer_count; i++) {
1950 struct drm_gem_object *obj = object_list[i];
1952 i915_gem_object_move_to_active(obj, seqno);
1954 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1958 i915_dump_lru(dev, __func__);
1961 i915_verify_inactive(dev, __FILE__, __LINE__);
1963 /* Copy the new buffer offsets back to the user's exec list. */
1964 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1965 (uintptr_t) args->buffers_ptr,
1967 sizeof(*exec_list) * args->buffer_count);
1969 DRM_ERROR("failed to copy %d exec entries "
1970 "back to user (%d)\n",
1971 args->buffer_count, ret);
1973 if (object_list != NULL) {
1974 for (i = 0; i < pinned; i++)
1975 i915_gem_object_unpin(object_list[i]);
1977 for (i = 0; i < args->buffer_count; i++)
1978 drm_gem_object_unreference(object_list[i]);
1980 mutex_unlock(&dev->struct_mutex);
1983 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1985 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1992 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1994 struct drm_device *dev = obj->dev;
1995 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1998 i915_verify_inactive(dev, __FILE__, __LINE__);
1999 if (obj_priv->gtt_space == NULL) {
2000 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2002 DRM_ERROR("Failure to bind: %d\n", ret);
2006 obj_priv->pin_count++;
2008 /* If the object is not active and not pending a flush,
2009 * remove it from the inactive list
2011 if (obj_priv->pin_count == 1) {
2012 atomic_inc(&dev->pin_count);
2013 atomic_add(obj->size, &dev->pin_memory);
2014 if (!obj_priv->active &&
2015 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2016 I915_GEM_DOMAIN_GTT)) == 0 &&
2017 !list_empty(&obj_priv->list))
2018 list_del_init(&obj_priv->list);
2020 i915_verify_inactive(dev, __FILE__, __LINE__);
2026 i915_gem_object_unpin(struct drm_gem_object *obj)
2028 struct drm_device *dev = obj->dev;
2029 drm_i915_private_t *dev_priv = dev->dev_private;
2030 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2032 i915_verify_inactive(dev, __FILE__, __LINE__);
2033 obj_priv->pin_count--;
2034 BUG_ON(obj_priv->pin_count < 0);
2035 BUG_ON(obj_priv->gtt_space == NULL);
2037 /* If the object is no longer pinned, and is
2038 * neither active nor being flushed, then stick it on
2041 if (obj_priv->pin_count == 0) {
2042 if (!obj_priv->active &&
2043 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2044 I915_GEM_DOMAIN_GTT)) == 0)
2045 list_move_tail(&obj_priv->list,
2046 &dev_priv->mm.inactive_list);
2047 atomic_dec(&dev->pin_count);
2048 atomic_sub(obj->size, &dev->pin_memory);
2050 i915_verify_inactive(dev, __FILE__, __LINE__);
2054 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2055 struct drm_file *file_priv)
2057 struct drm_i915_gem_pin *args = data;
2058 struct drm_gem_object *obj;
2059 struct drm_i915_gem_object *obj_priv;
2062 mutex_lock(&dev->struct_mutex);
2064 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2066 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2068 mutex_unlock(&dev->struct_mutex);
2071 obj_priv = obj->driver_private;
2073 ret = i915_gem_object_pin(obj, args->alignment);
2075 drm_gem_object_unreference(obj);
2076 mutex_unlock(&dev->struct_mutex);
2080 /* XXX - flush the CPU caches for pinned objects
2081 * as the X server doesn't manage domains yet
2083 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2084 i915_gem_clflush_object(obj);
2085 drm_agp_chipset_flush(dev);
2086 obj->write_domain = 0;
2088 args->offset = obj_priv->gtt_offset;
2089 drm_gem_object_unreference(obj);
2090 mutex_unlock(&dev->struct_mutex);
2096 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2097 struct drm_file *file_priv)
2099 struct drm_i915_gem_pin *args = data;
2100 struct drm_gem_object *obj;
2102 mutex_lock(&dev->struct_mutex);
2104 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2106 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2108 mutex_unlock(&dev->struct_mutex);
2112 i915_gem_object_unpin(obj);
2114 drm_gem_object_unreference(obj);
2115 mutex_unlock(&dev->struct_mutex);
2120 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2121 struct drm_file *file_priv)
2123 struct drm_i915_gem_busy *args = data;
2124 struct drm_gem_object *obj;
2125 struct drm_i915_gem_object *obj_priv;
2127 mutex_lock(&dev->struct_mutex);
2128 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2130 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2132 mutex_unlock(&dev->struct_mutex);
2136 obj_priv = obj->driver_private;
2137 args->busy = obj_priv->active;
2139 drm_gem_object_unreference(obj);
2140 mutex_unlock(&dev->struct_mutex);
2145 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2146 struct drm_file *file_priv)
2148 return i915_gem_ring_throttle(dev, file_priv);
2151 int i915_gem_init_object(struct drm_gem_object *obj)
2153 struct drm_i915_gem_object *obj_priv;
2155 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2156 if (obj_priv == NULL)
2160 * We've just allocated pages from the kernel,
2161 * so they've just been written by the CPU with
2162 * zeros. They'll need to be clflushed before we
2163 * use them with the GPU.
2165 obj->write_domain = I915_GEM_DOMAIN_CPU;
2166 obj->read_domains = I915_GEM_DOMAIN_CPU;
2168 obj_priv->agp_type = AGP_USER_MEMORY;
2170 obj->driver_private = obj_priv;
2171 obj_priv->obj = obj;
2172 INIT_LIST_HEAD(&obj_priv->list);
2176 void i915_gem_free_object(struct drm_gem_object *obj)
2178 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2180 while (obj_priv->pin_count > 0)
2181 i915_gem_object_unpin(obj);
2183 i915_gem_object_unbind(obj);
2185 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2186 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2190 i915_gem_set_domain(struct drm_gem_object *obj,
2191 struct drm_file *file_priv,
2192 uint32_t read_domains,
2193 uint32_t write_domain)
2195 struct drm_device *dev = obj->dev;
2197 uint32_t flush_domains;
2199 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2201 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2204 flush_domains = i915_gem_dev_set_domain(obj->dev);
2206 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2207 (void) i915_add_request(dev, flush_domains);
2212 /** Unbinds all objects that are on the given buffer list. */
2214 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2216 struct drm_gem_object *obj;
2217 struct drm_i915_gem_object *obj_priv;
2220 while (!list_empty(head)) {
2221 obj_priv = list_first_entry(head,
2222 struct drm_i915_gem_object,
2224 obj = obj_priv->obj;
2226 if (obj_priv->pin_count != 0) {
2227 DRM_ERROR("Pinned object in unbind list\n");
2228 mutex_unlock(&dev->struct_mutex);
2232 ret = i915_gem_object_unbind(obj);
2234 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2236 mutex_unlock(&dev->struct_mutex);
2246 i915_gem_idle(struct drm_device *dev)
2248 drm_i915_private_t *dev_priv = dev->dev_private;
2249 uint32_t seqno, cur_seqno, last_seqno;
2252 mutex_lock(&dev->struct_mutex);
2254 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
2255 mutex_unlock(&dev->struct_mutex);
2259 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2260 * We need to replace this with a semaphore, or something.
2262 dev_priv->mm.suspended = 1;
2264 /* Cancel the retire work handler, wait for it to finish if running
2266 mutex_unlock(&dev->struct_mutex);
2267 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2268 mutex_lock(&dev->struct_mutex);
2270 i915_kernel_lost_context(dev);
2272 /* Flush the GPU along with all non-CPU write domains
2274 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2275 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2276 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2277 I915_GEM_DOMAIN_GTT));
2280 mutex_unlock(&dev->struct_mutex);
2284 dev_priv->mm.waiting_gem_seqno = seqno;
2288 cur_seqno = i915_get_gem_seqno(dev);
2289 if (i915_seqno_passed(cur_seqno, seqno))
2291 if (last_seqno == cur_seqno) {
2292 if (stuck++ > 100) {
2293 DRM_ERROR("hardware wedged\n");
2294 dev_priv->mm.wedged = 1;
2295 DRM_WAKEUP(&dev_priv->irq_queue);
2300 last_seqno = cur_seqno;
2302 dev_priv->mm.waiting_gem_seqno = 0;
2304 i915_gem_retire_requests(dev);
2306 if (!dev_priv->mm.wedged) {
2307 /* Active and flushing should now be empty as we've
2308 * waited for a sequence higher than any pending execbuffer
2310 WARN_ON(!list_empty(&dev_priv->mm.active_list));
2311 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
2312 /* Request should now be empty as we've also waited
2313 * for the last request in the list
2315 WARN_ON(!list_empty(&dev_priv->mm.request_list));
2318 /* Empty the active and flushing lists to inactive. If there's
2319 * anything left at this point, it means that we're wedged and
2320 * nothing good's going to happen by leaving them there. So strip
2321 * the GPU domains and just stuff them onto inactive.
2323 while (!list_empty(&dev_priv->mm.active_list)) {
2324 struct drm_i915_gem_object *obj_priv;
2326 obj_priv = list_first_entry(&dev_priv->mm.active_list,
2327 struct drm_i915_gem_object,
2329 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2330 i915_gem_object_move_to_inactive(obj_priv->obj);
2333 while (!list_empty(&dev_priv->mm.flushing_list)) {
2334 struct drm_i915_gem_object *obj_priv;
2336 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2337 struct drm_i915_gem_object,
2339 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2340 i915_gem_object_move_to_inactive(obj_priv->obj);
2344 /* Move all inactive buffers out of the GTT. */
2345 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2346 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
2348 mutex_unlock(&dev->struct_mutex);
2352 i915_gem_cleanup_ringbuffer(dev);
2353 mutex_unlock(&dev->struct_mutex);
2359 i915_gem_init_hws(struct drm_device *dev)
2361 drm_i915_private_t *dev_priv = dev->dev_private;
2362 struct drm_gem_object *obj;
2363 struct drm_i915_gem_object *obj_priv;
2366 /* If we need a physical address for the status page, it's already
2367 * initialized at driver load time.
2369 if (!I915_NEED_GFX_HWS(dev))
2372 obj = drm_gem_object_alloc(dev, 4096);
2374 DRM_ERROR("Failed to allocate status page\n");
2377 obj_priv = obj->driver_private;
2378 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
2380 ret = i915_gem_object_pin(obj, 4096);
2382 drm_gem_object_unreference(obj);
2386 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2388 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
2389 if (dev_priv->hw_status_page == NULL) {
2390 DRM_ERROR("Failed to map status page.\n");
2391 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2392 drm_gem_object_unreference(obj);
2395 dev_priv->hws_obj = obj;
2396 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2397 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2398 I915_READ(HWS_PGA); /* posting read */
2399 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2405 i915_gem_init_ringbuffer(struct drm_device *dev)
2407 drm_i915_private_t *dev_priv = dev->dev_private;
2408 struct drm_gem_object *obj;
2409 struct drm_i915_gem_object *obj_priv;
2413 ret = i915_gem_init_hws(dev);
2417 obj = drm_gem_object_alloc(dev, 128 * 1024);
2419 DRM_ERROR("Failed to allocate ringbuffer\n");
2422 obj_priv = obj->driver_private;
2424 ret = i915_gem_object_pin(obj, 4096);
2426 drm_gem_object_unreference(obj);
2430 /* Set up the kernel mapping for the ring. */
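/* The ring object is 128KB, a power of two, so size - 1 serves as the tail wrap mask. */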
2431 dev_priv->ring.Size = obj->size;
2432 dev_priv->ring.tail_mask = obj->size - 1;
2434 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2435 dev_priv->ring.map.size = obj->size;
2436 dev_priv->ring.map.type = 0;
2437 dev_priv->ring.map.flags = 0;
2438 dev_priv->ring.map.mtrr = 0;
2440 drm_core_ioremap_wc(&dev_priv->ring.map, dev);
2441 if (dev_priv->ring.map.handle == NULL) {
2442 DRM_ERROR("Failed to map ringbuffer.\n");
2443 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2444 drm_gem_object_unreference(obj);
2447 dev_priv->ring.ring_obj = obj;
2448 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2450 /* Stop the ring if it's running. */
2451 I915_WRITE(PRB0_CTL, 0);
2452 I915_WRITE(PRB0_TAIL, 0);
2453 I915_WRITE(PRB0_HEAD, 0);
2455 /* Initialize the ring. */
2456 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2457 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2459 /* G45 ring initialization fails to reset head to zero */
2461 DRM_ERROR("Ring head not reset to zero "
2462 "ctl %08x head %08x tail %08x start %08x\n",
2463 I915_READ(PRB0_CTL),
2464 I915_READ(PRB0_HEAD),
2465 I915_READ(PRB0_TAIL),
2466 I915_READ(PRB0_START));
2467 I915_WRITE(PRB0_HEAD, 0);
2469 DRM_ERROR("Ring head forced to zero "
2470 "ctl %08x head %08x tail %08x start %08x\n",
2471 I915_READ(PRB0_CTL),
2472 I915_READ(PRB0_HEAD),
2473 I915_READ(PRB0_TAIL),
2474 I915_READ(PRB0_START));
2477 I915_WRITE(PRB0_CTL,
2478 ((obj->size - 4096) & RING_NR_PAGES) |
2482 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2484 /* If the head is still not zero, the ring is dead */
2486 DRM_ERROR("Ring initialization failed "
2487 "ctl %08x head %08x tail %08x start %08x\n",
2488 I915_READ(PRB0_CTL),
2489 I915_READ(PRB0_HEAD),
2490 I915_READ(PRB0_TAIL),
2491 I915_READ(PRB0_START));
2495 /* Update our cache of the ring state */
2496 i915_kernel_lost_context(dev);
2502 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2504 drm_i915_private_t *dev_priv = dev->dev_private;
2506 if (dev_priv->ring.ring_obj == NULL)
2509 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2511 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2512 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2513 dev_priv->ring.ring_obj = NULL;
2514 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2516 if (dev_priv->hws_obj != NULL) {
2517 struct drm_gem_object *obj = dev_priv->hws_obj;
2518 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2520 kunmap(obj_priv->page_list[0]);
2521 i915_gem_object_unpin(obj);
2522 drm_gem_object_unreference(obj);
2523 dev_priv->hws_obj = NULL;
2524 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2525 dev_priv->hw_status_page = NULL;
2527 /* Write high address into HWS_PGA when disabling. */
2528 I915_WRITE(HWS_PGA, 0x1ffff000);
2533 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2534 struct drm_file *file_priv)
2536 drm_i915_private_t *dev_priv = dev->dev_private;
2539 if (dev_priv->mm.wedged) {
2540 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2541 dev_priv->mm.wedged = 0;
2544 ret = i915_gem_init_ringbuffer(dev);
2548 dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
2549 dev->agp->agp_info.aper_size
2552 mutex_lock(&dev->struct_mutex);
2553 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2554 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2555 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2556 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2557 dev_priv->mm.suspended = 0;
2558 mutex_unlock(&dev->struct_mutex);
2560 drm_irq_install(dev);
2566 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2567 struct drm_file *file_priv)
2569 drm_i915_private_t *dev_priv = dev->dev_private;
2572 ret = i915_gem_idle(dev);
2573 drm_irq_uninstall(dev);
2575 io_mapping_free(dev_priv->mm.gtt_mapping);
2580 i915_gem_lastclose(struct drm_device *dev)
2584 ret = i915_gem_idle(dev);
2586 DRM_ERROR("failed to idle hardware: %d\n", ret);
2590 i915_gem_load(struct drm_device *dev)
2592 drm_i915_private_t *dev_priv = dev->dev_private;
2594 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2595 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2596 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2597 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2598 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2599 i915_gem_retire_work_handler);
2600 dev_priv->mm.next_gem_seqno = 1;
2602 i915_gem_detect_bit_6_swizzle(dev);