/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
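
/* The GPU domains are all of the domains other than the two CPU-visible
 * ones (CPU and GTT): caches that live on the device itself.
 */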
#define I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
                            uint32_t read_domains,
                            uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                                 uint64_t offset,
                                 uint64_t size,
                                 uint32_t read_domains,
                                 uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
                    struct drm_file *file_priv,
                    uint32_t read_domains,
                    uint32_t write_domain);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);

static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev);

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;

        mutex_lock(&dev->struct_mutex);

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
            (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
            args->gtt_end - args->gtt_start);

        dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj_priv;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        args->aper_size = dev->gtt_total;
        args->aper_available_size = args->aper_size;
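
        /* Pinned buffers cannot be evicted to make room, so subtract them
         * from the space reported as available.
         */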
        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
                if (obj_priv->pin_count > 0)
                        args->aper_available_size -= obj_priv->obj->size;
        }

        return 0;
}


/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int handle, ret;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = drm_gem_object_alloc(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        mutex_lock(&dev->struct_mutex);
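        /* drm_gem_handle_create() took its own reference on the object, so
         * drop the allocation-time reference; the handle now keeps the
         * object alive.
         */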
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        ssize_t read;
        loff_t offset;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
                                               I915_GEM_DOMAIN_CPU, 0);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
                        args->size, &offset);
        if (read != args->size) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                if (read < 0)
                        return read;
                else
                        return -EINVAL;
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/* This is the fast write path: the destination is mapped atomically, so we
 * may not sleep and therefore cannot handle page faults in the source data.
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/* This is the slow write path: the destination is mapped non-atomically, so
 * we are allowed to sleep while handling page faults in the source data.
 */

static inline int
slow_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char __iomem *vaddr;
        unsigned long unwritten;

        vaddr = io_mapping_map_wc(mapping, page_base);
        if (vaddr == NULL)
                return -EFAULT;
        unwritten = __copy_from_user(vaddr + page_offset,
                                     user_data, length);
        io_mapping_unmap(vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
}

static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                    struct drm_i915_gem_pwrite *args,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
        ret = i915_gem_set_domain(obj, file_priv,
                                  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
        if (ret)
                goto fail;

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
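                /* Example with 4K pages: an offset of 0x12345 gives
                 * page_base 0x12000 and page_offset 0x345; with 0x1000
                 * bytes remaining, page_length is clamped to 0xcbb so the
                 * copy never crosses the page boundary.
                 */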
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                      page_offset, user_data, page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. In this case, use the
                 * non-atomic function
                 */
                if (ret) {
                        ret = slow_user_write(dev_priv->mm.gtt_mapping,
                                              page_base, page_offset,
                                              user_data, page_length);
                        if (ret)
                                goto fail;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail:
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file_priv)
{
        int ret;
        loff_t offset;
        ssize_t written;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_set_domain(obj, file_priv,
                                  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        written = vfs_write(obj->filp,
                            (char __user *)(uintptr_t) args->data_ptr,
                            args->size, &offset);
        if (written != args->size) {
                mutex_unlock(&dev->struct_mutex);
                if (written < 0)
                        return written;
                else
                        return -EINVAL;
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->tiling_mode == I915_TILING_NONE &&
            dev->gtt_total != 0)
                ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
        else
                ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

#if WATCH_PWRITE
        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);
#endif

        drm_gem_object_unreference(obj);

        return ret;
}

/**
 * Called when user space prepares to use an object
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
                 obj, obj->size, args->read_domains, args->write_domain);
#endif
        ret = i915_gem_set_domain(obj, file_priv,
                                  args->read_domains, args->write_domain);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p %d)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = obj->driver_private;

        /* Pinned buffers may be scanout, so flush the cache */
        if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
                i915_gem_clflush_object(obj);
                drm_agp_chipset_flush(dev);
        }
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}

static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;

        if (obj_priv->page_list == NULL)
                return;

        for (i = 0; i < page_count; i++)
                if (obj_priv->page_list[i] != NULL) {
                        if (obj_priv->dirty)
                                set_page_dirty(obj_priv->page_list[i]);
                        mark_page_accessed(obj_priv->page_list[i]);
                        page_cache_release(obj_priv->page_list[i]);
                }
        obj_priv->dirty = 0;

        drm_free(obj_priv->page_list,
                 page_count * sizeof(struct page *),
                 DRM_MEM_DRIVER);
        obj_priv->page_list = NULL;
}

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
        /* Move from whatever list we were on to the tail of execution. */
        list_move_tail(&obj_priv->list,
                       &dev_priv->mm.active_list);
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
                list_del_init(&obj_priv->list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
        i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
        RING_LOCALS;

        request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
        if (request == NULL)
                return 0;

        /* Grab the seqno we're going to make this request be, and bump the
         * next (skipping 0 so it can be the reserved no-seqno value).
         */
        seqno = dev_priv->mm.next_gem_seqno;
        dev_priv->mm.next_gem_seqno++;
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;
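
        /* Write the seqno into the hardware status page, then raise a user
         * interrupt so that waiters sleeping in i915_wait_request() are
         * woken up.
         */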
        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(seqno);

        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();

        DRM_DEBUG("%d\n", seqno);

        request->seqno = seqno;
        request->emitted_jiffies = jiffies;
        request->flush_domains = flush_domains;
        was_empty = list_empty(&dev_priv->mm.request_list);
        list_add_tail(&request->list, &dev_priv->mm.request_list);

        if (was_empty && !dev_priv->mm.suspended)
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
        RING_LOCALS;

        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
        BEGIN_LP_RING(2);
        OUT_RING(cmd);
        OUT_RING(0); /* noop */
        ADVANCE_LP_RING();
        return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
                        struct drm_i915_gem_request *request)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        while (!list_empty(&dev_priv->mm.active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;

                obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                            struct drm_i915_gem_object,
                                            list);
                obj = obj_priv->obj;

                /* If the seqno being retired doesn't match the oldest in the
                 * list, then the oldest in the list must still be newer than
                 * this seqno.
                 */
                if (obj_priv->last_rendering_seqno != request->seqno)
                        return;
#if WATCH_LRU
                DRM_INFO("%s: retire %d moves to inactive list %p\n",
                         __func__, request->seqno, obj);
#endif

                if (obj->write_domain != 0) {
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.flushing_list);
                } else {
                        i915_gem_object_move_to_inactive(obj);
                }
        }

        if (request->flush_domains != 0) {
                struct drm_i915_gem_object *obj_priv, *next;

                /* Clear the write domain and activity from any buffers
                 * that are just waiting for a flush matching the one retired.
                 */
                list_for_each_entry_safe(obj_priv, next,
                                         &dev_priv->mm.flushing_list, list) {
                        struct drm_gem_object *obj = obj_priv->obj;

                        if (obj->write_domain & request->flush_domains) {
                                obj->write_domain = 0;
                                i915_gem_object_move_to_inactive(obj);
                        }
                }
        }
}

/**
 * Returns true if seq1 is later than seq2.
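 *
 * The comparison uses wrapping arithmetic: casting (seq1 - seq2) to int32_t
 * means that, for example, seqno 3 counts as later than 0xfffffffd, so the
 * test remains correct across 32-bit wraparound as long as outstanding
 * seqnos stay within 2^31 of each other.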
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;

        seqno = i915_get_gem_seqno(dev);

        while (!list_empty(&dev_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

                request = list_first_entry(&dev_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
                    dev_priv->mm.wedged) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
                        drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
                } else
                        break;
        }
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;

        dev_priv = container_of(work, drm_i915_private_t,
                                mm.retire_work.work);
        dev = dev_priv->dev;

        mutex_lock(&dev->struct_mutex);
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = 0;

        BUG_ON(seqno == 0);

        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
                                               dev_priv->mm.wedged);
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
        }
        if (dev_priv->mm.wedged)
                ret = -EIO;

        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                          __func__, ret, seqno, i915_get_gem_seqno(dev));

        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
         */
        if (ret == 0)
                i915_gem_retire_requests(dev);

        return ret;
}

static void
i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd;
        RING_LOCALS;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
#endif

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);

        if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
                                                     I915_GEM_DOMAIN_GTT)) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (!IS_I965G(dev)) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                BEGIN_LP_RING(2);
                OUT_RING(cmd);
                OUT_RING(0); /* noop */
                ADVANCE_LP_RING();
        }
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret;

        /* If there are writes queued to the buffer, flush and
         * create a new seqno to wait for.
         */
        if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
                uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
                DRM_INFO("%s: flushing object %p from write domain %08x\n",
                          __func__, obj, write_domain);
#endif
                i915_gem_flush(dev, 0, write_domain);

                i915_gem_object_move_to_active(obj);
                obj_priv->last_rendering_seqno = i915_add_request(dev,
                                                                  write_domain);
                BUG_ON(obj_priv->last_rendering_seqno == 0);
#if WATCH_LRU
                DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
        }

        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
        if (obj_priv->active) {
#if WATCH_BUF
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
#endif
                ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret = 0;

#if WATCH_BUF
        DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
        DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
        if (obj_priv->gtt_space == NULL)
                return 0;

        if (obj_priv->pin_count != 0) {
                DRM_ERROR("Attempting to unbind pinned buffer\n");
                return -EINVAL;
        }

        /* Wait for any rendering to complete
         */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret) {
                DRM_ERROR("wait_rendering failed: %d\n", ret);
                return ret;
        }

        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
         * also ensure that all pending GPU writes are finished
         * before we unbind.
         */
        ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
                                         I915_GEM_DOMAIN_CPU);
        if (ret) {
                DRM_ERROR("set_domain failed: %d\n", ret);
                return ret;
        }

        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;
        }

        BUG_ON(obj_priv->active);

        i915_gem_object_free_page_list(obj);

        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
                atomic_sub(obj->size, &dev->gtt_memory);

                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
        }

        /* Remove ourselves from the LRU list if present. */
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);

        return 0;
}
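
/**
 * Frees up GTT space by evicting one buffer, trying the cheapest source
 * first: an already-inactive buffer, then waiting for the oldest
 * outstanding request to retire, then emitting a flush so that buffers
 * stuck on the flushing list can become inactive.
 */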
static int
i915_gem_evict_something(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        for (;;) {
                /* If there's an inactive buffer available now, grab it
                 * and be done.
                 */
                if (!list_empty(&dev_priv->mm.inactive_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;
                        BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
                        BUG_ON(obj_priv->active);

                        /* Wait on the rendering and unbind the buffer. */
                        ret = i915_gem_object_unbind(obj);
                        break;
                }

                /* If we didn't get anything, but the ring is still processing
                 * things, wait for one of those things to finish and hopefully
                 * leave us a buffer to evict.
                 */
                if (!list_empty(&dev_priv->mm.request_list)) {
                        struct drm_i915_gem_request *request;

                        request = list_first_entry(&dev_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
                                                   list);

                        ret = i915_wait_request(dev, request->seqno);
                        if (ret)
                                break;

                        /* If waiting caused an object to become inactive,
                         * then loop around and grab it. Otherwise, assume
                         * that waiting freed and unbound something, so
                         * there should now be some space in the GTT.
                         */
                        if (!list_empty(&dev_priv->mm.inactive_list))
                                continue;
                        break;
                }

                /* If we didn't have anything on the request list but there
                 * are buffers awaiting a flush, emit one and try again.
                 * When we wait on it, those buffers waiting for that flush
                 * will get moved to inactive.
                 */
                if (!list_empty(&dev_priv->mm.flushing_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;

                        i915_gem_flush(dev,
                                       obj->write_domain,
                                       obj->write_domain);
                        i915_add_request(dev, obj->write_domain);

                        obj = NULL;
                        continue;
                }

                DRM_ERROR("inactive empty %d request empty %d "
                          "flushing empty %d\n",
                          list_empty(&dev_priv->mm.inactive_list),
                          list_empty(&dev_priv->mm.request_list),
                          list_empty(&dev_priv->mm.flushing_list));
                /* If we didn't do any of the above, there's nothing to be done
                 * and we just can't fit it in.
                 */
                return -ENOMEM;
        }
        return ret;
}

static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
        int ret;

        if (obj_priv->page_list)
                return 0;

        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
        BUG_ON(obj_priv->page_list != NULL);
        obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
                                         DRM_MEM_DRIVER);
        if (obj_priv->page_list == NULL) {
1112                 DRM_ERROR("Faled to allocate page list\n");
                return -ENOMEM;
        }

        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_mapping_page(mapping, i, NULL);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        DRM_ERROR("read_mapping_page failed: %d\n", ret);
                        i915_gem_object_free_page_list(obj);
                        return ret;
                }
                obj_priv->page_list[i] = page;
        }
        return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_mm_node *free_space;
        int page_count, ret;

        if (alignment == 0)
                alignment = PAGE_SIZE;
        if (alignment & (PAGE_SIZE - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }

 search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
        if (free_space != NULL) {
                obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                                                       alignment);
                if (obj_priv->gtt_space != NULL) {
                        obj_priv->gtt_space->private = obj;
                        obj_priv->gtt_offset = obj_priv->gtt_space->start;
                }
        }
        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
#if WATCH_LRU
                DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
                if (list_empty(&dev_priv->mm.inactive_list) &&
                    list_empty(&dev_priv->mm.flushing_list) &&
                    list_empty(&dev_priv->mm.active_list)) {
                        DRM_ERROR("GTT full, but LRU list empty\n");
                        return -ENOMEM;
                }

                ret = i915_gem_evict_something(dev);
                if (ret != 0) {
                        DRM_ERROR("Failed to evict a buffer %d\n", ret);
                        return ret;
                }
                goto search_free;
        }

#if WATCH_BUF
        DRM_INFO("Binding object of size %d at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
#endif
        ret = i915_gem_object_get_page_list(obj);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return ret;
        }

        page_count = obj->size / PAGE_SIZE;
        /* Create an AGP memory structure pointing at our pages, and bind it
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->page_list,
                                               page_count,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_free_page_list(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return -ENOMEM;
        }
        atomic_inc(&dev->gtt_count);
        atomic_add(obj->size, &dev->gtt_memory);

        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
        BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
        BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

        return 0;
}

void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object      *obj_priv = obj->driver_private;

        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
        if (obj_priv->page_list == NULL)
                return;

        drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Mapped to GTT
 *      4. Read by GPU
 *      5. Unmapped from GTT
 *      6. Freed
 *
 *      Let's take these a step at a time
 *
 *      1. Allocated
 *              Pages allocated from the kernel may still have
 *              cache contents, so we set them to (CPU, CPU) always.
 *      2. Written by CPU (using pwrite)
 *              The pwrite function calls set_domain (CPU, CPU) and
 *              this function does nothing (as nothing changes)
 *      3. Mapped to GTT
 *              This function asserts that the object is not
 *              currently in any GPU-based read or write domains
 *      4. Read by GPU
 *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *              As write_domain is zero, this function adds in the
 *              current read domains (CPU+COMMAND, 0).
 *              flush_domains is set to CPU.
 *              invalidate_domains is set to COMMAND
 *              clflush is run to get data out of the CPU caches
 *              then i915_dev_set_domain calls i915_gem_flush to
 *              emit an MI_FLUSH and drm_agp_chipset_flush
 *      5. Unmapped from GTT
 *              i915_gem_object_unbind calls set_domain (CPU, CPU)
 *              flush_domains and invalidate_domains end up both zero
 *              so no flushing/invalidating happens
 *      6. Freed
 *              yay, done
 *
 * Case 2: The shared render buffer
 *
 *      1. Allocated
 *      2. Mapped to GTT
 *      3. Read/written by GPU
 *      4. set_domain to (CPU,CPU)
 *      5. Read/written by CPU
 *      6. Read/written by GPU
 *
 *      1. Allocated
 *              Same as last example, (CPU, CPU)
 *      2. Mapped to GTT
 *              Nothing changes (assertions find that it is not in the GPU)
 *      3. Read/written by GPU
 *              execbuffer calls set_domain (RENDER, RENDER)
 *              flush_domains gets CPU
 *              invalidate_domains gets GPU
 *              clflush (obj)
 *              MI_FLUSH and drm_agp_chipset_flush
 *      4. set_domain (CPU, CPU)
 *              flush_domains gets GPU
 *              invalidate_domains gets CPU
 *              wait_rendering (obj) to make sure all drawing is complete.
 *              This will include an MI_FLUSH to get the data from GPU
 *              to memory
 *              clflush (obj) to invalidate the CPU cache
 *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *      5. Read/written by CPU
 *              cache lines are loaded and dirtied
 *      6. Read/written by GPU
 *              Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Read by GPU
 *      4. Updated (written) by CPU again
 *      5. Read by GPU
 *
 *      1. Allocated
 *              (CPU, CPU)
 *      2. Written by CPU
 *              (CPU, CPU)
 *      3. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 *      4. Updated (written) by CPU again
 *              (CPU, CPU)
 *              flush_domains = 0 (no previous write domain)
 *              invalidate_domains = 0 (no new read domains)
 *      5. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 */
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
                            uint32_t read_domains,
                            uint32_t write_domain)
{
        struct drm_device               *dev = obj->dev;
        struct drm_i915_gem_object      *obj_priv = obj->driver_private;
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
        int                             ret;

#if WATCH_BUF
        DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                 __func__, obj,
                 obj->read_domains, read_domains,
                 obj->write_domain, write_domain);
#endif
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
        if (write_domain == 0)
                read_domains |= obj->read_domains;
        else
                obj_priv->dirty = 1;

        /*
         * Flush the current write domain if
         * the new read domains don't match. Invalidate
         * any read domains which differ from the old
         * write domain
         */
        if (obj->write_domain && obj->write_domain != read_domains) {
                flush_domains |= obj->write_domain;
                invalidate_domains |= read_domains & ~obj->write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= read_domains & ~obj->read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
                DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
                         __func__, flush_domains, invalidate_domains);
#endif
                /*
                 * If we're invalidating the CPU cache and flushing a GPU cache,
                 * then pause for rendering so that the GPU caches will be
                 * flushed before the CPU cache is invalidated
                 */
                if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
                    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
                                       I915_GEM_DOMAIN_GTT))) {
                        ret = i915_gem_object_wait_rendering(obj);
                        if (ret)
                                return ret;
                }
                i915_gem_clflush_object(obj);
        }

        if ((write_domain | flush_domains) != 0)
                obj->write_domain = write_domain;

        /* If we're invalidating the CPU domain, clear the per-page CPU
         * domain list as well.
         */
        if (obj_priv->page_cpu_valid != NULL &&
            (write_domain != 0 ||
             read_domains & I915_GEM_DOMAIN_CPU)) {
                drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
                         DRM_MEM_DRIVER);
                obj_priv->page_cpu_valid = NULL;
        }
        obj->read_domains = read_domains;

        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
#if WATCH_BUF
        DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
                 __func__,
                 obj->read_domains, obj->write_domain,
                 dev->invalidate_domains, dev->flush_domains);
#endif
        return 0;
}

/**
 * Set the read/write domain on a range of the object.
 *
 * Currently only implemented for CPU reads, otherwise drops to normal
 * i915_gem_object_set_domain().
 */
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                                 uint64_t offset,
                                 uint64_t size,
                                 uint32_t read_domains,
                                 uint32_t write_domain)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret, i;

        if (obj->read_domains & I915_GEM_DOMAIN_CPU)
                return 0;

        if (read_domains != I915_GEM_DOMAIN_CPU ||
            write_domain != 0)
                return i915_gem_object_set_domain(obj,
                                                  read_domains, write_domain);

        /* Wait on any GPU rendering to the object to be flushed. */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret)
                return ret;
1464         if (obj_priv->page_cpu_valid == NULL) {
1465                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1466                                                       DRM_MEM_DRIVER);
1467         }
1468
1469         /* Flush the cache on any pages that are still invalid from the CPU's
1470          * perspective.
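	 * E.g. (a sketch, assuming 4k pages): an offset of 0x1800 with a
	 * size of 0x1000 spans pages 1 and 2, so the loop below flushes
	 * and marks both of them.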
1471          */
1472         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1473                 if (obj_priv->page_cpu_valid[i])
1474                         continue;
1475
1476                 drm_clflush_pages(obj_priv->page_list + i, 1);
1477
1478                 obj_priv->page_cpu_valid[i] = 1;
1479         }
1480
1481         return 0;
1482 }
1483
1484 /**
1485  * Once all of the objects have been set in the proper domain,
1486  * perform the necessary flush and invalidate operations.
1487  *
1488  * Returns the write domains flushed, for use in flush tracking.
1489  */
1490 static uint32_t
1491 i915_gem_dev_set_domain(struct drm_device *dev)
1492 {
1493         uint32_t flush_domains = dev->flush_domains;
1494
1495         /*
1496          * Now that all the buffers are synced to the proper domains,
1497          * flush and invalidate the collected domains
1498          */
1499         if (dev->invalidate_domains | dev->flush_domains) {
1500 #if WATCH_EXEC
1501                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1502                          __func__,
1503                          dev->invalidate_domains,
1504                          dev->flush_domains);
1505 #endif
1506                 i915_gem_flush(dev,
1507                                dev->invalidate_domains,
1508                                dev->flush_domains);
1509                 dev->invalidate_domains = 0;
1510                 dev->flush_domains = 0;
1511         }
1512
1513         return flush_domains;
1514 }
1515
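/*
 * A typical sequence (see i915_gem_execbuffer() below): call
 * i915_gem_object_set_domain() on each object to accumulate the
 * device-wide masks, invoke i915_gem_dev_set_domain() once for the
 * whole set, and hand the returned value to i915_add_request() so the
 * flush participates in request tracking.
 */
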
1516 /**
1517  * Pin an object to the GTT and evaluate the relocations landing in it.
1518  */
1519 static int
1520 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1521                                  struct drm_file *file_priv,
1522                                  struct drm_i915_gem_exec_object *entry)
1523 {
1524         struct drm_device *dev = obj->dev;
1525         drm_i915_private_t *dev_priv = dev->dev_private;
1526         struct drm_i915_gem_relocation_entry reloc;
1527         struct drm_i915_gem_relocation_entry __user *relocs;
1528         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1529         int i, ret;
1530         void __iomem *reloc_page;
1531
1532         /* Choose the GTT offset for our buffer and put it there. */
1533         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1534         if (ret)
1535                 return ret;
1536
1537         entry->offset = obj_priv->gtt_offset;
1538
1539         relocs = (struct drm_i915_gem_relocation_entry __user *)
1540                  (uintptr_t) entry->relocs_ptr;
1541         /* Apply the relocations, using the GTT aperture to avoid cache
1542          * flushing requirements.
1543          */
1544         for (i = 0; i < entry->relocation_count; i++) {
1545                 struct drm_gem_object *target_obj;
1546                 struct drm_i915_gem_object *target_obj_priv;
1547                 uint32_t reloc_val, reloc_offset;
1548                 uint32_t __iomem *reloc_entry;
1549
1550                 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1551                 if (ret != 0) {
1552                         i915_gem_object_unpin(obj);
1553                         return -EFAULT;
1554                 }
1555
1556                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1557                                                    reloc.target_handle);
1558                 if (target_obj == NULL) {
1559                         i915_gem_object_unpin(obj);
1560                         return -EBADF;
1561                 }
1562                 target_obj_priv = target_obj->driver_private;
1563
1564                 /* The target buffer should have appeared before us in the
1565                  * exec_object list, so it should have a GTT space bound by now.
1566                  */
1567                 if (target_obj_priv->gtt_space == NULL) {
1568                         DRM_ERROR("No GTT space found for object %d\n",
1569                                   reloc.target_handle);
1570                         drm_gem_object_unreference(target_obj);
1571                         i915_gem_object_unpin(obj);
1572                         return -EINVAL;
1573                 }
1574
1575                 if (reloc.offset > obj->size - 4) {
1576                         DRM_ERROR("Relocation beyond object bounds: "
1577                                   "obj %p target %d offset %d size %d.\n",
1578                                   obj, reloc.target_handle,
1579                                   (int) reloc.offset, (int) obj->size);
1580                         drm_gem_object_unreference(target_obj);
1581                         i915_gem_object_unpin(obj);
1582                         return -EINVAL;
1583                 }
1584                 if (reloc.offset & 3) {
1585                         DRM_ERROR("Relocation not 4-byte aligned: "
1586                                   "obj %p target %d offset %d.\n",
1587                                   obj, reloc.target_handle,
1588                                   (int) reloc.offset);
1589                         drm_gem_object_unreference(target_obj);
1590                         i915_gem_object_unpin(obj);
1591                         return -EINVAL;
1592                 }
1593
1594                 if (reloc.write_domain && target_obj->pending_write_domain &&
1595                     reloc.write_domain != target_obj->pending_write_domain) {
1596                         DRM_ERROR("Write domain conflict: "
1597                                   "obj %p target %d offset %d "
1598                                   "new %08x old %08x\n",
1599                                   obj, reloc.target_handle,
1600                                   (int) reloc.offset,
1601                                   reloc.write_domain,
1602                                   target_obj->pending_write_domain);
1603                         drm_gem_object_unreference(target_obj);
1604                         i915_gem_object_unpin(obj);
1605                         return -EINVAL;
1606                 }
1607
1608 #if WATCH_RELOC
1609                 DRM_INFO("%s: obj %p offset %08x target %d "
1610                          "read %08x write %08x gtt %08x "
1611                          "presumed %08x delta %08x\n",
1612                          __func__,
1613                          obj,
1614                          (int) reloc.offset,
1615                          (int) reloc.target_handle,
1616                          (int) reloc.read_domains,
1617                          (int) reloc.write_domain,
1618                          (int) target_obj_priv->gtt_offset,
1619                          (int) reloc.presumed_offset,
1620                          reloc.delta);
1621 #endif
1622
1623                 target_obj->pending_read_domains |= reloc.read_domains;
1624                 target_obj->pending_write_domain |= reloc.write_domain;
1625
1626                 /* If the relocation already has the right value in it, no
1627                  * more work needs to be done.
1628                  */
1629                 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1630                         drm_gem_object_unreference(target_obj);
1631                         continue;
1632                 }
1633
1634                 /* Now that we're going to actually write some data in,
1635                  * make sure that any rendering using this buffer's contents
1636                  * is completed.
1637                  */
1638                 ret = i915_gem_object_wait_rendering(obj);
1639                 if (ret) {
1640                         drm_gem_object_unreference(target_obj);
1641                         i915_gem_object_unpin(obj);
1642                         return ret;
1643                 }
1639
1640                 /* As we're writing through the gtt, flush
1641                  * any CPU writes before we write the relocations
1642                  */
1643                 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1644                         i915_gem_clflush_object(obj);
1645                         drm_agp_chipset_flush(dev);
1646                         obj->write_domain = 0;
1647                 }
1648
1649                 /* Map the page containing the relocation we're going to
1650                  * perform.
1651                  */
1652                 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1653                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1654                                                       (reloc_offset &
1655                                                        ~(PAGE_SIZE - 1)));
1656                 reloc_entry = (uint32_t __iomem *)(reloc_page +
1657                                                    (reloc_offset & (PAGE_SIZE - 1)));
1658                 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1659
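		/* Net effect, sketched as ordinary stores: the dword at
		 * reloc.offset inside obj becomes the target's GTT address
		 * plus reloc.delta.  Writing it through the WC GTT mapping
		 * avoids any further CPU cache flushing of obj.
		 */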
1660 #if WATCH_BUF
1661                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1662                           obj, (unsigned int) reloc.offset,
1663                           readl(reloc_entry), reloc_val);
1664 #endif
1665                 writel(reloc_val, reloc_entry);
1666                 io_mapping_unmap_atomic(reloc_page);
1667
1668                 /* Write the updated presumed offset for this entry back out
1669                  * to the user.
1670                  */
1671                 reloc.presumed_offset = target_obj_priv->gtt_offset;
1672                 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1673                 if (ret != 0) {
1674                         drm_gem_object_unreference(target_obj);
1675                         i915_gem_object_unpin(obj);
1676                         return -EFAULT;
1677                 }
1678
1679                 drm_gem_object_unreference(target_obj);
1680         }
1681
1682 #if WATCH_BUF
1683         if (0)
1684                 i915_gem_dump_object(obj, 128, __func__, ~0);
1685 #endif
1686         return 0;
1687 }
1688
1689 /** Dispatch a batchbuffer to the ring. */
1691 static int
1692 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1693                               struct drm_i915_gem_execbuffer *exec,
1694                               uint64_t exec_offset)
1695 {
1696         drm_i915_private_t *dev_priv = dev->dev_private;
1697         struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1698                                              (uintptr_t) exec->cliprects_ptr;
1699         int nbox = exec->num_cliprects;
1700         int i = 0, count;
1701         uint32_t        exec_start, exec_len;
1702         RING_LOCALS;
1703
1704         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1705         exec_len = (uint32_t) exec->batch_len;
1706
1707         if ((exec_start | exec_len) & 0x7) {
1708                 DRM_ERROR("alignment\n");
1709                 return -EINVAL;
1710         }
1711
1712         if (!exec_start)
1713                 return -EINVAL;
1714
1715         count = nbox ? nbox : 1;
1716
1717         for (i = 0; i < count; i++) {
1718                 if (i < nbox) {
1719                         int ret = i915_emit_box(dev, boxes, i,
1720                                                 exec->DR1, exec->DR4);
1721                         if (ret)
1722                                 return ret;
1723                 }
1724
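		/* 830/845 need the two-address MI_BATCH_BUFFER command,
		 * which carries the batch end address inline; everything
		 * newer uses MI_BATCH_BUFFER_START and executes until the
		 * batch's own MI_BATCH_BUFFER_END.
		 */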
1725                 if (IS_I830(dev) || IS_845G(dev)) {
1726                         BEGIN_LP_RING(4);
1727                         OUT_RING(MI_BATCH_BUFFER);
1728                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1729                         OUT_RING(exec_start + exec_len - 4);
1730                         OUT_RING(0);
1731                         ADVANCE_LP_RING();
1732                 } else {
1733                         BEGIN_LP_RING(2);
1734                         if (IS_I965G(dev)) {
1735                                 OUT_RING(MI_BATCH_BUFFER_START |
1736                                          (2 << 6) |
1737                                          MI_BATCH_NON_SECURE_I965);
1738                                 OUT_RING(exec_start);
1739                         } else {
1740                                 OUT_RING(MI_BATCH_BUFFER_START |
1741                                          (2 << 6));
1742                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1743                         }
1744                         ADVANCE_LP_RING();
1745                 }
1746         }
1747
1748         /* XXX breadcrumb */
1749         return 0;
1750 }
1751
1752 /* Throttle our rendering by waiting until the ring has completed the requests
1753  * that were outstanding as of the previous call to this ioctl.
1754  *
1755  * This should get us reasonable parallelism between CPU and GPU but also
1756  * relatively low latency when blocking on a particular request to finish.
1757  */
1758 static int
1759 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1760 {
1761         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1762         int ret = 0;
1763         uint32_t seqno;
1764
1765         mutex_lock(&dev->struct_mutex);
1766         seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1767         i915_file_priv->mm.last_gem_throttle_seqno =
1768                 i915_file_priv->mm.last_gem_seqno;
1769         if (seqno)
1770                 ret = i915_wait_request(dev, seqno);
1771         mutex_unlock(&dev->struct_mutex);
1772         return ret;
1773 }
1774
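/*
 * Submit a batchbuffer for execution, roughly in five steps: copy in
 * the exec list, look up and pin each object while applying its
 * relocations, move every object into its requested domains, flush the
 * accumulated domains and dispatch the batch (the last exec list
 * entry), then emit a request that the objects retire against.
 */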
1775 int
1776 i915_gem_execbuffer(struct drm_device *dev, void *data,
1777                     struct drm_file *file_priv)
1778 {
1779         drm_i915_private_t *dev_priv = dev->dev_private;
1780         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1781         struct drm_i915_gem_execbuffer *args = data;
1782         struct drm_i915_gem_exec_object *exec_list = NULL;
1783         struct drm_gem_object **object_list = NULL;
1784         struct drm_gem_object *batch_obj;
1785         int ret, i, pinned = 0;
1786         uint64_t exec_offset;
1787         uint32_t seqno, flush_domains;
1788
1789 #if WATCH_EXEC
1790         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1791                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1792 #endif
1793
1794         if (args->buffer_count < 1) {
1795                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1796                 return -EINVAL;
1797         }
1798         /* Copy in the exec list from userland */
1799         exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1800                                DRM_MEM_DRIVER);
1801         object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1802                                  DRM_MEM_DRIVER);
1803         if (exec_list == NULL || object_list == NULL) {
1804                 DRM_ERROR("Failed to allocate exec or object list "
1805                           "for %d buffers\n",
1806                           args->buffer_count);
1807                 ret = -ENOMEM;
1808                 goto pre_mutex_err;
1809         }
1810         ret = copy_from_user(exec_list,
1811                              (struct drm_i915_relocation_entry __user *)
1812                              (uintptr_t) args->buffers_ptr,
1813                              sizeof(*exec_list) * args->buffer_count);
1814         if (ret != 0) {
1815                 DRM_ERROR("copy %d exec entries failed %d\n",
1816                           args->buffer_count, ret);
1817                 ret = -EFAULT;
1818                 goto pre_mutex_err;
1819         }
1819
1820         mutex_lock(&dev->struct_mutex);
1821
1822         i915_verify_inactive(dev, __FILE__, __LINE__);
1823
1824         if (dev_priv->mm.wedged) {
1825                 DRM_ERROR("Execbuf while wedged\n");
1826                 mutex_unlock(&dev->struct_mutex);
1827                 return -EIO;
1828         }
1829
1830         if (dev_priv->mm.suspended) {
1831                 DRM_ERROR("Execbuf while VT-switched.\n");
1832                 mutex_unlock(&dev->struct_mutex);
1833                 return -EBUSY;
1834         }
1835
1836         /* Zero the global flush/invalidate flags.  These
1837          * will be modified as each object is bound to the
1838          * GTT.
1839          */
1840         dev->invalidate_domains = 0;
1841         dev->flush_domains = 0;
1842
1843         /* Look up object handles and perform the relocations */
1844         for (i = 0; i < args->buffer_count; i++) {
1845                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1846                                                        exec_list[i].handle);
1847                 if (object_list[i] == NULL) {
1848                         DRM_ERROR("Invalid object handle %d at index %d\n",
1849                                    exec_list[i].handle, i);
1850                         ret = -EBADF;
1851                         goto err;
1852                 }
1853
1854                 object_list[i]->pending_read_domains = 0;
1855                 object_list[i]->pending_write_domain = 0;
1856                 ret = i915_gem_object_pin_and_relocate(object_list[i],
1857                                                        file_priv,
1858                                                        &exec_list[i]);
1859                 if (ret) {
1860                         DRM_ERROR("object bind and relocate failed %d\n", ret);
1861                         goto err;
1862                 }
1863                 pinned = i + 1;
1864         }
1865
1866         /* Set the pending read domains for the batch buffer to COMMAND */
1867         batch_obj = object_list[args->buffer_count-1];
1868         batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1869         batch_obj->pending_write_domain = 0;
1870
1871         i915_verify_inactive(dev, __FILE__, __LINE__);
1872
1873         for (i = 0; i < args->buffer_count; i++) {
1874                 struct drm_gem_object *obj = object_list[i];
1875                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1876
1877                 if (obj_priv->gtt_space == NULL) {
1878                         /* We evicted the buffer in the process of validating
1879                          * our set of buffers.  We could try to recover by
1880                          * kicking everything out and trying again from
1881                          * the start.
1882                          */
1883                         ret = -ENOMEM;
1884                         goto err;
1885                 }
1886
1887                 /* make sure all previous memory operations have passed */
1888                 ret = i915_gem_object_set_domain(obj,
1889                                                  obj->pending_read_domains,
1890                                                  obj->pending_write_domain);
1891                 if (ret)
1892                         goto err;
1893         }
1894
1895         i915_verify_inactive(dev, __FILE__, __LINE__);
1896
1897         /* Flush/invalidate caches and chipset buffer */
1898         flush_domains = i915_gem_dev_set_domain(dev);
1899
1900         i915_verify_inactive(dev, __FILE__, __LINE__);
1901
1902 #if WATCH_COHERENCY
1903         for (i = 0; i < args->buffer_count; i++) {
1904                 i915_gem_object_check_coherency(object_list[i],
1905                                                 exec_list[i].handle);
1906         }
1907 #endif
1908
1909         exec_offset = exec_list[args->buffer_count - 1].offset;
1910
1911 #if WATCH_EXEC
1912         i915_gem_dump_object(object_list[args->buffer_count - 1],
1913                               args->batch_len,
1914                               __func__,
1915                               ~0);
1916 #endif
1917
1918         (void)i915_add_request(dev, flush_domains);
1919
1920         /* Exec the batchbuffer */
1921         ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1922         if (ret) {
1923                 DRM_ERROR("dispatch failed %d\n", ret);
1924                 goto err;
1925         }
1926
1927         /*
1928          * Ensure that the commands in the batch buffer are
1929          * finished before the interrupt fires
1930          */
1931         flush_domains = i915_retire_commands(dev);
1932
1933         i915_verify_inactive(dev, __FILE__, __LINE__);
1934
1935         /*
1936          * Get a seqno representing the execution of the current buffer,
1937          * which we can wait on.  We would like to mitigate these interrupts,
1938          * likely by only creating seqnos occasionally (so that we have
1939          * *some* interrupts representing completion of buffers that we can
1940          * wait on when trying to clear up gtt space).
1941          */
1942         seqno = i915_add_request(dev, flush_domains);
1943         BUG_ON(seqno == 0);
1944         i915_file_priv->mm.last_gem_seqno = seqno;
1945         for (i = 0; i < args->buffer_count; i++) {
1946                 struct drm_gem_object *obj = object_list[i];
1947                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1948
1949                 i915_gem_object_move_to_active(obj);
1950                 obj_priv->last_rendering_seqno = seqno;
1951 #if WATCH_LRU
1952                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1953 #endif
1954         }
1955 #if WATCH_LRU
1956         i915_dump_lru(dev, __func__);
1957 #endif
1958
1959         i915_verify_inactive(dev, __FILE__, __LINE__);
1960
1961         /* Copy the new buffer offsets back to the user's exec list. */
1962         ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1963                            (uintptr_t) args->buffers_ptr,
1964                            exec_list,
1965                            sizeof(*exec_list) * args->buffer_count);
1966         if (ret) {
1967                 DRM_ERROR("failed to copy %d exec entries "
1968                           "back to user (%d)\n",
1969                           args->buffer_count, ret);
1970                 ret = -EFAULT;
1971         }
1970 err:
1971         if (object_list != NULL) {
1972                 for (i = 0; i < pinned; i++)
1973                         i915_gem_object_unpin(object_list[i]);
1974
1975                 for (i = 0; i < args->buffer_count; i++)
1976                         drm_gem_object_unreference(object_list[i]);
1977         }
1978         mutex_unlock(&dev->struct_mutex);
1979
1980 pre_mutex_err:
1981         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1982                  DRM_MEM_DRIVER);
1983         drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1984                  DRM_MEM_DRIVER);
1985
1986         return ret;
1987 }
1988
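/*
 * Pin an object into the GTT, binding it first if necessary.  While
 * pinned (pin_count > 0) an idle object is kept off the LRU lists so
 * the eviction code will not try to unbind it; i915_gem_object_unpin()
 * returns it to the inactive list once the last pin is dropped.
 */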
1989 int
1990 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1991 {
1992         struct drm_device *dev = obj->dev;
1993         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1994         int ret;
1995
1996         i915_verify_inactive(dev, __FILE__, __LINE__);
1997         if (obj_priv->gtt_space == NULL) {
1998                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1999                 if (ret != 0) {
2000                         DRM_ERROR("Failure to bind: %d\n", ret);
2001                         return ret;
2002                 }
2003         }
2004         obj_priv->pin_count++;
2005
2006         /* If the object is not active and not pending a flush,
2007          * remove it from the inactive list
2008          */
2009         if (obj_priv->pin_count == 1) {
2010                 atomic_inc(&dev->pin_count);
2011                 atomic_add(obj->size, &dev->pin_memory);
2012                 if (!obj_priv->active &&
2013                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2014                                            I915_GEM_DOMAIN_GTT)) == 0 &&
2015                     !list_empty(&obj_priv->list))
2016                         list_del_init(&obj_priv->list);
2017         }
2018         i915_verify_inactive(dev, __FILE__, __LINE__);
2019
2020         return 0;
2021 }
2022
2023 void
2024 i915_gem_object_unpin(struct drm_gem_object *obj)
2025 {
2026         struct drm_device *dev = obj->dev;
2027         drm_i915_private_t *dev_priv = dev->dev_private;
2028         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2029
2030         i915_verify_inactive(dev, __FILE__, __LINE__);
2031         obj_priv->pin_count--;
2032         BUG_ON(obj_priv->pin_count < 0);
2033         BUG_ON(obj_priv->gtt_space == NULL);
2034
2035         /* If the object is no longer pinned, and is
2036          * neither active nor being flushed, then stick it on
2037          * the inactive list
2038          */
2039         if (obj_priv->pin_count == 0) {
2040                 if (!obj_priv->active &&
2041                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2042                                            I915_GEM_DOMAIN_GTT)) == 0)
2043                         list_move_tail(&obj_priv->list,
2044                                        &dev_priv->mm.inactive_list);
2045                 atomic_dec(&dev->pin_count);
2046                 atomic_sub(obj->size, &dev->pin_memory);
2047         }
2048         i915_verify_inactive(dev, __FILE__, __LINE__);
2049 }
2050
2051 int
2052 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2053                    struct drm_file *file_priv)
2054 {
2055         struct drm_i915_gem_pin *args = data;
2056         struct drm_gem_object *obj;
2057         struct drm_i915_gem_object *obj_priv;
2058         int ret;
2059
2060         mutex_lock(&dev->struct_mutex);
2061
2062         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2063         if (obj == NULL) {
2064                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2065                           args->handle);
2066                 mutex_unlock(&dev->struct_mutex);
2067                 return -EBADF;
2068         }
2069         obj_priv = obj->driver_private;
2070
2071         ret = i915_gem_object_pin(obj, args->alignment);
2072         if (ret != 0) {
2073                 drm_gem_object_unreference(obj);
2074                 mutex_unlock(&dev->struct_mutex);
2075                 return ret;
2076         }
2077
2078         /* XXX - flush the CPU caches for pinned objects
2079          * as the X server doesn't manage domains yet
2080          */
2081         if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2082                 i915_gem_clflush_object(obj);
2083                 drm_agp_chipset_flush(dev);
2084                 obj->write_domain = 0;
2085         }
2086         args->offset = obj_priv->gtt_offset;
2087         drm_gem_object_unreference(obj);
2088         mutex_unlock(&dev->struct_mutex);
2089
2090         return 0;
2091 }
2092
2093 int
2094 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2095                      struct drm_file *file_priv)
2096 {
2097         struct drm_i915_gem_pin *args = data;
2098         struct drm_gem_object *obj;
2099
2100         mutex_lock(&dev->struct_mutex);
2101
2102         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2103         if (obj == NULL) {
2104                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2105                           args->handle);
2106                 mutex_unlock(&dev->struct_mutex);
2107                 return -EBADF;
2108         }
2109
2110         i915_gem_object_unpin(obj);
2111
2112         drm_gem_object_unreference(obj);
2113         mutex_unlock(&dev->struct_mutex);
2114         return 0;
2115 }
2116
2117 int
2118 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2119                     struct drm_file *file_priv)
2120 {
2121         struct drm_i915_gem_busy *args = data;
2122         struct drm_gem_object *obj;
2123         struct drm_i915_gem_object *obj_priv;
2124
2125         mutex_lock(&dev->struct_mutex);
2126         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2127         if (obj == NULL) {
2128                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2129                           args->handle);
2130                 mutex_unlock(&dev->struct_mutex);
2131                 return -EBADF;
2132         }
2133
2134         obj_priv = obj->driver_private;
2135         args->busy = obj_priv->active;
2136
2137         drm_gem_object_unreference(obj);
2138         mutex_unlock(&dev->struct_mutex);
2139         return 0;
2140 }
2141
2142 int
2143 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2144                         struct drm_file *file_priv)
2145 {
2146         return i915_gem_ring_throttle(dev, file_priv);
2147 }
2148
2149 int i915_gem_init_object(struct drm_gem_object *obj)
2150 {
2151         struct drm_i915_gem_object *obj_priv;
2152
2153         obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2154         if (obj_priv == NULL)
2155                 return -ENOMEM;
2156
2157         /*
2158          * We've just allocated pages from the kernel,
2159          * so they've just been written by the CPU with
2160          * zeros. They'll need to be clflushed before we
2161          * use them with the GPU.
2162          */
2163         obj->write_domain = I915_GEM_DOMAIN_CPU;
2164         obj->read_domains = I915_GEM_DOMAIN_CPU;
2165
2166         obj_priv->agp_type = AGP_USER_MEMORY;
2167
2168         obj->driver_private = obj_priv;
2169         obj_priv->obj = obj;
2170         INIT_LIST_HEAD(&obj_priv->list);
2171         return 0;
2172 }
2173
2174 void i915_gem_free_object(struct drm_gem_object *obj)
2175 {
2176         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2177
2178         while (obj_priv->pin_count > 0)
2179                 i915_gem_object_unpin(obj);
2180
2181         i915_gem_object_unbind(obj);
2182
2183         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2184         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2185 }
2186
2187 static int
2188 i915_gem_set_domain(struct drm_gem_object *obj,
2189                     struct drm_file *file_priv,
2190                     uint32_t read_domains,
2191                     uint32_t write_domain)
2192 {
2193         struct drm_device *dev = obj->dev;
2194         int ret;
2195         uint32_t flush_domains;
2196
2197         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2198
2199         ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2200         if (ret)
2201                 return ret;
2202         flush_domains = i915_gem_dev_set_domain(obj->dev);
2203
2204         if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2205                 (void) i915_add_request(dev, flush_domains);
2206
2207         return 0;
2208 }
2209
2210 /** Unbinds all objects that are on the given buffer list. */
2211 static int
2212 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2213 {
2214         struct drm_gem_object *obj;
2215         struct drm_i915_gem_object *obj_priv;
2216         int ret;
2217
2218         while (!list_empty(head)) {
2219                 obj_priv = list_first_entry(head,
2220                                             struct drm_i915_gem_object,
2221                                             list);
2222                 obj = obj_priv->obj;
2223
2224                 if (obj_priv->pin_count != 0) {
2225                         DRM_ERROR("Pinned object in unbind list\n");
2226                         return -EINVAL;
2227                 }
2228
2229                 ret = i915_gem_object_unbind(obj);
2230                 if (ret != 0) {
2231                         DRM_ERROR("Error unbinding object: %d\n", ret);
2232                         return ret;
2233                 }
2237         }
2238
2240         return 0;
2241 }
2242
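/*
 * Quiesce the GPU for VT switch or unload: flush all non-CPU write
 * domains, poll the breadcrumb until the flush's request completes
 * (flagging the hardware as wedged if it stops making progress), move
 * everything to the inactive list, evict it from the GTT, and tear
 * down the ringbuffer.
 */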
2243 static int
2244 i915_gem_idle(struct drm_device *dev)
2245 {
2246         drm_i915_private_t *dev_priv = dev->dev_private;
2247         uint32_t seqno, cur_seqno, last_seqno;
2248         int stuck, ret;
2249
2250         mutex_lock(&dev->struct_mutex);
2251
2252         if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
2253                 mutex_unlock(&dev->struct_mutex);
2254                 return 0;
2255         }
2256
2257         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
2258          * We need to replace this with a semaphore, or something.
2259          */
2260         dev_priv->mm.suspended = 1;
2261
2262         /* Cancel the retire work handler, wait for it to finish if running
2263          */
2264         mutex_unlock(&dev->struct_mutex);
2265         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2266         mutex_lock(&dev->struct_mutex);
2267
2268         i915_kernel_lost_context(dev);
2269
2270         /* Flush the GPU along with all non-CPU write domains
2271          */
2272         i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2273                        ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2274         seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2275                                         I915_GEM_DOMAIN_GTT));
2276
2277         if (seqno == 0) {
2278                 mutex_unlock(&dev->struct_mutex);
2279                 return -ENOMEM;
2280         }
2281
2282         dev_priv->mm.waiting_gem_seqno = seqno;
2283         last_seqno = 0;
2284         stuck = 0;
2285         for (;;) {
2286                 cur_seqno = i915_get_gem_seqno(dev);
2287                 if (i915_seqno_passed(cur_seqno, seqno))
2288                         break;
2289                 if (last_seqno == cur_seqno) {
2290                         if (stuck++ > 100) {
2291                                 DRM_ERROR("hardware wedged\n");
2292                                 dev_priv->mm.wedged = 1;
2293                                 DRM_WAKEUP(&dev_priv->irq_queue);
2294                                 break;
2295                         }
2296                 }
2297                 msleep(10);
2298                 last_seqno = cur_seqno;
2299         }
2300         dev_priv->mm.waiting_gem_seqno = 0;
2301
2302         i915_gem_retire_requests(dev);
2303
2304         if (!dev_priv->mm.wedged) {
2305                 /* Active and flushing should now be empty as we've
2306                  * waited for a sequence higher than any pending execbuffer
2307                  */
2308                 WARN_ON(!list_empty(&dev_priv->mm.active_list));
2309                 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
2310                 /* Request should now be empty as we've also waited
2311                  * for the last request in the list
2312                  */
2313                 WARN_ON(!list_empty(&dev_priv->mm.request_list));
2314         }
2315
2316         /* Empty the active and flushing lists to inactive.  If there's
2317          * anything left at this point, it means that we're wedged and
2318          * nothing good's going to happen by leaving them there.  So strip
2319          * the GPU domains and just stuff them onto inactive.
2320          */
2321         while (!list_empty(&dev_priv->mm.active_list)) {
2322                 struct drm_i915_gem_object *obj_priv;
2323
2324                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
2325                                             struct drm_i915_gem_object,
2326                                             list);
2327                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2328                 i915_gem_object_move_to_inactive(obj_priv->obj);
2329         }
2330
2331         while (!list_empty(&dev_priv->mm.flushing_list)) {
2332                 struct drm_i915_gem_object *obj_priv;
2333
2334                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2335                                             struct drm_i915_gem_object,
2336                                             list);
2337                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2338                 i915_gem_object_move_to_inactive(obj_priv->obj);
2339         }
2340
2342         /* Move all inactive buffers out of the GTT. */
2343         ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2344         WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
2345         if (ret) {
2346                 mutex_unlock(&dev->struct_mutex);
2347                 return ret;
2348         }
2349
2350         i915_gem_cleanup_ringbuffer(dev);
2351         mutex_unlock(&dev->struct_mutex);
2352
2353         return 0;
2354 }
2355
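/*
 * Set up the hardware status page.  On chips flagged by
 * I915_NEED_GFX_HWS() the page is read through the GTT, so a GEM
 * object is allocated and pinned for it here and its address written
 * to HWS_PGA; older parts use a physical address set up at driver
 * load.
 */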
2356 static int
2357 i915_gem_init_hws(struct drm_device *dev)
2358 {
2359         drm_i915_private_t *dev_priv = dev->dev_private;
2360         struct drm_gem_object *obj;
2361         struct drm_i915_gem_object *obj_priv;
2362         int ret;
2363
2364         /* If we need a physical address for the status page, it's already
2365          * initialized at driver load time.
2366          */
2367         if (!I915_NEED_GFX_HWS(dev))
2368                 return 0;
2369
2370         obj = drm_gem_object_alloc(dev, 4096);
2371         if (obj == NULL) {
2372                 DRM_ERROR("Failed to allocate status page\n");
2373                 return -ENOMEM;
2374         }
2375         obj_priv = obj->driver_private;
2376         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
2377
2378         ret = i915_gem_object_pin(obj, 4096);
2379         if (ret != 0) {
2380                 drm_gem_object_unreference(obj);
2381                 return ret;
2382         }
2383
2384         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2385
2386         dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
2387         if (dev_priv->hw_status_page == NULL) {
2388                 DRM_ERROR("Failed to map status page.\n");
2389                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2390                 drm_gem_object_unreference(obj);
2391                 return -EINVAL;
2392         }
2393         dev_priv->hws_obj = obj;
2394         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2395         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2396         I915_READ(HWS_PGA); /* posting read */
2397         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2398
2399         return 0;
2400 }
2401
2402 static int
2403 i915_gem_init_ringbuffer(struct drm_device *dev)
2404 {
2405         drm_i915_private_t *dev_priv = dev->dev_private;
2406         struct drm_gem_object *obj;
2407         struct drm_i915_gem_object *obj_priv;
2408         int ret;
2409         u32 head;
2410
2411         ret = i915_gem_init_hws(dev);
2412         if (ret != 0)
2413                 return ret;
2414
2415         obj = drm_gem_object_alloc(dev, 128 * 1024);
2416         if (obj == NULL) {
2417                 DRM_ERROR("Failed to allocate ringbuffer\n");
2418                 return -ENOMEM;
2419         }
2420         obj_priv = obj->driver_private;
2421
2422         ret = i915_gem_object_pin(obj, 4096);
2423         if (ret != 0) {
2424                 drm_gem_object_unreference(obj);
2425                 return ret;
2426         }
2427
2428         /* Set up the kernel mapping for the ring. */
2429         dev_priv->ring.Size = obj->size;
2430         dev_priv->ring.tail_mask = obj->size - 1;
2431
2432         dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2433         dev_priv->ring.map.size = obj->size;
2434         dev_priv->ring.map.type = 0;
2435         dev_priv->ring.map.flags = 0;
2436         dev_priv->ring.map.mtrr = 0;
2437
2438         drm_core_ioremap_wc(&dev_priv->ring.map, dev);
2439         if (dev_priv->ring.map.handle == NULL) {
2440                 DRM_ERROR("Failed to map ringbuffer.\n");
2441                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2442                 drm_gem_object_unreference(obj);
2443                 return -EINVAL;
2444         }
2445         dev_priv->ring.ring_obj = obj;
2446         dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2447
2448         /* Stop the ring if it's running. */
2449         I915_WRITE(PRB0_CTL, 0);
2450         I915_WRITE(PRB0_TAIL, 0);
2451         I915_WRITE(PRB0_HEAD, 0);
2452
2453         /* Initialize the ring. */
2454         I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2455         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2456
2457         /* G45 ring initialization fails to reset head to zero */
2458         if (head != 0) {
2459                 DRM_ERROR("Ring head not reset to zero "
2460                           "ctl %08x head %08x tail %08x start %08x\n",
2461                           I915_READ(PRB0_CTL),
2462                           I915_READ(PRB0_HEAD),
2463                           I915_READ(PRB0_TAIL),
2464                           I915_READ(PRB0_START));
2465                 I915_WRITE(PRB0_HEAD, 0);
2466
2467                 DRM_ERROR("Ring head forced to zero "
2468                           "ctl %08x head %08x tail %08x start %08x\n",
2469                           I915_READ(PRB0_CTL),
2470                           I915_READ(PRB0_HEAD),
2471                           I915_READ(PRB0_TAIL),
2472                           I915_READ(PRB0_START));
2473         }
2474
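	/* The length field of PRB0_CTL holds the ring size less one
	 * page, as masked by RING_NR_PAGES: 0x1f000 for this 128k ring.
	 */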
2475         I915_WRITE(PRB0_CTL,
2476                    ((obj->size - 4096) & RING_NR_PAGES) |
2477                    RING_NO_REPORT |
2478                    RING_VALID);
2479
2480         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2481
2482         /* If the head is still not zero, the ring is dead */
2483         if (head != 0) {
2484                 DRM_ERROR("Ring initialization failed "
2485                           "ctl %08x head %08x tail %08x start %08x\n",
2486                           I915_READ(PRB0_CTL),
2487                           I915_READ(PRB0_HEAD),
2488                           I915_READ(PRB0_TAIL),
2489                           I915_READ(PRB0_START));
2490                 return -EIO;
2491         }
2492
2493         /* Update our cache of the ring state */
2494         i915_kernel_lost_context(dev);
2495
2496         return 0;
2497 }
2498
2499 static void
2500 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2501 {
2502         drm_i915_private_t *dev_priv = dev->dev_private;
2503
2504         if (dev_priv->ring.ring_obj == NULL)
2505                 return;
2506
2507         drm_core_ioremapfree(&dev_priv->ring.map, dev);
2508
2509         i915_gem_object_unpin(dev_priv->ring.ring_obj);
2510         drm_gem_object_unreference(dev_priv->ring.ring_obj);
2511         dev_priv->ring.ring_obj = NULL;
2512         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2513
2514         if (dev_priv->hws_obj != NULL) {
2515                 struct drm_gem_object *obj = dev_priv->hws_obj;
2516                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2517
2518                 kunmap(obj_priv->page_list[0]);
2519                 i915_gem_object_unpin(obj);
2520                 drm_gem_object_unreference(obj);
2521                 dev_priv->hws_obj = NULL;
2522                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2523                 dev_priv->hw_status_page = NULL;
2524
2525                 /* Write high address into HWS_PGA when disabling. */
2526                 I915_WRITE(HWS_PGA, 0x1ffff000);
2527         }
2528 }
2529
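/*
 * Bring GEM back up on VT enter: reinitialize the ring and status
 * page, map the aperture for relocation writes, clear the suspended
 * flag so execbuf is allowed again, and install the IRQ handler.
 */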
2530 int
2531 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2532                        struct drm_file *file_priv)
2533 {
2534         drm_i915_private_t *dev_priv = dev->dev_private;
2535         int ret;
2536
2537         if (dev_priv->mm.wedged) {
2538                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2539                 dev_priv->mm.wedged = 0;
2540         }
2541
2542         ret = i915_gem_init_ringbuffer(dev);
2543         if (ret != 0)
2544                 return ret;
2545
2546         dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
2547                                                         dev->agp->agp_info.aper_size
2548                                                         * 1024 * 1024);
2549
2550         mutex_lock(&dev->struct_mutex);
2551         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2552         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2553         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2554         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2555         dev_priv->mm.suspended = 0;
2556         mutex_unlock(&dev->struct_mutex);
2557
2558         drm_irq_install(dev);
2559
2560         return 0;
2561 }
2562
2563 int
2564 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2565                        struct drm_file *file_priv)
2566 {
2567         drm_i915_private_t *dev_priv = dev->dev_private;
2568         int ret;
2569
2570         ret = i915_gem_idle(dev);
2571         drm_irq_uninstall(dev);
2572
2573         io_mapping_free(dev_priv->mm.gtt_mapping);
2574         return ret;
2575 }
2576
2577 void
2578 i915_gem_lastclose(struct drm_device *dev)
2579 {
2580         int ret;
2581
2582         ret = i915_gem_idle(dev);
2583         if (ret)
2584                 DRM_ERROR("failed to idle hardware: %d\n", ret);
2585 }
2586
2587 void
2588 i915_gem_load(struct drm_device *dev)
2589 {
2590         drm_i915_private_t *dev_priv = dev->dev_private;
2591
2592         INIT_LIST_HEAD(&dev_priv->mm.active_list);
2593         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2594         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2595         INIT_LIST_HEAD(&dev_priv->mm.request_list);
2596         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2597                           i915_gem_retire_work_handler);
2598         dev_priv->mm.next_gem_seqno = 1;
2599
2600         i915_gem_detect_bit_6_swizzle(dev);
2601 }