www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Mar 2009 23:50:49 +0000 (16:50 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Mar 2009 23:50:49 +0000 (16:50 -0700)
* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (25 commits)
  drm/i915: Fix LVDS dither setting
  drm/i915: Check for dev->primary->master before dereference.
  drm/i915: TV detection fix
  drm/i915: TV mode_set sync up with 2D driver
  drm/i915: Fix TV get_modes to return modes count
  drm/i915: Sync crt hotplug detection with intel video driver
  drm/i915: Sync mode_valid/mode_set with intel video driver
  drm/i915: TV modes' parameters sync up with 2D driver
  agp/intel: Add support for new intel chipset.
  i915/drm: Remove two redundant agp_chipset_flushes
  drm/i915: Display fence register state in debugfs i915_gem_fence_regs node.
  drm/i915: Add information on pinning and fencing to the i915 list debug.
  drm/i915: Consolidate gem object list dumping
  drm/i915: Convert i915 proc files to seq_file and move to debugfs.
  drm: Convert proc files to seq_file and introduce debugfs
  drm/i915: Fix lock order reversal in GEM relocation entry copying.
  drm/i915: Fix lock order reversal with cliprects and cmdbuf in non-DRI2 paths.
  drm/i915: Fix lock order reversal in shmem pread path.
  drm/i915: Fix lock order reversal in shmem pwrite path.
  drm/i915: Make GEM object's page lists refcounted instead of get/free.
  ...

23 files changed:
drivers/char/agp/intel-agp.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_info.c [new file with mode: 0644]
drivers/gpu/drm/drm_proc.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_proc.c [deleted file]
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_tv.c
include/drm/drmP.h
include/drm/drm_pciids.h

index 4373adb2119aeea256758abc7602504477d5c5ad..9d9490e22e07abd728fa52fa2d003df22c8b1175 100644 (file)
 #define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
 #define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
 #define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
+#define PCI_DEVICE_ID_INTEL_IGDGM_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_IGDGM_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_IGDG_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_IGDG_IG         0xA001
 #define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
 #define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
 #define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
 
 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+
+#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
 
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
                        size = 512;
                }
                size += 4; /* add in BIOS popup space */
-       } else if (IS_G33) {
+       } else if (IS_G33 && !IS_IGD) {
        /* G33's GTT size defined in gmch_ctrl */
                switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
                case G33_PGETBL_SIZE_1M:
@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
                        size = 512;
                }
                size += 4;
-       } else if (IS_G4X) {
+       } else if (IS_G4X || IS_IGD) {
                /* On 4 series hardware, GTT stolen is separate from graphics
                 * stolen, ignore it in stolen gtt entries counting.  However,
                 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -2161,6 +2170,10 @@ static const struct intel_driver_description {
                NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
                NULL, &intel_g33_driver },
+       { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+               NULL, &intel_g33_driver },
+       { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+               NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
            "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_82945G_HB),
        ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
        ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+       ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
+       ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
        ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
        ID(PCI_DEVICE_ID_INTEL_82G35_HB),
        ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
index 30022c4a5c12a0be54aef5cfb7b0fddd7abd06df..4ec5061fa584aab4d070deabcf776de0994d3720 100644 (file)
@@ -10,7 +10,8 @@ drm-y       :=        drm_auth.o drm_bufs.o drm_cache.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
                drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
-               drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
+               drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
+               drm_info.o drm_debugfs.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
new file mode 100644 (file)
index 0000000..c77c6c6
--- /dev/null
@@ -0,0 +1,235 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+/* Core debugfs files created for every DRM minor.  The third field is a
+ * driver_features mask: entries with DRIVER_GEM are only instantiated for
+ * GEM-capable drivers (checked in drm_debugfs_create_files()). */
+static struct drm_info_list drm_debugfs_list[] = {
+       {"name", drm_name_info, 0},
+       {"vm", drm_vm_info, 0},
+       {"clients", drm_clients_info, 0},
+       {"queues", drm_queues_info, 0},
+       {"bufs", drm_bufs_info, 0},
+       {"gem_names", drm_gem_name_info, DRIVER_GEM},
+       {"gem_objects", drm_gem_object_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+       {"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+
+/* seq_file open hook: the drm_info_node was stashed in i_private by
+ * debugfs_create_file(); hand its ->show callback to single_open(). */
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node *node = inode->i_private;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+
+/* All DRM debugfs files share these fops; per-file behavior comes from the
+ * drm_info_node passed through drm_debugfs_open(). */
+static const struct file_operations drm_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_debugfs_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * drm_info_list structures in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+                            struct dentry *root, struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct dentry *ent;
+       struct drm_info_node *tmp;
+       char name[64];
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
+
+               /* Skip entries whose required driver features are absent. */
+               if (features != 0 &&
+                   (dev->driver->driver_features & features) != features)
+                       continue;
+
+               /* NOTE(review): drm_alloc() result is not NULL-checked before
+                * being handed to debugfs_create_file() -- confirm intended. */
+               tmp = drm_alloc(sizeof(struct drm_info_node),
+                               _DRM_DRIVER);
+               ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+                                         root, tmp, &drm_debugfs_fops);
+               if (!ent) {
+                       /* NOTE(review): 'name' is never written before this
+                        * error print -- it is read uninitialized here. */
+                       DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
+                                 name, files[i].name);
+                       drm_free(tmp, sizeof(struct drm_info_node),
+                                _DRM_DRIVER);
+                       ret = -1;
+                       goto fail;
+               }
+
+               /* Track the node on the minor so remove_files() can find it. */
+               tmp->minor = minor;
+               tmp->dent = ent;
+               tmp->info_ent = &files[i];
+               list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+       }
+       return 0;
+
+fail:
+       /* Undo any files already created in this call. */
+       drm_debugfs_remove_files(files, count, minor);
+       return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
+
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param minor device minor structure
+ * \param minor_id device minor number
+ * \param root DRI debugfs dir entry.
+ * \return Zero on success, non-zero on failure
+ *
+ * Create the device debugfs root entry "/debugfs/dri/%minor%/" under the
+ * given root, each core entry in drm_debugfs_list as
+ * "/debugfs/dri/%minor%/%name%", and then any driver-specific entries via
+ * the driver's debugfs_init hook.
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+                    struct dentry *root)
+{
+       struct drm_device *dev = minor->dev;
+       char name[64];
+       int ret;
+
+       INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+       /* Per-minor directory is named after the minor number, e.g. "0". */
+       sprintf(name, "%d", minor_id);
+       minor->debugfs_root = debugfs_create_dir(name, root);
+       if (!minor->debugfs_root) {
+               DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
+               return -1;
+       }
+
+       ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+                                      minor->debugfs_root, minor);
+       if (ret) {
+               debugfs_remove(minor->debugfs_root);
+               minor->debugfs_root = NULL;
+               DRM_ERROR("Failed to create core drm debugfs files\n");
+               return ret;
+       }
+
+       if (dev->driver->debugfs_init) {
+               ret = dev->driver->debugfs_init(minor);
+               if (ret) {
+                       /* NOTE(review): unlike the core-files failure above,
+                        * this path leaves the core entries and the minor's
+                        * directory in place -- confirm the caller cleans up. */
+                       DRM_ERROR("DRM: Driver failed to initialize "
+                                 "/debugfs/dri.\n");
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The minor of which we should remove the files
+ * \return always zero.
+ *
+ * For each entry in \a files, remove any matching node previously created
+ * by drm_debugfs_create_files() on this minor and free its drm_info_node.
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+                            struct drm_minor *minor)
+{
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               /* _safe variant: matching nodes are unlinked and freed
+                * while walking the list. */
+               list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       /* Match by info_ent pointer identity, not by name. */
+                       if (tmp->info_ent == &files[i]) {
+                               debugfs_remove(tmp->dent);
+                               list_del(pos);
+                               drm_free(tmp, sizeof(struct drm_info_node),
+                                        _DRM_DRIVER);
+                       }
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
+/**
+ * Cleanup the debugfs filesystem resources.
+ *
+ * \param minor device minor structure.
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by drm_debugfs_init(): driver entries
+ * first (via the driver's debugfs_cleanup hook), then the core entries,
+ * then the per-minor directory itself.
+ */
+int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+
+       /* Nothing was created (or already cleaned up). */
+       if (!minor->debugfs_root)
+               return 0;
+
+       if (dev->driver->debugfs_cleanup)
+               dev->driver->debugfs_cleanup(minor);
+
+       drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+
+       debugfs_remove(minor->debugfs_root);
+       minor->debugfs_root = NULL;
+
+       return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
index 14c7a23dc157e83cfe70f6b83ca3cb09f9550843..ed32edb17166b218498bbd428d164a60ec855d0c 100644 (file)
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/debugfs.h>
 #include "drmP.h"
 #include "drm_core.h"
 
+
 static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
@@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev)
 
        /* Clear AGP information */
        if (drm_core_has_AGP(dev) && dev->agp &&
-           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+                       !drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct drm_agp_mem *entry, *tempe;
 
                /* Remove AGP resources, but leave dev->agp
@@ -382,6 +384,13 @@ static int __init drm_core_init(void)
                goto err_p3;
        }
 
+       drm_debugfs_root = debugfs_create_dir("dri", NULL);
+       if (!drm_debugfs_root) {
+               DRM_ERROR("Cannot create /debugfs/dri\n");
+               ret = -1;
+               goto err_p3;
+       }
+
        drm_mem_init();
 
        DRM_INFO("Initialized %s %d.%d.%d %s\n",
@@ -400,6 +409,7 @@ err_p1:
 static void __exit drm_core_exit(void)
 {
        remove_proc_entry("dri", NULL);
+       debugfs_remove(drm_debugfs_root);
        drm_sysfs_destroy();
 
        unregister_chrdev(DRM_MAJOR, "drm");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
new file mode 100644 (file)
index 0000000..fc98952
--- /dev/null
@@ -0,0 +1,328 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/.../name" (or the debugfs equivalent) is read;
+ * shared by both drm_proc_list and drm_debugfs_list.
+ *
+ * Prints the driver name and PCI bus id, plus the master's unique name
+ * if one has been set.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_minor *minor = node->minor;
+       struct drm_device *dev = minor->dev;
+       struct drm_master *master = minor->master;
+
+       /* No master yet: nothing meaningful to report. */
+       if (!master)
+               return 0;
+
+       if (master->unique) {
+               seq_printf(m, "%s %s %s\n",
+                          dev->driver->pci_driver.name,
+                          pci_name(dev->pdev), master->unique);
+       } else {
+               seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+                          pci_name(dev->pdev));
+       }
+
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints one table row per mapping in drm_device::maplist, under
+ * dev->struct_mutex.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_map *map;
+       struct drm_map_list *r_list;
+
+       /* Hardcoded from _DRM_FRAME_BUFFER,
+          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+       const char *type;
+       int i;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "slot      offset       size type flags    address mtrr\n\n");
+       i = 0;
+       list_for_each_entry(r_list, &dev->maplist, head) {
+               map = r_list->map;
+               if (!map)
+                       continue;
+               /* Guard against indexing outside the 6-entry types[] table. */
+               if (map->type < 0 || map->type > 5)
+                       type = "??";
+               else
+                       type = types[map->type];
+
+               seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+                          i,
+                          map->offset,
+                          map->size, type, map->flags,
+                          (unsigned long) r_list->user_token);
+               /* Negative mtrr means no MTRR was allocated for this map. */
+               if (map->mtrr < 0)
+                       seq_printf(m, "none\n");
+               else
+                       seq_printf(m, "%4d\n", map->mtrr);
+               i++;
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ *
+ * Prints per-queue state for each of dev->queue_count queues, under
+ * dev->struct_mutex.
+ */
+int drm_queues_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int i;
+       struct drm_queue *q;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "  ctx/flags   use   fin"
+                  "   blk/rw/rwf  wait    flushed         queued"
+                  "      locks\n\n");
+       for (i = 0; i < dev->queue_count; i++) {
+               q = dev->queuelist[i];
+               /* Pin the queue's use count while its fields are printed. */
+               atomic_inc(&q->use_count);
+               seq_printf(m,   "%5d/0x%03x %5d %5d"
+                          " %5d/%c%c/%c%c%c %5Zd\n",
+                          i,
+                          q->flags,
+                          atomic_read(&q->use_count),
+                          atomic_read(&q->finalization),
+                          atomic_read(&q->block_count),
+                          atomic_read(&q->block_read) ? 'r' : '-',
+                          atomic_read(&q->block_write) ? 'w' : '-',
+                          waitqueue_active(&q->read_queue) ? 'r' : '-',
+                          waitqueue_active(&q->write_queue) ? 'w' : '-',
+                          waitqueue_active(&q->flush_queue) ? 'f' : '-',
+                          DRM_BUFCOUNT(&q->waitlist));
+               atomic_dec(&q->use_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ *
+ * Prints a per-order summary of the DMA buffer pools, then the list slot
+ * of every buffer, under dev->struct_mutex.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_device_dma *dma;
+       int i, seg_pages;
+
+       mutex_lock(&dev->struct_mutex);
+       dma = dev->dma;
+       /* No DMA set up for this device: emit nothing. */
+       if (!dma) {
+               mutex_unlock(&dev->struct_mutex);
+               return 0;
+       }
+
+       seq_printf(m, " o     size count  free   segs pages    kB\n\n");
+       for (i = 0; i <= DRM_MAX_ORDER; i++) {
+               if (dma->bufs[i].buf_count) {
+                       seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+                       seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+                                  i,
+                                  dma->bufs[i].buf_size,
+                                  dma->bufs[i].buf_count,
+                                  atomic_read(&dma->bufs[i].freelist.count),
+                                  dma->bufs[i].seg_count,
+                                  seg_pages,
+                                  seg_pages * PAGE_SIZE / 1024);
+               }
+       }
+       seq_printf(m, "\n");
+       for (i = 0; i < dma->buf_count; i++) {
+               /* Wrap the buffer-slot listing every 32 entries. */
+               if (i && !(i % 32))
+                       seq_printf(m, "\n");
+               seq_printf(m, " %d", dma->buflist[i]->list);
+       }
+       seq_printf(m, "\n");
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ *
+ * Prints per-CRTC vblank refcount, counter, last wait and modeset state.
+ *
+ * NOTE(review): this function is not referenced by either drm_proc_list
+ * or drm_debugfs_list in this patch -- confirm whether it is dead code
+ * or wired up elsewhere.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int crtc;
+
+       mutex_lock(&dev->struct_mutex);
+       for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+               seq_printf(m, "CRTC %d enable:     %d\n",
+                          crtc, atomic_read(&dev->vblank_refcount[crtc]));
+               seq_printf(m, "CRTC %d counter:    %d\n",
+                          crtc, drm_vblank_count(dev, crtc));
+               seq_printf(m, "CRTC %d last wait:  %d\n",
+                          crtc, dev->last_vblank_wait[crtc]);
+               seq_printf(m, "CRTC %d in modeset: %d\n",
+                          crtc, dev->vblank_inmodeset[crtc]);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ * Prints one row per open file in dev->filelist (authenticated flag,
+ * minor, pid, uid, magic, ioctl count), under dev->struct_mutex.
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_file *priv;
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "a dev    pid    uid      magic     ioctls\n\n");
+       list_for_each_entry(priv, &dev->filelist, lhead) {
+               seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+                          priv->authenticated ? 'y' : 'n',
+                          priv->minor->index,
+                          priv->pid,
+                          priv->uid, priv->magic, priv->ioctl_count);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+
+/* idr_for_each() callback: print one named GEM object.  Invoked from
+ * drm_gem_name_info() with the seq_file as the opaque data pointer. */
+int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+       struct seq_file *m = data;
+
+       /* NOTE(review): this first line duplicates the name/size already
+        * printed in the table row below and does not match the column
+        * header emitted by drm_gem_name_info() -- possibly leftover debug
+        * output; confirm it is intentional. */
+       seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
+
+       seq_printf(m, "%6d %8zd %7d %8d\n",
+                  obj->name, obj->size,
+                  atomic_read(&obj->handlecount.refcount),
+                  atomic_read(&obj->refcount.refcount));
+       return 0;
+}
+
+/* "gem_names" file: table of all flink-named GEM objects, one row per
+ * entry in dev->object_name_idr via drm_gem_one_name_info(). */
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       seq_printf(m, "  name     size handles refcount\n");
+       idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+       return 0;
+}
+
+/* "gem_objects" file: aggregate GEM accounting counters kept on the
+ * drm_device (object/pin/gtt counts and byte totals). */
+int drm_gem_object_info(struct seq_file *m, void* data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
+       seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
+       seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
+       seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
+       seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+       seq_printf(m, "%d gtt total\n", dev->gtt_total);
+       return 0;
+}
+
+#if DRM_DEBUG_CODE
+
+/* "vma" debug file (only built when DRM_DEBUG_CODE): dump every VMA the
+ * DRM device has tracked in dev->vmalist, with flags and, on i386, the
+ * decoded page-protection bits. */
+int drm_vma_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_vma_entry *pt;
+       struct vm_area_struct *vma;
+#if defined(__i386__)
+       unsigned int pgprot;
+#endif
+
+       mutex_lock(&dev->struct_mutex);
+       seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
+                  atomic_read(&dev->vma_count),
+                  high_memory, virt_to_phys(high_memory));
+
+       list_for_each_entry(pt, &dev->vmalist, head) {
+               vma = pt->vma;
+               if (!vma)
+                       continue;
+               /* ls-style rwxspli flag string followed by the pgoff. */
+               seq_printf(m,
+                          "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+                          pt->pid, vma->vm_start, vma->vm_end,
+                          vma->vm_flags & VM_READ ? 'r' : '-',
+                          vma->vm_flags & VM_WRITE ? 'w' : '-',
+                          vma->vm_flags & VM_EXEC ? 'x' : '-',
+                          vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+                          vma->vm_flags & VM_LOCKED ? 'l' : '-',
+                          vma->vm_flags & VM_IO ? 'i' : '-',
+                          vma->vm_pgoff);
+
+#if defined(__i386__)
+               /* Decode x86 PTE protection bits for this mapping. */
+               pgprot = pgprot_val(vma->vm_page_prot);
+               seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+                          pgprot & _PAGE_PRESENT ? 'p' : '-',
+                          pgprot & _PAGE_RW ? 'w' : 'r',
+                          pgprot & _PAGE_USER ? 'u' : 's',
+                          pgprot & _PAGE_PWT ? 't' : 'b',
+                          pgprot & _PAGE_PCD ? 'u' : 'c',
+                          pgprot & _PAGE_ACCESSED ? 'a' : '-',
+                          pgprot & _PAGE_DIRTY ? 'd' : '-',
+                          pgprot & _PAGE_PSE ? 'm' : 'k',
+                          pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+               seq_printf(m, "\n");
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+#endif
+
index 8df849f66830fbd50bc371fb13abf04360afa914..9b3c5af61e98f4b8173421f28b1ce93827135fe7 100644 (file)
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/seq_file.h>
 #include "drmP.h"
 
-static int drm_name_info(char *buf, char **start, off_t offset,
-                        int request, int *eof, void *data);
-static int drm_vm_info(char *buf, char **start, off_t offset,
-                      int request, int *eof, void *data);
-static int drm_clients_info(char *buf, char **start, off_t offset,
-                           int request, int *eof, void *data);
-static int drm_queues_info(char *buf, char **start, off_t offset,
-                          int request, int *eof, void *data);
-static int drm_bufs_info(char *buf, char **start, off_t offset,
-                        int request, int *eof, void *data);
-static int drm_vblank_info(char *buf, char **start, off_t offset,
-                          int request, int *eof, void *data);
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
-                            int request, int *eof, void *data);
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
-                              int request, int *eof, void *data);
-#if DRM_DEBUG_CODE
-static int drm_vma_info(char *buf, char **start, off_t offset,
-                       int request, int *eof, void *data);
-#endif
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
 
 /**
  * Proc file list.
  */
-static struct drm_proc_list {
-       const char *name;       /**< file name */
-       int (*f) (char *, char **, off_t, int, int *, void *);          /**< proc callback*/
-       u32 driver_features; /**< Required driver features for this entry */
-} drm_proc_list[] = {
+static struct drm_info_list drm_proc_list[] = {
        {"name", drm_name_info, 0},
-       {"mem", drm_mem_info, 0},
        {"vm", drm_vm_info, 0},
        {"clients", drm_clients_info, 0},
        {"queues", drm_queues_info, 0},
        {"bufs", drm_bufs_info, 0},
-       {"vblank", drm_vblank_info, 0},
        {"gem_names", drm_gem_name_info, DRIVER_GEM},
        {"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
-       {"vma", drm_vma_info},
+       {"vma", drm_vma_info, 0},
 #endif
 };
-
 #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
 
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+       struct drm_info_node* node = PDE(inode)->data;
+
+       return single_open(file, node->info_ent->show, node);
+}
+
+static const struct file_operations drm_proc_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_proc_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
 /**
- * Initialize the DRI proc filesystem for a device.
+ * Initialize a given set of proc files for a device
  *
- * \param dev DRM device.
- * \param minor device minor number.
+ * \param files The array of files to create
+ * \param count The number of files given
  * \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
  *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
+ * Create a given set of proc files represented by an array of
+ * drm_info_list structures in the given root directory.
  */
-int drm_proc_init(struct drm_minor *minor, int minor_id,
-                 struct proc_dir_entry *root)
+int drm_proc_create_files(struct drm_info_list *files, int count,
+                         struct proc_dir_entry *root, struct drm_minor *minor)
 {
        struct drm_device *dev = minor->dev;
        struct proc_dir_entry *ent;
-       int i, j, ret;
+       struct drm_info_node *tmp;
        char name[64];
+       int i, ret;
 
-       sprintf(name, "%d", minor_id);
-       minor->dev_root = proc_mkdir(name, root);
-       if (!minor->dev_root) {
-               DRM_ERROR("Cannot create /proc/dri/%s\n", name);
-               return -1;
-       }
-
-       for (i = 0; i < DRM_PROC_ENTRIES; i++) {
-               u32 features = drm_proc_list[i].driver_features;
+       for (i = 0; i < count; i++) {
+               u32 features = files[i].driver_features;
 
                if (features != 0 &&
                    (dev->driver->driver_features & features) != features)
                        continue;
 
-               ent = create_proc_entry(drm_proc_list[i].name,
-                                       S_IFREG | S_IRUGO, minor->dev_root);
+               tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
+               ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
                if (!ent) {
                        DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
-                                 name, drm_proc_list[i].name);
+                                 name, files[i].name);
+                       drm_free(tmp, sizeof(struct drm_info_node),
+                                _DRM_DRIVER);
                        ret = -1;
                        goto fail;
                }
-               ent->read_proc = drm_proc_list[i].f;
-               ent->data = minor;
-       }
 
-       if (dev->driver->proc_init) {
-               ret = dev->driver->proc_init(minor);
-               if (ret) {
-                       DRM_ERROR("DRM: Driver failed to initialize "
-                                 "/proc/dri.\n");
-                       goto fail;
-               }
+               ent->proc_fops = &drm_proc_fops;
+               ent->data = tmp;
+               tmp->minor = minor;
+               tmp->info_ent = &files[i];
+               list_add(&(tmp->list), &(minor->proc_nodes.list));
        }
-
        return 0;
- fail:
 
-       for (j = 0; j < i; j++)
-               remove_proc_entry(drm_proc_list[i].name,
-                                 minor->dev_root);
-       remove_proc_entry(name, root);
-       minor->dev_root = NULL;
+fail:
+       for (i = 0; i < count; i++)
+               remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
        return ret;
 }
 
 /**
- * Cleanup the proc filesystem resources.
+ * Initialize the DRI proc filesystem for a device
  *
- * \param minor device minor number.
+ * \param minor DRM minor to create proc entries for
+ * \param minor_id device minor number
  * \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
+ * \param root DRI proc dir entry under which the minor's directory is made
+ * \return Zero on success, non-zero on failure.
  *
- * Remove all proc entries created by proc_init().
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
  */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+                 struct proc_dir_entry *root)
 {
        struct drm_device *dev = minor->dev;
-       int i;
        char name[64];
+       int ret;
 
-       if (!root || !minor->dev_root)
-               return 0;
-
-       if (dev->driver->proc_cleanup)
-               dev->driver->proc_cleanup(minor);
-
-       for (i = 0; i < DRM_PROC_ENTRIES; i++)
-               remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
-       sprintf(name, "%d", minor->index);
-       remove_proc_entry(name, root);
-
-       return 0;
-}
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints the device name together with the bus id if available.
- */
-static int drm_name_info(char *buf, char **start, off_t offset, int request,
-                        int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_master *master = minor->master;
-       struct drm_device *dev = minor->dev;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
+       INIT_LIST_HEAD(&minor->proc_nodes.list);
+       sprintf(name, "%d", minor_id);
+       minor->proc_root = proc_mkdir(name, root);
+       if (!minor->proc_root) {
+               DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+               return -1;
        }
 
-       if (!master)
-               return 0;
-
-       *start = &buf[offset];
-       *eof = 0;
-
-       if (master->unique) {
-               DRM_PROC_PRINT("%s %s %s\n",
-                              dev->driver->pci_driver.name,
-                              pci_name(dev->pdev), master->unique);
-       } else {
-               DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
-                              pci_name(dev->pdev));
+       ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+                                   minor->proc_root, minor);
+       if (ret) {
+               remove_proc_entry(name, root);
+               minor->proc_root = NULL;
+               DRM_ERROR("Failed to create core drm proc files\n");
+               return ret;
        }
 
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-static int drm__vm_info(char *buf, char **start, off_t offset, int request,
-                       int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int len = 0;
-       struct drm_map *map;
-       struct drm_map_list *r_list;
-
-       /* Hardcoded from _DRM_FRAME_BUFFER,
-          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
-       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
-       const char *type;
-       int i;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-
-       DRM_PROC_PRINT("slot     offset       size type flags    "
-                      "address mtrr\n\n");
-       i = 0;
-       list_for_each_entry(r_list, &dev->maplist, head) {
-               map = r_list->map;
-               if (!map)
-                       continue;
-               if (map->type < 0 || map->type > 5)
-                       type = "??";
-               else
-                       type = types[map->type];
-               DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
-                              i,
-                              map->offset,
-                              map->size, type, map->flags,
-                              (unsigned long) r_list->user_token);
-               if (map->mtrr < 0) {
-                       DRM_PROC_PRINT("none\n");
-               } else {
-                       DRM_PROC_PRINT("%4d\n", map->mtrr);
+       if (dev->driver->proc_init) {
+               ret = dev->driver->proc_init(minor);
+               if (ret) {
+                       DRM_ERROR("DRM: Driver failed to initialize "
+                                 "/proc/dri.\n");
+                       return ret;
                }
-               i++;
-       }
-
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-/**
- * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vm_info(char *buf, char **start, off_t offset, int request,
-                      int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = drm__vm_info(buf, start, offset, request, eof, data);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-/**
- * Called when "/proc/dri/.../queues" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__queues_info(char *buf, char **start, off_t offset,
-                           int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int len = 0;
-       int i;
-       struct drm_queue *q;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
        }
-
-       *start = &buf[offset];
-       *eof = 0;
-
-       DRM_PROC_PRINT("  ctx/flags   use   fin"
-                      "   blk/rw/rwf  wait    flushed     queued"
-                      "      locks\n\n");
-       for (i = 0; i < dev->queue_count; i++) {
-               q = dev->queuelist[i];
-               atomic_inc(&q->use_count);
-               DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
-                                  "%5d/0x%03x %5d %5d"
-                                  " %5d/%c%c/%c%c%c %5Zd\n",
-                                  i,
-                                  q->flags,
-                                  atomic_read(&q->use_count),
-                                  atomic_read(&q->finalization),
-                                  atomic_read(&q->block_count),
-                                  atomic_read(&q->block_read) ? 'r' : '-',
-                                  atomic_read(&q->block_write) ? 'w' : '-',
-                                  waitqueue_active(&q->read_queue) ? 'r' : '-',
-                                  waitqueue_active(&q->
-                                                   write_queue) ? 'w' : '-',
-                                  waitqueue_active(&q->
-                                                   flush_queue) ? 'f' : '-',
-                                  DRM_BUFCOUNT(&q->waitlist));
-               atomic_dec(&q->use_count);
-       }
-
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-/**
- * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_queues_info(char *buf, char **start, off_t offset, int request,
-                          int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = drm__queues_info(buf, start, offset, request, eof, data);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       return 0;
 }
 
-/**
- * Called when "/proc/dri/.../bufs" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
-                         int *eof, void *data)
+int drm_proc_remove_files(struct drm_info_list *files, int count,
+                         struct drm_minor *minor)
 {
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int len = 0;
-       struct drm_device_dma *dma = dev->dma;
+       struct list_head *pos, *q;
+       struct drm_info_node *tmp;
        int i;
 
-       if (!dma || offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-
-       DRM_PROC_PRINT(" o     size count  free  segs pages    kB\n\n");
-       for (i = 0; i <= DRM_MAX_ORDER; i++) {
-               if (dma->bufs[i].buf_count)
-                       DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
-                                      i,
-                                      dma->bufs[i].buf_size,
-                                      dma->bufs[i].buf_count,
-                                      atomic_read(&dma->bufs[i]
-                                                  .freelist.count),
-                                      dma->bufs[i].seg_count,
-                                      dma->bufs[i].seg_count
-                                      * (1 << dma->bufs[i].page_order),
-                                      (dma->bufs[i].seg_count
-                                       * (1 << dma->bufs[i].page_order))
-                                      * PAGE_SIZE / 1024);
-       }
-       DRM_PROC_PRINT("\n");
-       for (i = 0; i < dma->buf_count; i++) {
-               if (i && !(i % 32))
-                       DRM_PROC_PRINT("\n");
-               DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
+       for (i = 0; i < count; i++) {
+               list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+                       tmp = list_entry(pos, struct drm_info_node, list);
+                       if (tmp->info_ent == &files[i]) {
+                               remove_proc_entry(files[i].name,
+                                                 minor->proc_root);
+                               list_del(pos);
+                               drm_free(tmp, sizeof(struct drm_info_node),
+                                        _DRM_DRIVER);
+                       }
+               }
        }
-       DRM_PROC_PRINT("\n");
-
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-/**
- * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
-                        int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = drm__bufs_info(buf, start, offset, request, eof, data);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       return 0;
 }
 
 /**
- * Called when "/proc/dri/.../vblank" is read.
+ * Cleanup the proc filesystem resources.
  *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
-                         int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int len = 0;
-       int crtc;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-
-       for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
-               DRM_PROC_PRINT("CRTC %d enable:     %d\n",
-                              crtc, atomic_read(&dev->vblank_refcount[crtc]));
-               DRM_PROC_PRINT("CRTC %d counter:    %d\n",
-                              crtc, drm_vblank_count(dev, crtc));
-               DRM_PROC_PRINT("CRTC %d last wait:  %d\n",
-                              crtc, dev->last_vblank_wait[crtc]);
-               DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
-                              crtc, dev->vblank_inmodeset[crtc]);
-       }
-
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-/**
- * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
-                        int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = drm__vblank_info(buf, start, offset, request, eof, data);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root DRI device proc dir entry.
+ * \return always zero.
  *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
+ * Remove all proc entries created by proc_init().
  */
-static int drm__clients_info(char *buf, char **start, off_t offset,
-                            int request, int *eof, void *data)
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
-       struct drm_minor *minor = (struct drm_minor *) data;
        struct drm_device *dev = minor->dev;
-       int len = 0;
-       struct drm_file *priv;
+       char name[64];
 
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
+       if (!root || !minor->proc_root)
                return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-
-       DRM_PROC_PRINT("a dev   pid    uid      magic     ioctls\n\n");
-       list_for_each_entry(priv, &dev->filelist, lhead) {
-               DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
-                              priv->authenticated ? 'y' : 'n',
-                              priv->minor->index,
-                              priv->pid,
-                              priv->uid, priv->magic, priv->ioctl_count);
-       }
 
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-/**
- * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_clients_info(char *buf, char **start, off_t offset,
-                           int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = drm__clients_info(buf, start, offset, request, eof, data);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-struct drm_gem_name_info_data {
-       int                     len;
-       char                    *buf;
-       int                     eof;
-};
+       if (dev->driver->proc_cleanup)
+               dev->driver->proc_cleanup(minor);
 
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
-       struct drm_gem_object *obj = ptr;
-       struct drm_gem_name_info_data   *nid = data;
+       drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
 
-       DRM_INFO("name %d size %zd\n", obj->name, obj->size);
-       if (nid->eof)
-               return 0;
+       sprintf(name, "%d", minor->index);
+       remove_proc_entry(name, root);
 
-       nid->len += sprintf(&nid->buf[nid->len],
-                           "%6d %8zd %7d %8d\n",
-                           obj->name, obj->size,
-                           atomic_read(&obj->handlecount.refcount),
-                           atomic_read(&obj->refcount.refcount));
-       if (nid->len > DRM_PROC_LIMIT) {
-               nid->eof = 1;
-               return 0;
-       }
        return 0;
 }
 
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
-                            int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       struct drm_gem_name_info_data nid;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       nid.len = sprintf(buf, "  name     size handles refcount\n");
-       nid.buf = buf;
-       nid.eof = 0;
-       idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
-
-       *start = &buf[offset];
-       *eof = 0;
-       if (nid.len > request + offset)
-               return request;
-       *eof = 1;
-       return nid.len - offset;
-}
-
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
-                              int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
-       DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
-       DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
-       DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
-       DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
-       DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-#if DRM_DEBUG_CODE
-
-static int drm__vma_info(char *buf, char **start, off_t offset, int request,
-                        int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int len = 0;
-       struct drm_vma_entry *pt;
-       struct vm_area_struct *vma;
-#if defined(__i386__)
-       unsigned int pgprot;
-#endif
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-
-       DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
-                      atomic_read(&dev->vma_count),
-                      high_memory, virt_to_phys(high_memory));
-       list_for_each_entry(pt, &dev->vmalist, head) {
-               if (!(vma = pt->vma))
-                       continue;
-               DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-                              pt->pid,
-                              vma->vm_start,
-                              vma->vm_end,
-                              vma->vm_flags & VM_READ ? 'r' : '-',
-                              vma->vm_flags & VM_WRITE ? 'w' : '-',
-                              vma->vm_flags & VM_EXEC ? 'x' : '-',
-                              vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
-                              vma->vm_flags & VM_LOCKED ? 'l' : '-',
-                              vma->vm_flags & VM_IO ? 'i' : '-',
-                              vma->vm_pgoff);
-
-#if defined(__i386__)
-               pgprot = pgprot_val(vma->vm_page_prot);
-               DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
-                              pgprot & _PAGE_PRESENT ? 'p' : '-',
-                              pgprot & _PAGE_RW ? 'w' : 'r',
-                              pgprot & _PAGE_USER ? 'u' : 's',
-                              pgprot & _PAGE_PWT ? 't' : 'b',
-                              pgprot & _PAGE_PCD ? 'u' : 'c',
-                              pgprot & _PAGE_ACCESSED ? 'a' : '-',
-                              pgprot & _PAGE_DIRTY ? 'd' : '-',
-                              pgprot & _PAGE_PSE ? 'm' : 'k',
-                              pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
-               DRM_PROC_PRINT("\n");
-       }
-
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-static int drm_vma_info(char *buf, char **start, off_t offset, int request,
-                       int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = drm__vma_info(buf, start, offset, request, eof, data);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-#endif
index 7c8b15b22bf2e089fd9356debc9f967031723cbe..48f33be8fd0fd649776e0e2e75280de3ebc2d81e 100644 (file)
@@ -50,6 +50,7 @@ struct idr drm_minors_idr;
 
 struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
 
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
@@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
                        goto err_mem;
                }
        } else
-               new_minor->dev_root = NULL;
+               new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+       ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+       if (ret) {
+               DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
+               goto err_g2;
+       }
+#endif
 
        ret = drm_sysfs_device_add(new_minor);
        if (ret) {
@@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p)
 
        if (minor->type == DRM_MINOR_LEGACY)
                drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_cleanup(minor);
+#endif
+
        drm_sysfs_device_remove(minor);
 
        idr_remove(&drm_minors_idr, minor->index);
index 793cba39d832b97ae139b3e7fd79c20b287b3915..51c5a050aa730ee2b1f841d2a413c6e77c31833d 100644 (file)
@@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
           i915_suspend.o \
          i915_gem.o \
          i915_gem_debug.o \
-         i915_gem_proc.o \
+         i915_gem_debugfs.o \
          i915_gem_tiling.o \
          intel_display.o \
          intel_crt.o \
index 6d21b9e48b89a4624936123fd169d79ce3f35cc9..a818b377e1f73207e6e25c35d09babe0d0b82a1f 100644 (file)
@@ -41,7 +41,6 @@
 int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
        u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
                if (ring->space >= n)
                        return 0;
 
-               if (master_priv->sarea_priv)
-                       master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+               if (dev->primary->master) {
+                       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+                       if (master_priv->sarea_priv)
+                               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+               }
+
 
                if (ring->head != last_head)
                        i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
        return ret;
 }
 
-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
        for (i = 0; i < dwords;) {
                int cmd, sz;
 
-               if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
-                       return -EINVAL;
+               cmd = buffer[i];
 
                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
                OUT_RING(cmd);
 
                while (++i, --sz) {
-                       if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
-                                                        sizeof(cmd))) {
-                               return -EINVAL;
-                       }
-                       OUT_RING(cmd);
+                       OUT_RING(buffer[i]);
                }
        }
 
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 
 int
 i915_emit_box(struct drm_device *dev,
-             struct drm_clip_rect __user *boxes,
+             struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect box;
+       struct drm_clip_rect box = boxes[i];
        RING_LOCALS;
 
-       if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
-               return -EFAULT;
-       }
-
        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
-                                  drm_i915_cmdbuffer_t * cmd)
+                                  drm_i915_cmdbuffer_t *cmd,
+                                  struct drm_clip_rect *cliprects,
+                                  void *cmdbuf)
 {
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       ret = i915_emit_box(dev, cmd->cliprects, i,
+                       ret = i915_emit_box(dev, cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }
 
-               ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+               ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 }
 
 static int i915_dispatch_batchbuffer(struct drm_device * dev,
-                                    drm_i915_batchbuffer_t * batch)
+                                    drm_i915_batchbuffer_t * batch,
+                                    struct drm_clip_rect *cliprects)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       int ret = i915_emit_box(dev, boxes, i,
+                       int ret = i915_emit_box(dev, cliprects, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
+       struct drm_clip_rect *cliprects = NULL;
 
        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-       if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
-                                                      batch->num_cliprects *
-                                                      sizeof(struct drm_clip_rect)))
-               return -EFAULT;
+       if (batch->num_cliprects < 0)
+               return -EINVAL;
+
+       if (batch->num_cliprects) {
+               cliprects = drm_calloc(batch->num_cliprects,
+                                      sizeof(struct drm_clip_rect),
+                                      DRM_MEM_DRIVER);
+               if (cliprects == NULL)
+                       return -ENOMEM;
+
+               ret = copy_from_user(cliprects, batch->cliprects,
+                                    batch->num_cliprects *
+                                    sizeof(struct drm_clip_rect));
+               if (ret != 0)
+                       goto fail_free;
+       }
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_dispatch_batchbuffer(dev, batch);
+       ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);
 
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+       drm_free(cliprects,
+                batch->num_cliprects * sizeof(struct drm_clip_rect),
+                DRM_MEM_DRIVER);
+
        return ret;
 }
 
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
+       struct drm_clip_rect *cliprects = NULL;
+       void *batch_data;
        int ret;
 
        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-       if (cmdbuf->num_cliprects &&
-           DRM_VERIFYAREA_READ(cmdbuf->cliprects,
-                               cmdbuf->num_cliprects *
-                               sizeof(struct drm_clip_rect))) {
-               DRM_ERROR("Fault accessing cliprects\n");
-               return -EFAULT;
+       if (cmdbuf->num_cliprects < 0)
+               return -EINVAL;
+
+       batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
+       if (batch_data == NULL)
+               return -ENOMEM;
+
+       ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+       if (ret != 0)
+               goto fail_batch_free;
+
+       if (cmdbuf->num_cliprects) {
+               cliprects = drm_calloc(cmdbuf->num_cliprects,
+                                      sizeof(struct drm_clip_rect),
+                                      DRM_MEM_DRIVER);
+               if (cliprects == NULL)
+                       goto fail_batch_free;
+
+               ret = copy_from_user(cliprects, cmdbuf->cliprects,
+                                    cmdbuf->num_cliprects *
+                                    sizeof(struct drm_clip_rect));
+               if (ret != 0)
+                       goto fail_clip_free;
        }
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+       ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-               return ret;
+               goto fail_batch_free;
        }
 
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-       return 0;
+
+fail_batch_free:
+       drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
+fail_clip_free:
+       drm_free(cliprects,
+                cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
+                DRM_MEM_DRIVER);
+
+       return ret;
 }
 
 static int i915_flip_bufs(struct drm_device *dev, void *data,
index b293ef0bae7153805d587201c8ce6c84f3703f4c..dcb91f5df6e33037f189d956ea8dfd09bd5c3fd9 100644 (file)
@@ -150,8 +150,10 @@ static struct drm_driver driver = {
        .get_reg_ofs = drm_core_get_reg_ofs,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
-       .proc_init = i915_gem_proc_init,
-       .proc_cleanup = i915_gem_proc_cleanup,
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = i915_gem_debugfs_init,
+       .debugfs_cleanup = i915_gem_debugfs_cleanup,
+#endif
        .gem_init_object = i915_gem_init_object,
        .gem_free_object = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,
index d6cc9861e0a1a49d70c3c6fd056f709e34dc017f..c1685d0c704faa540c78990400c65a184a8d65a7 100644 (file)
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
        /** AGP memory structure for our GTT binding. */
        DRM_AGP_MEM *agp_mem;
 
-       struct page **page_list;
+       struct page **pages;
+       int pages_refcount;
 
        /**
         * Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
 extern int i915_emit_box(struct drm_device *dev,
-                        struct drm_clip_rect __user *boxes,
+                        struct drm_clip_rect *boxes,
                         int i, int DR1, int DR4);
 
 /* i915_irq.c */
@@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
-int i915_gem_proc_init(struct drm_minor *minor);
-void i915_gem_proc_cleanup(struct drm_minor *minor);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
                          const char *where, uint32_t mark);
 void i915_dump_lru(struct drm_device *dev, const char *where);
 
+/* i915_debugfs.c */
+int i915_gem_debugfs_init(struct drm_minor *minor);
+void i915_gem_debugfs_cleanup(struct drm_minor *minor);
+
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
@@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
                     (dev)->pci_device == 0x2E22 || \
                     IS_GM45(dev))
 
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
                        (dev)->pci_device == 0x29B2 ||  \
-                       (dev)->pci_device == 0x29D2)
+                       (dev)->pci_device == 0x29D2 ||  \
+                       (IS_IGD(dev)))
 
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
                      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-                       IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
+                       IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+                       IS_IGD(dev))
 
 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
index 37427e4016cbe5f93b487107cc4284379085e219..b52cba0f16d2c6339fd875adeecc1f46078328b4 100644 (file)
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                     uint64_t offset,
                                                     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_get_pages(struct drm_gem_object *obj);
+static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+static inline int
+fast_shmem_read(struct page **pages,
+               loff_t page_base, int page_offset,
+               char __user *data,
+               int length)
+{
+       char __iomem *vaddr;
+       int ret;
+
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+               return -ENOMEM;
+       ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+       kunmap_atomic(vaddr, KM_USER0);
+
+       return ret;
+}
+
+static inline int
+slow_shmem_copy(struct page *dst_page,
+               int dst_offset,
+               struct page *src_page,
+               int src_offset,
+               int length)
+{
+       char *dst_vaddr, *src_vaddr;
+
+       dst_vaddr = kmap_atomic(dst_page, KM_USER0);
+       if (dst_vaddr == NULL)
+               return -ENOMEM;
+
+       src_vaddr = kmap_atomic(src_page, KM_USER1);
+       if (src_vaddr == NULL) {
+               kunmap_atomic(dst_vaddr, KM_USER0);
+               return -ENOMEM;
+       }
+
+       memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+
+       kunmap_atomic(src_vaddr, KM_USER1);
+       kunmap_atomic(dst_vaddr, KM_USER0);
+
+       return 0;
+}
+
+/**
+ * This is the fast shmem pread path, which attempts to copy_from_user directly
+ * from the backing pages of the object to the user's address space.  On a
+ * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
+ */
+static int
+i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                         struct drm_i915_gem_pread *args,
+                         struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       ssize_t remain;
+       loff_t offset, page_base;
+       char __user *user_data;
+       int page_offset, page_length;
+       int ret;
+
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               page_base = (offset & ~(PAGE_SIZE-1));
+               page_offset = offset & (PAGE_SIZE-1);
+               page_length = remain;
+               if ((page_offset + remain) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - page_offset;
+
+               ret = fast_shmem_read(obj_priv->pages,
+                                     page_base, page_offset,
+                                     user_data, page_length);
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * This is the fallback shmem pread path, which allocates temporary storage
+ * in kernel space to copy_to_user into outside of the struct_mutex, so we
+ * can copy out of the object's backing pages while holding the struct mutex
+ * and not take page faults.
+ */
+static int
+i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                         struct drm_i915_gem_pread *args,
+                         struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct mm_struct *mm = current->mm;
+       struct page **user_pages;
+       ssize_t remain;
+       loff_t offset, pinned_pages, i;
+       loff_t first_data_page, last_data_page, num_pages;
+       int shmem_page_index, shmem_page_offset;
+       int data_page_index,  data_page_offset;
+       int page_length;
+       int ret;
+       uint64_t data_ptr = args->data_ptr;
+
+       remain = args->size;
+
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, yet we want to hold it while
+        * dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+       if (user_pages == NULL)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto fail_put_user_pages;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                       args->size);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * shmem_page_index = page number within shmem file
+                * shmem_page_offset = offset within page in shmem file
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               shmem_page_index = offset / PAGE_SIZE;
+               shmem_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+
+               page_length = remain;
+               if ((shmem_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - shmem_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+
+               ret = slow_shmem_copy(user_pages[data_page_index],
+                                     data_page_offset,
+                                     obj_priv->pages[shmem_page_index],
+                                     shmem_page_offset,
+                                     page_length);
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               data_ptr += page_length;
+               offset += page_length;
+       }
+
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+       for (i = 0; i < pinned_pages; i++) {
+               SetPageDirty(user_pages[i]);
+               page_cache_release(user_pages[i]);
+       }
+       kfree(user_pages);
+
+       return ret;
+}
+
 /**
  * Reads data from the object referenced by handle.
  *
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       ssize_t read;
-       loff_t offset;
        int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       mutex_lock(&dev->struct_mutex);
-
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-
-       offset = args->offset;
-
-       read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-                       args->size, &offset);
-       if (read != args->size) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               if (read < 0)
-                       return read;
-               else
-                       return -EINVAL;
-       }
+       ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+       if (ret != 0)
+               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 
        drm_gem_object_unreference(obj);
-       mutex_unlock(&dev->struct_mutex);
 
-       return 0;
+       return ret;
 }
 
 /* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
  */
 
 static inline int
-slow_user_write(struct io_mapping *mapping,
-               loff_t page_base, int page_offset,
-               char __user *user_data,
-               int length)
+slow_kernel_write(struct io_mapping *mapping,
+                 loff_t gtt_base, int gtt_offset,
+                 struct page *user_page, int user_offset,
+                 int length)
 {
-       char __iomem *vaddr;
+       char *src_vaddr, *dst_vaddr;
        unsigned long unwritten;
 
-       vaddr = io_mapping_map_wc(mapping, page_base);
-       if (vaddr == NULL)
-               return -EFAULT;
-       unwritten = __copy_from_user(vaddr + page_offset,
-                                    user_data, length);
-       io_mapping_unmap(vaddr);
+       dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
+       src_vaddr = kmap_atomic(user_page, KM_USER1);
+       unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
+                                                     src_vaddr + user_offset,
+                                                     length);
+       kunmap_atomic(src_vaddr, KM_USER1);
+       io_mapping_unmap_atomic(dst_vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
 }
 
+static inline int
+fast_shmem_write(struct page **pages,
+                loff_t page_base, int page_offset,
+                char __user *data,
+                int length)
+{
+       char __iomem *vaddr;
+
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+               return -ENOMEM;
+       __copy_from_user_inatomic(vaddr + page_offset, data, length);
+       kunmap_atomic(vaddr, KM_USER0);
+
+       return 0;
+}
+
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
 static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-                   struct drm_i915_gem_pwrite *args,
-                   struct drm_file *file_priv)
+i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                        struct drm_i915_gem_pwrite *args,
+                        struct drm_file *file_priv)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 
        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;
-       obj_priv->dirty = 1;
 
        while (remain > 0) {
                /* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                       page_offset, user_data, page_length);
 
                /* If we get a fault while copying data, then (presumably) our
-                * source page isn't available. In this case, use the
-                * non-atomic function
+                * source page isn't available.  Return the error and we'll
+                * retry in the slow path.
                 */
-               if (ret) {
-                       ret = slow_user_write (dev_priv->mm.gtt_mapping,
-                                              page_base, page_offset,
-                                              user_data, page_length);
-                       if (ret)
-                               goto fail;
-               }
+               if (ret)
+                       goto fail;
 
                remain -= page_length;
                user_data += page_length;
@@ -315,39 +527,284 @@ fail:
        return ret;
 }
 
+/**
+ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
+ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
+ */
 static int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-                     struct drm_i915_gem_pwrite *args,
-                     struct drm_file *file_priv)
+i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                        struct drm_i915_gem_pwrite *args,
+                        struct drm_file *file_priv)
 {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       ssize_t remain;
+       loff_t gtt_page_base, offset;
+       loff_t first_data_page, last_data_page, num_pages;
+       loff_t pinned_pages, i;
+       struct page **user_pages;
+       struct mm_struct *mm = current->mm;
+       int gtt_page_offset, data_page_offset, data_page_index, page_length;
        int ret;
-       loff_t offset;
-       ssize_t written;
+       uint64_t data_ptr = args->data_ptr;
+
+       remain = args->size;
+
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, and all of the pwrite implementations
+        * want to hold it while dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+       if (user_pages == NULL)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto out_unpin_pages;
+       }
 
        mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_pin(obj, 0);
+       if (ret)
+               goto out_unlock;
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+       if (ret)
+               goto out_unpin_object;
+
+       obj_priv = obj->driver_private;
+       offset = obj_priv->gtt_offset + args->offset;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * gtt_page_base = page offset within aperture
+                * gtt_page_offset = offset within page in aperture
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               gtt_page_base = offset & PAGE_MASK;
+               gtt_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+
+               page_length = remain;
+               if ((gtt_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - gtt_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+
+               ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
+                                       gtt_page_base, gtt_page_offset,
+                                       user_pages[data_page_index],
+                                       data_page_offset,
+                                       page_length);
+
+               /* If we get a fault while copying data, then (presumably) our
+                * source page isn't available.  Return the error and we'll
+                * retry in the slow path.
+                */
+               if (ret)
+                       goto out_unpin_object;
+
+               remain -= page_length;
+               offset += page_length;
+               data_ptr += page_length;
+       }
+
+out_unpin_object:
+       i915_gem_object_unpin(obj);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+out_unpin_pages:
+       for (i = 0; i < pinned_pages; i++)
+               page_cache_release(user_pages[i]);
+       kfree(user_pages);
+
+       return ret;
+}
+
+/**
+ * This is the fast shmem pwrite path, which attempts to directly
+ * copy_from_user into the kmapped pages backing the object.
+ */
+static int
+i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pwrite *args,
+                          struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       ssize_t remain;
+       loff_t offset, page_base;
+       char __user *user_data;
+       int page_offset, page_length;
+       int ret;
+
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
 
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
+       offset = args->offset;
+       obj_priv->dirty = 1;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               page_base = (offset & ~(PAGE_SIZE-1));
+               page_offset = offset & (PAGE_SIZE-1);
+               page_length = remain;
+               if ((page_offset + remain) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - page_offset;
+
+               ret = fast_shmem_write(obj_priv->pages,
+                                      page_base, page_offset,
+                                      user_data, page_length);
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
        }
 
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+/**
+ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This avoids taking mmap_sem for faulting on the user's address while the
+ * struct_mutex is held.
+ */
+static int
+i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+                          struct drm_i915_gem_pwrite *args,
+                          struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct mm_struct *mm = current->mm;
+       struct page **user_pages;
+       ssize_t remain;
+       loff_t offset, pinned_pages, i;
+       loff_t first_data_page, last_data_page, num_pages;
+       int shmem_page_index, shmem_page_offset;
+       int data_page_index,  data_page_offset;
+       int page_length;
+       int ret;
+       uint64_t data_ptr = args->data_ptr;
+
+       remain = args->size;
+
+       /* Pin the user pages containing the data.  We can't fault while
+        * holding the struct mutex, and all of the pwrite implementations
+        * want to hold it while dereferencing the user data.
+        */
+       first_data_page = data_ptr / PAGE_SIZE;
+       last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+       if (user_pages == NULL)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+                                     num_pages, 0, 0, user_pages, NULL);
+       up_read(&mm->mmap_sem);
+       if (pinned_pages < num_pages) {
+               ret = -EFAULT;
+               goto fail_put_user_pages;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret != 0)
+               goto fail_unlock;
+
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+       if (ret != 0)
+               goto fail_put_pages;
+
+       obj_priv = obj->driver_private;
        offset = args->offset;
+       obj_priv->dirty = 1;
 
-       written = vfs_write(obj->filp,
-                           (char __user *)(uintptr_t) args->data_ptr,
-                           args->size, &offset);
-       if (written != args->size) {
-               mutex_unlock(&dev->struct_mutex);
-               if (written < 0)
-                       return written;
-               else
-                       return -EINVAL;
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * shmem_page_index = page number within shmem file
+                * shmem_page_offset = offset within page in shmem file
+                * data_page_index = page number in get_user_pages return
+                * data_page_offset = offset with data_page_index page.
+                * page_length = bytes to copy for this page
+                */
+               shmem_page_index = offset / PAGE_SIZE;
+               shmem_page_offset = offset & ~PAGE_MASK;
+               data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+               data_page_offset = data_ptr & ~PAGE_MASK;
+
+               page_length = remain;
+               if ((shmem_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - shmem_page_offset;
+               if ((data_page_offset + page_length) > PAGE_SIZE)
+                       page_length = PAGE_SIZE - data_page_offset;
+
+               ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                     shmem_page_offset,
+                                     user_pages[data_page_index],
+                                     data_page_offset,
+                                     page_length);
+               if (ret)
+                       goto fail_put_pages;
+
+               remain -= page_length;
+               data_ptr += page_length;
+               offset += page_length;
        }
 
+fail_put_pages:
+       i915_gem_object_put_pages(obj);
+fail_unlock:
        mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+       for (i = 0; i < pinned_pages; i++)
+               page_cache_release(user_pages[i]);
+       kfree(user_pages);
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0)
-               ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
-       else
-               ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+                dev->gtt_total != 0) {
+               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+               if (ret == -EFAULT) {
+                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+                                                      file_priv);
+               }
+       } else {
+               ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+               if (ret == -EFAULT) {
+                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+                                                        file_priv);
+               }
+       }
 
 #if WATCH_PWRITE
        if (ret)
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 }
 
 static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
+i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;
 
-       if (obj_priv->page_list == NULL)
-               return;
+       BUG_ON(obj_priv->pages_refcount == 0);
 
+       if (--obj_priv->pages_refcount != 0)
+               return;
 
        for (i = 0; i < page_count; i++)
-               if (obj_priv->page_list[i] != NULL) {
+               if (obj_priv->pages[i] != NULL) {
                        if (obj_priv->dirty)
-                               set_page_dirty(obj_priv->page_list[i]);
-                       mark_page_accessed(obj_priv->page_list[i]);
-                       page_cache_release(obj_priv->page_list[i]);
+                               set_page_dirty(obj_priv->pages[i]);
+                       mark_page_accessed(obj_priv->pages[i]);
+                       page_cache_release(obj_priv->pages[i]);
                }
        obj_priv->dirty = 0;
 
-       drm_free(obj_priv->page_list,
+       drm_free(obj_priv->pages,
                 page_count * sizeof(struct page *),
                 DRM_MEM_DRIVER);
-       obj_priv->page_list = NULL;
+       obj_priv->pages = NULL;
 }
 
 static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);
 
-       i915_gem_object_free_page_list(obj);
+       i915_gem_object_put_pages(obj);
 
        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 }
 
 static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
        struct page *page;
        int ret;
 
-       if (obj_priv->page_list)
+       if (obj_priv->pages_refcount++ != 0)
                return 0;
 
        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
-       BUG_ON(obj_priv->page_list != NULL);
-       obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
-                                        DRM_MEM_DRIVER);
-       if (obj_priv->page_list == NULL) {
+       BUG_ON(obj_priv->pages != NULL);
+       obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
+                                    DRM_MEM_DRIVER);
+       if (obj_priv->pages == NULL) {
                DRM_ERROR("Faled to allocate page list\n");
+               obj_priv->pages_refcount--;
                return -ENOMEM;
        }
 
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        DRM_ERROR("read_mapping_page failed: %d\n", ret);
-                       i915_gem_object_free_page_list(obj);
+                       i915_gem_object_put_pages(obj);
                        return ret;
                }
-               obj_priv->page_list[i] = page;
+               obj_priv->pages[i] = page;
        }
        return 0;
 }
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        DRM_INFO("Binding object of size %d at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
 #endif
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         * into the GTT.
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
-                                              obj_priv->page_list,
+                                              obj_priv->pages,
                                               page_count,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
-               i915_gem_object_free_page_list(obj);
+               i915_gem_object_put_pages(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
-       if (obj_priv->page_list == NULL)
+       if (obj_priv->pages == NULL)
                return;
 
-       drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+       drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
@@ -1913,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 static int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
-       struct drm_device *dev = obj->dev;
        int ret;
 
        i915_gem_object_flush_gpu_write_domain(obj);
@@ -1932,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj);
-               drm_agp_chipset_flush(dev);
 
                obj->read_domains |= I915_GEM_DOMAIN_CPU;
        }
@@ -2144,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 static void
 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
        if (!obj_priv->page_cpu_valid)
@@ -2158,9 +2623,8 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
                for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
                        if (obj_priv->page_cpu_valid[i])
                                continue;
-                       drm_clflush_pages(obj_priv->page_list + i, 1);
+                       drm_clflush_pages(obj_priv->pages + i, 1);
                }
-               drm_agp_chipset_flush(dev);
        }
 
        /* Free the page_cpu_valid mappings which are now stale, whether
@@ -2224,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                if (obj_priv->page_cpu_valid[i])
                        continue;
 
-               drm_clflush_pages(obj_priv->page_list + i, 1);
+               drm_clflush_pages(obj_priv->pages + i, 1);
 
                obj_priv->page_cpu_valid[i] = 1;
        }
@@ -2245,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                 struct drm_file *file_priv,
-                                struct drm_i915_gem_exec_object *entry)
+                                struct drm_i915_gem_exec_object *entry,
+                                struct drm_i915_gem_relocation_entry *relocs)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_relocation_entry reloc;
-       struct drm_i915_gem_relocation_entry __user *relocs;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int i, ret;
        void __iomem *reloc_page;
@@ -2262,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 
        entry->offset = obj_priv->gtt_offset;
 
-       relocs = (struct drm_i915_gem_relocation_entry __user *)
-                (uintptr_t) entry->relocs_ptr;
        /* Apply the relocations, using the GTT aperture to avoid cache
         * flushing requirements.
         */
        for (i = 0; i < entry->relocation_count; i++) {
+               struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
                struct drm_gem_object *target_obj;
                struct drm_i915_gem_object *target_obj_priv;
                uint32_t reloc_val, reloc_offset;
                uint32_t __iomem *reloc_entry;
 
-               ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
-               if (ret != 0) {
-                       i915_gem_object_unpin(obj);
-                       return ret;
-               }
-
                target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-                                                  reloc.target_handle);
+                                                  reloc->target_handle);
                if (target_obj == NULL) {
                        i915_gem_object_unpin(obj);
                        return -EBADF;
@@ -2292,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                 */
                if (target_obj_priv->gtt_space == NULL) {
                        DRM_ERROR("No GTT space found for object %d\n",
-                                 reloc.target_handle);
+                                 reloc->target_handle);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
 
-               if (reloc.offset > obj->size - 4) {
+               if (reloc->offset > obj->size - 4) {
                        DRM_ERROR("Relocation beyond object bounds: "
                                  "obj %p target %d offset %d size %d.\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset, (int) obj->size);
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset, (int) obj->size);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
-               if (reloc.offset & 3) {
+               if (reloc->offset & 3) {
                        DRM_ERROR("Relocation not 4-byte aligned: "
                                  "obj %p target %d offset %d.\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset);
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
 
-               if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-                   reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+               if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+                   reloc->read_domains & I915_GEM_DOMAIN_CPU) {
                        DRM_ERROR("reloc with read/write CPU domains: "
                                  "obj %p target %d offset %d "
                                  "read %08x write %08x",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset,
-                                 reloc.read_domains,
-                                 reloc.write_domain);
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset,
+                                 reloc->read_domains,
+                                 reloc->write_domain);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
 
-               if (reloc.write_domain && target_obj->pending_write_domain &&
-                   reloc.write_domain != target_obj->pending_write_domain) {
+               if (reloc->write_domain && target_obj->pending_write_domain &&
+                   reloc->write_domain != target_obj->pending_write_domain) {
                        DRM_ERROR("Write domain conflict: "
                                  "obj %p target %d offset %d "
                                  "new %08x old %08x\n",
-                                 obj, reloc.target_handle,
-                                 (int) reloc.offset,
-                                 reloc.write_domain,
+                                 obj, reloc->target_handle,
+                                 (int) reloc->offset,
+                                 reloc->write_domain,
                                  target_obj->pending_write_domain);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
@@ -2351,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                         "presumed %08x delta %08x\n",
                         __func__,
                         obj,
-                        (int) reloc.offset,
-                        (int) reloc.target_handle,
-                        (int) reloc.read_domains,
-                        (int) reloc.write_domain,
+                        (int) reloc->offset,
+                        (int) reloc->target_handle,
+                        (int) reloc->read_domains,
+                        (int) reloc->write_domain,
                         (int) target_obj_priv->gtt_offset,
-                        (int) reloc.presumed_offset,
-                        reloc.delta);
+                        (int) reloc->presumed_offset,
+                        reloc->delta);
 #endif
 
-               target_obj->pending_read_domains |= reloc.read_domains;
-               target_obj->pending_write_domain |= reloc.write_domain;
+               target_obj->pending_read_domains |= reloc->read_domains;
+               target_obj->pending_write_domain |= reloc->write_domain;
 
                /* If the relocation already has the right value in it, no
                 * more work needs to be done.
                 */
-               if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+               if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
                        drm_gem_object_unreference(target_obj);
                        continue;
                }
@@ -2381,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                /* Map the page containing the relocation we're going to
                 * perform.
                 */
-               reloc_offset = obj_priv->gtt_offset + reloc.offset;
+               reloc_offset = obj_priv->gtt_offset + reloc->offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                      (reloc_offset &
                                                       ~(PAGE_SIZE - 1)));
                reloc_entry = (uint32_t __iomem *)(reloc_page +
                                                   (reloc_offset & (PAGE_SIZE - 1)));
-               reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+               reloc_val = target_obj_priv->gtt_offset + reloc->delta;
 
 #if WATCH_BUF
                DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-                         obj, (unsigned int) reloc.offset,
+                         obj, (unsigned int) reloc->offset,
                          readl(reloc_entry), reloc_val);
 #endif
                writel(reloc_val, reloc_entry);
                io_mapping_unmap_atomic(reloc_page);
 
-               /* Write the updated presumed offset for this entry back out
-                * to the user.
+               /* The updated presumed offset for this entry will be
+                * copied back out to the user.
                 */
-               reloc.presumed_offset = target_obj_priv->gtt_offset;
-               ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
-               if (ret != 0) {
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return ret;
-               }
+               reloc->presumed_offset = target_obj_priv->gtt_offset;
 
                drm_gem_object_unreference(target_obj);
        }
@@ -2423,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
                              struct drm_i915_gem_execbuffer *exec,
+                             struct drm_clip_rect *cliprects,
                              uint64_t exec_offset)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
-                                            (uintptr_t) exec->cliprects_ptr;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t        exec_start, exec_len;
@@ -2448,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
 
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       int ret = i915_emit_box(dev, boxes, i,
+                       int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
@@ -2504,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
        return ret;
 }
 
+static int
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+                             uint32_t buffer_count,
+                             struct drm_i915_gem_relocation_entry **relocs)
+{
+       uint32_t reloc_count = 0, reloc_index = 0, i;
+       int ret;
+
+       *relocs = NULL;
+       for (i = 0; i < buffer_count; i++) {
+               if (reloc_count + exec_list[i].relocation_count < reloc_count)
+                       return -EINVAL;
+               reloc_count += exec_list[i].relocation_count;
+       }
+
+       *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+       if (*relocs == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < buffer_count; i++) {
+               struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+               ret = copy_from_user(&(*relocs)[reloc_index],
+                                    user_relocs,
+                                    exec_list[i].relocation_count *
+                                    sizeof(**relocs));
+               if (ret != 0) {
+                       drm_free(*relocs, reloc_count * sizeof(**relocs),
+                                DRM_MEM_DRIVER);
+                       *relocs = NULL;
+                       return -EFAULT; /* copy_from_user returns bytes uncopied */
+               }
+
+               reloc_index += exec_list[i].relocation_count;
+       }
+
+       return 0; /* was 'return ret' — uninitialized when buffer_count == 0 */
+}
+
+static int
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+                           uint32_t buffer_count,
+                           struct drm_i915_gem_relocation_entry *relocs)
+{
+       uint32_t reloc_count = 0, i;
+       int ret = 0; /* was uninitialized yet read below — undefined behavior */
+
+       for (i = 0; i < buffer_count; i++) {
+               struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+               if (ret == 0 &&
+                   copy_to_user(user_relocs,
+                                &relocs[reloc_count],
+                                exec_list[i].relocation_count *
+                                sizeof(*relocs)) != 0)
+                       ret = -EFAULT; /* don't leak the raw uncopied-byte count */
+
+               reloc_count += exec_list[i].relocation_count;
+       }
+
+       drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+
+       return ret;
+}
+
 int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
@@ -2515,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        struct drm_gem_object **object_list = NULL;
        struct drm_gem_object *batch_obj;
        struct drm_i915_gem_object *obj_priv;
-       int ret, i, pinned = 0;
+       struct drm_clip_rect *cliprects = NULL;
+       struct drm_i915_gem_relocation_entry *relocs;
+       int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
-       uint32_t seqno, flush_domains;
+       uint32_t seqno, flush_domains, reloc_index;
        int pin_tries;
 
 #if WATCH_EXEC
@@ -2551,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
+       if (args->num_cliprects != 0) {
+               cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
+                                      DRM_MEM_DRIVER);
+               if (cliprects == NULL)
+                       goto pre_mutex_err;
+
+               ret = copy_from_user(cliprects,
+                                    (struct drm_clip_rect __user *)
+                                    (uintptr_t) args->cliprects_ptr,
+                                    sizeof(*cliprects) * args->num_cliprects);
+               if (ret != 0) {
+                       DRM_ERROR("copy %d cliprects failed: %d\n",
+                                 args->num_cliprects, ret);
+                       goto pre_mutex_err;
+               }
+       }
+
+       ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+                                           &relocs);
+       if (ret != 0)
+               goto pre_mutex_err;
+
        mutex_lock(&dev->struct_mutex);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2593,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        /* Pin and relocate */
        for (pin_tries = 0; ; pin_tries++) {
                ret = 0;
+               reloc_index = 0;
+
                for (i = 0; i < args->buffer_count; i++) {
                        object_list[i]->pending_read_domains = 0;
                        object_list[i]->pending_write_domain = 0;
                        ret = i915_gem_object_pin_and_relocate(object_list[i],
                                                               file_priv,
-                                                              &exec_list[i]);
+                                                              &exec_list[i],
+                                                              &relocs[reloc_index]);
                        if (ret)
                                break;
                        pinned = i + 1;
+                       reloc_index += exec_list[i].relocation_count;
                }
                /* success */
                if (ret == 0)
@@ -2687,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 #endif
 
        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -2751,11 +3297,27 @@ err:
                                  args->buffer_count, ret);
        }
 
+       /* Copy the updated relocations out regardless of current error
+        * state.  Failure to update the relocs would mean that the next
+        * time userland calls execbuf, it would do so with presumed offset
+        * state that didn't match the actual object state.
+        */
+       ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+                                          relocs);
+       if (ret2 != 0) {
+               DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+
+               if (ret == 0)
+                       ret = ret2;
+       }
+
 pre_mutex_err:
        drm_free(object_list, sizeof(*object_list) * args->buffer_count,
                 DRM_MEM_DRIVER);
        drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
                 DRM_MEM_DRIVER);
+       drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
+                DRM_MEM_DRIVER);
 
        return ret;
 }
@@ -3192,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev)
 
        dev_priv->status_gfx_addr = obj_priv->gtt_offset;
 
-       dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+       dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3222,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
        obj = dev_priv->hws_obj;
        obj_priv = obj->driver_private;
 
-       kunmap(obj_priv->page_list[0]);
+       kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        dev_priv->hws_obj = NULL;
@@ -3525,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        if (!obj_priv->phys_obj)
                return;
 
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto out;
 
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(dst, KM_USER0);
        }
-       drm_clflush_pages(obj_priv->page_list, page_count);
+       drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
 out:
        obj_priv->phys_obj->cur_obj = NULL;
@@ -3581,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj_priv->phys_obj->cur_obj = obj;
 
-       ret = i915_gem_object_get_page_list(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                DRM_ERROR("failed to get page list\n");
                goto out;
@@ -3590,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
new file mode 100644 (file)
index 0000000..5a4cdb5
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Copyright Ā© 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define DRM_I915_RING_DEBUG 1
+
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define ACTIVE_LIST    1
+#define FLUSHING_LIST  2
+#define INACTIVE_LIST  3
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+{
+       if (obj_priv->user_pin_count > 0)
+               return "P";
+       else if (obj_priv->pin_count > 0)
+               return "p";
+       else
+               return " ";
+}
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+{
+    switch (obj_priv->tiling_mode) {
+    default:
+    case I915_TILING_NONE: return " ";
+    case I915_TILING_X: return "X";
+    case I915_TILING_Y: return "Y";
+    }
+}
+
+static int i915_gem_object_list_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       uintptr_t list = (uintptr_t) node->info_ent->data;
+       struct list_head *head;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+
+       switch (list) {
+       case ACTIVE_LIST:
+               seq_printf(m, "Active:\n");
+               head = &dev_priv->mm.active_list;
+               break;
+       case INACTIVE_LIST:
+               seq_printf(m, "Inctive:\n");
+               head = &dev_priv->mm.inactive_list;
+               break;
+       case FLUSHING_LIST:
+               seq_printf(m, "Flushing:\n");
+               head = &dev_priv->mm.flushing_list;
+               break;
+       default:
+               DRM_INFO("Ooops, unexpected list\n");
+               return 0;
+       }
+
+       list_for_each_entry(obj_priv, head, list)
+       {
+               struct drm_gem_object *obj = obj_priv->obj;
+
+               seq_printf(m, "    %p: %s %08x %08x %d",
+                          obj,
+                          get_pin_flag(obj_priv),
+                          obj->read_domains, obj->write_domain,
+                          obj_priv->last_rendering_seqno);
+
+               if (obj->name)
+                       seq_printf(m, " (name: %d)", obj->name);
+               if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+                       seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
+               seq_printf(m, "\n");
+       }
+       return 0;
+}
+
+static int i915_gem_request_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *gem_request;
+
+       seq_printf(m, "Request:\n");
+       list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+               seq_printf(m, "    %d @ %d\n",
+                          gem_request->seqno,
+                          (int) (jiffies - gem_request->emitted_jiffies));
+       }
+       return 0;
+}
+
+static int i915_gem_seqno_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (dev_priv->hw_status_page != NULL) {
+               seq_printf(m, "Current sequence: %d\n",
+                          i915_get_gem_seqno(dev));
+       } else {
+               seq_printf(m, "Current sequence: hws uninitialized\n");
+       }
+       seq_printf(m, "Waiter sequence:  %d\n",
+                       dev_priv->mm.waiting_gem_seqno);
+       seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+       return 0;
+}
+
+
+static int i915_interrupt_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       seq_printf(m, "Interrupt enable:    %08x\n",
+                  I915_READ(IER));
+       seq_printf(m, "Interrupt identity:  %08x\n",
+                  I915_READ(IIR));
+       seq_printf(m, "Interrupt mask:      %08x\n",
+                  I915_READ(IMR));
+       seq_printf(m, "Pipe A stat:         %08x\n",
+                  I915_READ(PIPEASTAT));
+       seq_printf(m, "Pipe B stat:         %08x\n",
+                  I915_READ(PIPEBSTAT));
+       seq_printf(m, "Interrupts received: %d\n",
+                  atomic_read(&dev_priv->irq_received));
+       if (dev_priv->hw_status_page != NULL) {
+               seq_printf(m, "Current sequence:    %d\n",
+                          i915_get_gem_seqno(dev));
+       } else {
+               seq_printf(m, "Current sequence:    hws uninitialized\n");
+       }
+       seq_printf(m, "Waiter sequence:     %d\n",
+                  dev_priv->mm.waiting_gem_seqno);
+       seq_printf(m, "IRQ sequence:        %d\n",
+                  dev_priv->mm.irq_gem_seqno);
+       return 0;
+}
+
+static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int i;
+
+       seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+       seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+       for (i = 0; i < dev_priv->num_fence_regs; i++) {
+               struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+               if (obj == NULL) {
+                       seq_printf(m, "Fenced object[%2d] = unused\n", i);
+               } else {
+                       struct drm_i915_gem_object *obj_priv;
+
+                       obj_priv = obj->driver_private;
+                       seq_printf(m, "Fenced object[%2d] = %p: %s "
+                                  "%08x %08x %08x %s %08x %08x %d",
+                                  i, obj, get_pin_flag(obj_priv),
+                                  obj_priv->gtt_offset,
+                                  obj->size, obj_priv->stride,
+                                  get_tiling_flag(obj_priv),
+                                  obj->read_domains, obj->write_domain,
+                                  obj_priv->last_rendering_seqno);
+                       if (obj->name)
+                               seq_printf(m, " (name: %d)", obj->name);
+                       seq_printf(m, "\n");
+               }
+       }
+
+       return 0;
+}
+
+static int i915_hws_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int i;
+       volatile u32 *hws;
+
+       hws = (volatile u32 *)dev_priv->hw_status_page;
+       if (hws == NULL)
+               return 0;
+
+       for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+               seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                          i * 4,
+                          hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+       }
+       return 0;
+}
+
+static struct drm_info_list i915_gem_debugfs_list[] = {
+       {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+       {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+       {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+       {"i915_gem_request", i915_gem_request_info, 0},
+       {"i915_gem_seqno", i915_gem_seqno_info, 0},
+       {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
+       {"i915_gem_interrupt", i915_interrupt_info, 0},
+       {"i915_gem_hws", i915_hws_info, 0},
+};
+#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
+
+int i915_gem_debugfs_init(struct drm_minor *minor)
+{
+       return drm_debugfs_create_files(i915_gem_debugfs_list,
+                                       I915_GEM_DEBUGFS_ENTRIES,
+                                       minor->debugfs_root, minor);
+}
+
+void i915_gem_debugfs_cleanup(struct drm_minor *minor)
+{
+       drm_debugfs_remove_files(i915_gem_debugfs_list,
+                                I915_GEM_DEBUGFS_ENTRIES, minor);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
deleted file mode 100644 (file)
index 4d1b9de..0000000
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright Ā© 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Eric Anholt <eric@anholt.net>
- *    Keith Packard <keithp@keithp.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-static int i915_gem_active_info(char *buf, char **start, off_t offset,
-                               int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       DRM_PROC_PRINT("Active:\n");
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-                           list)
-       {
-               struct drm_gem_object *obj = obj_priv->obj;
-               if (obj->name) {
-                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
-                                      obj, obj->name,
-                                      obj->read_domains, obj->write_domain,
-                                      obj_priv->last_rendering_seqno);
-               } else {
-                       DRM_PROC_PRINT("       %p: %08x %08x %d\n",
-                                      obj,
-                                      obj->read_domains, obj->write_domain,
-                                      obj_priv->last_rendering_seqno);
-               }
-       }
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
-                                 int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       DRM_PROC_PRINT("Flushing:\n");
-       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-                           list)
-       {
-               struct drm_gem_object *obj = obj_priv->obj;
-               if (obj->name) {
-                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
-                                      obj, obj->name,
-                                      obj->read_domains, obj->write_domain,
-                                      obj_priv->last_rendering_seqno);
-               } else {
-                       DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
-                                      obj->read_domains, obj->write_domain,
-                                      obj_priv->last_rendering_seqno);
-               }
-       }
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
-                                 int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       DRM_PROC_PRINT("Inactive:\n");
-       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
-                           list)
-       {
-               struct drm_gem_object *obj = obj_priv->obj;
-               if (obj->name) {
-                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
-                                      obj, obj->name,
-                                      obj->read_domains, obj->write_domain,
-                                      obj_priv->last_rendering_seqno);
-               } else {
-                       DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
-                                      obj->read_domains, obj->write_domain,
-                                      obj_priv->last_rendering_seqno);
-               }
-       }
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-static int i915_gem_request_info(char *buf, char **start, off_t offset,
-                                int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_request *gem_request;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       DRM_PROC_PRINT("Request:\n");
-       list_for_each_entry(gem_request, &dev_priv->mm.request_list,
-                           list)
-       {
-               DRM_PROC_PRINT("    %d @ %d\n",
-                              gem_request->seqno,
-                              (int) (jiffies - gem_request->emitted_jiffies));
-       }
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
-                              int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       if (dev_priv->hw_status_page != NULL) {
-               DRM_PROC_PRINT("Current sequence: %d\n",
-                              i915_get_gem_seqno(dev));
-       } else {
-               DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
-       }
-       DRM_PROC_PRINT("Waiter sequence:  %d\n",
-                      dev_priv->mm.waiting_gem_seqno);
-       DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-
-static int i915_interrupt_info(char *buf, char **start, off_t offset,
-                              int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int len = 0;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       DRM_PROC_PRINT("Interrupt enable:    %08x\n",
-                      I915_READ(IER));
-       DRM_PROC_PRINT("Interrupt identity:  %08x\n",
-                      I915_READ(IIR));
-       DRM_PROC_PRINT("Interrupt mask:      %08x\n",
-                      I915_READ(IMR));
-       DRM_PROC_PRINT("Pipe A stat:         %08x\n",
-                      I915_READ(PIPEASTAT));
-       DRM_PROC_PRINT("Pipe B stat:         %08x\n",
-                      I915_READ(PIPEBSTAT));
-       DRM_PROC_PRINT("Interrupts received: %d\n",
-                      atomic_read(&dev_priv->irq_received));
-       if (dev_priv->hw_status_page != NULL) {
-               DRM_PROC_PRINT("Current sequence:    %d\n",
-                              i915_get_gem_seqno(dev));
-       } else {
-               DRM_PROC_PRINT("Current sequence:    hws uninitialized\n");
-       }
-       DRM_PROC_PRINT("Waiter sequence:     %d\n",
-                      dev_priv->mm.waiting_gem_seqno);
-       DRM_PROC_PRINT("IRQ sequence:        %d\n",
-                      dev_priv->mm.irq_gem_seqno);
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-static int i915_hws_info(char *buf, char **start, off_t offset,
-                        int request, int *eof, void *data)
-{
-       struct drm_minor *minor = (struct drm_minor *) data;
-       struct drm_device *dev = minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int len = 0, i;
-       volatile u32 *hws;
-
-       if (offset > DRM_PROC_LIMIT) {
-               *eof = 1;
-               return 0;
-       }
-
-       hws = (volatile u32 *)dev_priv->hw_status_page;
-       if (hws == NULL) {
-               *eof = 1;
-               return 0;
-       }
-
-       *start = &buf[offset];
-       *eof = 0;
-       for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
-               DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-                              i * 4,
-                              hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
-       }
-       if (len > request + offset)
-               return request;
-       *eof = 1;
-       return len - offset;
-}
-
-static struct drm_proc_list {
-       /** file name */
-       const char *name;
-       /** proc callback*/
-       int (*f) (char *, char **, off_t, int, int *, void *);
-} i915_gem_proc_list[] = {
-       {"i915_gem_active", i915_gem_active_info},
-       {"i915_gem_flushing", i915_gem_flushing_info},
-       {"i915_gem_inactive", i915_gem_inactive_info},
-       {"i915_gem_request", i915_gem_request_info},
-       {"i915_gem_seqno", i915_gem_seqno_info},
-       {"i915_gem_interrupt", i915_interrupt_info},
-       {"i915_gem_hws", i915_hws_info},
-};
-
-#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
-
-int i915_gem_proc_init(struct drm_minor *minor)
-{
-       struct proc_dir_entry *ent;
-       int i, j;
-
-       for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
-               ent = create_proc_entry(i915_gem_proc_list[i].name,
-                                       S_IFREG | S_IRUGO, minor->dev_root);
-               if (!ent) {
-                       DRM_ERROR("Cannot create /proc/dri/.../%s\n",
-                                 i915_gem_proc_list[i].name);
-                       for (j = 0; j < i; j++)
-                               remove_proc_entry(i915_gem_proc_list[i].name,
-                                                 minor->dev_root);
-                       return -1;
-               }
-               ent->read_proc = i915_gem_proc_list[i].f;
-               ent->data = minor;
-       }
-       return 0;
-}
-
-void i915_gem_proc_cleanup(struct drm_minor *minor)
-{
-       int i;
-
-       if (!minor->dev_root)
-               return;
-
-       for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
-               remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
-}
index 7fb4191ef934c2b9a9549a36737e075d6ee3fca7..4cce1aef438e9fc06ed357ccc0c5520b0e9fde27 100644 (file)
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-       } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
-                  IS_GM45(dev)) {
+       } else if (IS_MOBILE(dev)) {
                uint32_t dcc;
 
-               /* On 915-945 and GM965, channel interleave by the CPU is
-                * determined by DCC.  The CPU will alternate based on bit 6
-                * in interleaved mode, and the GPU will then also alternate
-                * on bit 6, 9, and 10 for X, but the CPU may also optionally
-                * alternate based on bit 17 (XOR not disabled and XOR
-                * bit == 17).
+               /* On mobile 9xx chipsets, channel interleave by the CPU is
+                * determined by DCC.  For single-channel, neither the CPU
+                * nor the GPU do swizzling.  For dual channel interleaved,
+                * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+                * 9 for Y tiled.  The CPU's interleave is independent, and
+                * can be based on either bit 11 (haven't seen this yet) or
+                * bit 17 (common).
                 */
                dcc = I915_READ(DCC);
                switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                        swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        break;
                case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
-                       if (IS_I915G(dev) || IS_I915GM(dev) ||
-                           dcc & DCC_CHANNEL_XOR_DISABLE) {
+                       if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+                               /* This is the base swizzling by the GPU for
+                                * tiled buffers.
+                                */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                                swizzle_y = I915_BIT_6_SWIZZLE_9;
-                       } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
-                                  (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
-                               /* GM965/GM45 does either bit 11 or bit 17
-                                * swizzling.
-                                */
+                       } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+                               /* Bit 11 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
                                swizzle_y = I915_BIT_6_SWIZZLE_9_11;
                        } else {
-                               /* Bit 17 or perhaps other swizzling */
+                               /* Bit 17 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                                swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
                        }
index 90600d89941375c7b3257ce8769621942366d670..377cc588f5e99d834f4e59aad3a83d7c9e749ef6 100644 (file)
 #define   DPLLB_LVDS_P2_CLOCK_DIV_7    (1 << 24) /* i915 */
 #define   DPLL_P2_CLOCK_DIV_MASK       0x03000000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK  0x00ff0000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_IGD      0x00ff8000 /* IGD */
 
 #define I915_FIFO_UNDERRUN_STATUS              (1UL<<31)
 #define I915_CRC_ERROR_ENABLE                  (1UL<<29)
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS        0x003f0000
 #define   DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
 /* i830, required in DVO non-gang */
 #define   PLL_P2_DIVIDE_BY_4           (1 << 23)
 #define   PLL_P1_DIVIDE_BY_TWO         (1 << 21) /* i830 */
 #define FPB0   0x06048
 #define FPB1   0x0604c
 #define   FP_N_DIV_MASK                0x003f0000
+#define   FP_N_IGD_DIV_MASK    0x00ff0000
 #define   FP_N_DIV_SHIFT               16
 #define   FP_M1_DIV_MASK       0x00003f00
 #define   FP_M1_DIV_SHIFT               8
 #define   FP_M2_DIV_MASK       0x0000003f
+#define   FP_M2_IGD_DIV_MASK   0x000000ff
 #define   FP_M2_DIV_SHIFT               0
 #define DPLL_TEST      0x606c
 #define   DPLLB_TEST_SDVO_DIV_1                (0 << 22)
 #define   TV_HOTPLUG_INT_EN                    (1 << 18)
 #define   CRT_HOTPLUG_INT_EN                   (1 << 9)
 #define   CRT_HOTPLUG_FORCE_DETECT             (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32       (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64       (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M             (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M             (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40         (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50         (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60         (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70         (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK       (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G            (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G            (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV       (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV       (1 << 2)
+#define CRT_HOTPLUG_MASK                       (0x3fc) /* Bits 9-2 */
+
 
 #define PORT_HOTPLUG_STAT      0x61114
 #define   HDMIB_HOTPLUG_INT_STATUS             (1 << 29)
  */
 # define TV_ENC_C0_FIX                 (1 << 10)
 /** Bits that must be preserved by software */
-# define TV_CTL_SAVE                   ((3 << 8) | (3 << 6))
+# define TV_CTL_SAVE                   ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
 # define TV_FUSE_STATE_MASK            (3 << 4)
 /** Read-only state that reports all features enabled */
 # define TV_FUSE_STATE_ENABLED         (0 << 4)
index 5ea715ace3a0030f3d3c4ddcc24d79ee45df9f9a..de621aad85b56c9789f1149d6b3806a74c7f8799 100644 (file)
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
        u8 panel_type;
        u8 rsvd1;
        /* LVDS capabilities, stored in a dword */
-       u8 rsvd2:1;
-       u8 lvds_edid:1;
-       u8 pixel_dither:1;
-       u8 pfit_ratio_auto:1;
-       u8 pfit_gfx_mode_enhanced:1;
-       u8 pfit_text_mode_enhanced:1;
        u8 pfit_mode:2;
+       u8 pfit_text_mode_enhanced:1;
+       u8 pfit_gfx_mode_enhanced:1;
+       u8 pfit_ratio_auto:1;
+       u8 pixel_dither:1;
+       u8 lvds_edid:1;
+       u8 rsvd2:1;
        u8 rsvd4;
 } __attribute__((packed));
 
index dcaed3466e835efb1be64b6ece3189e9c38f508c..2b6d44381c310874926eb9f2e8d55b5b6e91fad8 100644 (file)
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 static int intel_crt_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
+       struct drm_device *dev = connector->dev;
+
+       int max_clock = 0;
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
 
-       if (mode->clock > 400000 || mode->clock < 25000)
-               return MODE_CLOCK_RANGE;
+       if (mode->clock < 25000)
+               return MODE_CLOCK_LOW;
+
+       if (!IS_I9XX(dev))
+               max_clock = 350000;
+       else
+               max_clock = 400000;
+       if (mode->clock > max_clock)
+               return MODE_CLOCK_HIGH;
 
        return MODE_OK;
 }
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
-       if (intel_crtc->pipe == 0)
+       if (intel_crtc->pipe == 0) {
                adpa |= ADPA_PIPE_A_SELECT;
-       else
+               I915_WRITE(BCLRPAT_A, 0);
+       } else {
                adpa |= ADPA_PIPE_B_SELECT;
+               I915_WRITE(BCLRPAT_B, 0);
+       }
 
        I915_WRITE(ADPA, adpa);
 }
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 temp;
-
-       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-       temp = I915_READ(PORT_HOTPLUG_EN);
-
-       I915_WRITE(PORT_HOTPLUG_EN,
-                  temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+       u32 hotplug_en;
+       int i, tries = 0;
+       /*
+        * On 4 series desktop, CRT detect sequence need to be done twice
+        * to get a reliable result.
+        */
 
-       do {
-               if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
-                       break;
-               msleep(1);
-       } while (time_after(timeout, jiffies));
+       if (IS_G4X(dev) && !IS_GM45(dev))
+               tries = 2;
+       else
+               tries = 1;
+       hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+       hotplug_en &= ~(CRT_HOTPLUG_MASK);
+       hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+       if (IS_GM45(dev))
+               hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+
+       hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+       for (i = 0; i < tries ; i++) {
+               unsigned long timeout;
+               /* turn on the FORCE_DETECT */
+               I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+               timeout = jiffies + msecs_to_jiffies(1000);
+               /* wait for FORCE_DETECT to go off */
+               do {
+                       if (!(I915_READ(PORT_HOTPLUG_EN) &
+                                       CRT_HOTPLUG_FORCE_DETECT))
+                               break;
+                       msleep(1);
+               } while (time_after(timeout, jiffies));
+       }
 
        if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
            CRT_HOTPLUG_MONITOR_COLOR)
index a2834276cb38637949648dad2ae2b59de8995fbf..d9c50ff94d7633d2bb6d166e73793bad1eb6691b 100644 (file)
@@ -56,11 +56,13 @@ typedef struct {
 } intel_p2_t;
 
 #define INTEL_P2_NUM                 2
-
-typedef struct {
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
     intel_range_t   dot, vco, n, m, m1, m2, p, p1;
     intel_p2_t     p2;
-} intel_limit_t;
+    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+                     int, int, intel_clock_t *);
+};
 
 #define I8XX_DOT_MIN             25000
 #define I8XX_DOT_MAX            350000
@@ -90,18 +92,32 @@ typedef struct {
 #define I9XX_DOT_MAX            400000
 #define I9XX_VCO_MIN           1400000
 #define I9XX_VCO_MAX           2800000
+#define IGD_VCO_MIN            1700000
+#define IGD_VCO_MAX            3500000
 #define I9XX_N_MIN                   1
 #define I9XX_N_MAX                   6
+/* IGD's Ncounter is a ring counter */
+#define IGD_N_MIN                    3
+#define IGD_N_MAX                    6
 #define I9XX_M_MIN                  70
 #define I9XX_M_MAX                 120
+#define IGD_M_MIN                    2
+#define IGD_M_MAX                  256
 #define I9XX_M1_MIN                 10
 #define I9XX_M1_MAX                 22
 #define I9XX_M2_MIN                  5
 #define I9XX_M2_MAX                  9
+/* IGD M1 is reserved, and must be 0 */
+#define IGD_M1_MIN                   0
+#define IGD_M1_MAX                   0
+#define IGD_M2_MIN                   0
+#define IGD_M2_MAX                   254
 #define I9XX_P_SDVO_DAC_MIN          5
 #define I9XX_P_SDVO_DAC_MAX         80
 #define I9XX_P_LVDS_MIN                      7
 #define I9XX_P_LVDS_MAX                     98
+#define IGD_P_LVDS_MIN               7
+#define IGD_P_LVDS_MAX              112
 #define I9XX_P1_MIN                  1
 #define I9XX_P1_MAX                  8
 #define I9XX_P2_SDVO_DAC_SLOW               10
@@ -115,6 +131,97 @@ typedef struct {
 #define INTEL_LIMIT_I8XX_LVDS      1
 #define INTEL_LIMIT_I9XX_SDVO_DAC   2
 #define INTEL_LIMIT_I9XX_LVDS      3
+#define INTEL_LIMIT_G4X_SDVO       4
+#define INTEL_LIMIT_G4X_HDMI_DAC   5
+#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS   6
+#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS   7
+#define INTEL_LIMIT_IGD_SDVO_DAC    8
+#define INTEL_LIMIT_IGD_LVDS       9
+
+/*The parameter is for SDVO on G4x platform*/
+#define G4X_DOT_SDVO_MIN           25000
+#define G4X_DOT_SDVO_MAX           270000
+#define G4X_VCO_MIN                1750000
+#define G4X_VCO_MAX                3500000
+#define G4X_N_SDVO_MIN             1
+#define G4X_N_SDVO_MAX             4
+#define G4X_M_SDVO_MIN             104
+#define G4X_M_SDVO_MAX             138
+#define G4X_M1_SDVO_MIN            17
+#define G4X_M1_SDVO_MAX            23
+#define G4X_M2_SDVO_MIN            5
+#define G4X_M2_SDVO_MAX            11
+#define G4X_P_SDVO_MIN             10
+#define G4X_P_SDVO_MAX             30
+#define G4X_P1_SDVO_MIN            1
+#define G4X_P1_SDVO_MAX            3
+#define G4X_P2_SDVO_SLOW           10
+#define G4X_P2_SDVO_FAST           10
+#define G4X_P2_SDVO_LIMIT          270000
+
+/*The parameter is for HDMI_DAC on G4x platform*/
+#define G4X_DOT_HDMI_DAC_MIN           22000
+#define G4X_DOT_HDMI_DAC_MAX           400000
+#define G4X_N_HDMI_DAC_MIN             1
+#define G4X_N_HDMI_DAC_MAX             4
+#define G4X_M_HDMI_DAC_MIN             104
+#define G4X_M_HDMI_DAC_MAX             138
+#define G4X_M1_HDMI_DAC_MIN            16
+#define G4X_M1_HDMI_DAC_MAX            23
+#define G4X_M2_HDMI_DAC_MIN            5
+#define G4X_M2_HDMI_DAC_MAX            11
+#define G4X_P_HDMI_DAC_MIN             5
+#define G4X_P_HDMI_DAC_MAX             80
+#define G4X_P1_HDMI_DAC_MIN            1
+#define G4X_P1_HDMI_DAC_MAX            8
+#define G4X_P2_HDMI_DAC_SLOW           10
+#define G4X_P2_HDMI_DAC_FAST           5
+#define G4X_P2_HDMI_DAC_LIMIT          165000
+
+/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN           20000
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX           115000
+#define G4X_N_SINGLE_CHANNEL_LVDS_MIN             1
+#define G4X_N_SINGLE_CHANNEL_LVDS_MAX             3
+#define G4X_M_SINGLE_CHANNEL_LVDS_MIN             104
+#define G4X_M_SINGLE_CHANNEL_LVDS_MAX             138
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN            17
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX            23
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN            5
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX            11
+#define G4X_P_SINGLE_CHANNEL_LVDS_MIN             28
+#define G4X_P_SINGLE_CHANNEL_LVDS_MAX             112
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN            2
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX            8
+#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW           14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST           14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT          0
+
+/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN           80000
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX           224000
+#define G4X_N_DUAL_CHANNEL_LVDS_MIN             1
+#define G4X_N_DUAL_CHANNEL_LVDS_MAX             3
+#define G4X_M_DUAL_CHANNEL_LVDS_MIN             104
+#define G4X_M_DUAL_CHANNEL_LVDS_MAX             138
+#define G4X_M1_DUAL_CHANNEL_LVDS_MIN            17
+#define G4X_M1_DUAL_CHANNEL_LVDS_MAX            23
+#define G4X_M2_DUAL_CHANNEL_LVDS_MIN            5
+#define G4X_M2_DUAL_CHANNEL_LVDS_MAX            11
+#define G4X_P_DUAL_CHANNEL_LVDS_MIN             14
+#define G4X_P_DUAL_CHANNEL_LVDS_MAX             42
+#define G4X_P1_DUAL_CHANNEL_LVDS_MIN            2
+#define G4X_P1_DUAL_CHANNEL_LVDS_MAX            6
+#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW           7
+#define G4X_P2_DUAL_CHANNEL_LVDS_FAST           7
+#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT          0
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+                   int target, int refclk, intel_clock_t *best_clock);
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+                       int target, int refclk, intel_clock_t *best_clock);
 
 static const intel_limit_t intel_limits[] = {
     { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
         .p1  = { .min = I8XX_P1_MIN,           .max = I8XX_P1_MAX },
        .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                 .p2_slow = I8XX_P2_SLOW,       .p2_fast = I8XX_P2_FAST },
+       .find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_I8XX_LVDS */
         .dot = { .min = I8XX_DOT_MIN,          .max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
         .p1  = { .min = I8XX_P1_LVDS_MIN,      .max = I8XX_P1_LVDS_MAX },
        .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                 .p2_slow = I8XX_P2_LVDS_SLOW,  .p2_fast = I8XX_P2_LVDS_FAST },
+       .find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_I9XX_SDVO_DAC */
         .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
         .p1  = { .min = I9XX_P1_MIN,           .max = I9XX_P1_MAX },
        .p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,      .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+       .find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_I9XX_LVDS */
         .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
         */
        .p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_LVDS_SLOW,  .p2_fast = I9XX_P2_LVDS_FAST },
+       .find_pll = intel_find_best_PLL,
+    },
+    /* below parameter and function is for G4X Chipset Family*/
+    { /* INTEL_LIMIT_G4X_SDVO */
+       .dot = { .min = G4X_DOT_SDVO_MIN,       .max = G4X_DOT_SDVO_MAX },
+       .vco = { .min = G4X_VCO_MIN,            .max = G4X_VCO_MAX},
+       .n   = { .min = G4X_N_SDVO_MIN,         .max = G4X_N_SDVO_MAX },
+       .m   = { .min = G4X_M_SDVO_MIN,         .max = G4X_M_SDVO_MAX },
+       .m1  = { .min = G4X_M1_SDVO_MIN,        .max = G4X_M1_SDVO_MAX },
+       .m2  = { .min = G4X_M2_SDVO_MIN,        .max = G4X_M2_SDVO_MAX },
+       .p   = { .min = G4X_P_SDVO_MIN,         .max = G4X_P_SDVO_MAX },
+       .p1  = { .min = G4X_P1_SDVO_MIN,        .max = G4X_P1_SDVO_MAX},
+       .p2  = { .dot_limit = G4X_P2_SDVO_LIMIT,
+                .p2_slow = G4X_P2_SDVO_SLOW,
+                .p2_fast = G4X_P2_SDVO_FAST
+       },
+       .find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_G4X_HDMI_DAC */
+       .dot = { .min = G4X_DOT_HDMI_DAC_MIN,   .max = G4X_DOT_HDMI_DAC_MAX },
+       .vco = { .min = G4X_VCO_MIN,            .max = G4X_VCO_MAX},
+       .n   = { .min = G4X_N_HDMI_DAC_MIN,     .max = G4X_N_HDMI_DAC_MAX },
+       .m   = { .min = G4X_M_HDMI_DAC_MIN,     .max = G4X_M_HDMI_DAC_MAX },
+       .m1  = { .min = G4X_M1_HDMI_DAC_MIN,    .max = G4X_M1_HDMI_DAC_MAX },
+       .m2  = { .min = G4X_M2_HDMI_DAC_MIN,    .max = G4X_M2_HDMI_DAC_MAX },
+       .p   = { .min = G4X_P_HDMI_DAC_MIN,     .max = G4X_P_HDMI_DAC_MAX },
+       .p1  = { .min = G4X_P1_HDMI_DAC_MIN,    .max = G4X_P1_HDMI_DAC_MAX},
+       .p2  = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
+                .p2_slow = G4X_P2_HDMI_DAC_SLOW,
+                .p2_fast = G4X_P2_HDMI_DAC_FAST
+       },
+       .find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+       .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
+                .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
+       .vco = { .min = G4X_VCO_MIN,
+                .max = G4X_VCO_MAX },
+       .n   = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
+                .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
+       .m   = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
+                .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
+       .m1  = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
+                .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
+       .m2  = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
+                .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
+       .p   = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
+                .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
+       .p1  = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
+                .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
+       .p2  = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
+                .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
+                .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
+       },
+       .find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+       .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
+                .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
+       .vco = { .min = G4X_VCO_MIN,
+                .max = G4X_VCO_MAX },
+       .n   = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
+                .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
+       .m   = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
+                .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
+       .m1  = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
+                .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
+       .m2  = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
+                .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
+       .p   = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
+                .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
+       .p1  = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
+                .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
+       .p2  = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
+                .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
+                .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
+       },
+       .find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_IGD_SDVO */
+        .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX},
+        .vco = { .min = IGD_VCO_MIN,           .max = IGD_VCO_MAX },
+        .n   = { .min = IGD_N_MIN,             .max = IGD_N_MAX },
+        .m   = { .min = IGD_M_MIN,             .max = IGD_M_MAX },
+        .m1  = { .min = IGD_M1_MIN,            .max = IGD_M1_MAX },
+        .m2  = { .min = IGD_M2_MIN,            .max = IGD_M2_MAX },
+        .p   = { .min = I9XX_P_SDVO_DAC_MIN,    .max = I9XX_P_SDVO_DAC_MAX },
+        .p1  = { .min = I9XX_P1_MIN,           .max = I9XX_P1_MAX },
+       .p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+                .p2_slow = I9XX_P2_SDVO_DAC_SLOW,      .p2_fast = I9XX_P2_SDVO_DAC_FAST },
     },
+    { /* INTEL_LIMIT_IGD_LVDS */
+        .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX },
+        .vco = { .min = IGD_VCO_MIN,           .max = IGD_VCO_MAX },
+        .n   = { .min = IGD_N_MIN,             .max = IGD_N_MAX },
+        .m   = { .min = IGD_M_MIN,             .max = IGD_M_MAX },
+        .m1  = { .min = IGD_M1_MIN,            .max = IGD_M1_MAX },
+        .m2  = { .min = IGD_M2_MIN,            .max = IGD_M2_MAX },
+        .p   = { .min = IGD_P_LVDS_MIN,        .max = IGD_P_LVDS_MAX },
+        .p1  = { .min = I9XX_P1_MIN,           .max = I9XX_P1_MAX },
+       /* IGD only supports single-channel mode. */
+       .p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+                .p2_slow = I9XX_P2_LVDS_SLOW,  .p2_fast = I9XX_P2_LVDS_SLOW },
+    },
+
 };
 
+static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const intel_limit_t *limit;
+
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP)
+                       /* LVDS with dual channel */
+                       limit = &intel_limits
+                                       [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+               else
+                       /* LVDS with single channel */
+                       limit = &intel_limits
+                                       [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+                  intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+               limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+               limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+       } else /* The option is for other outputs */
+               limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+
+       return limit;
+}
+
 static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        const intel_limit_t *limit;
 
-       if (IS_I9XX(dev)) {
+       if (IS_G4X(dev)) {
+               limit = intel_g4x_limit(crtc);
+       } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
                else
                        limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+       } else if (IS_IGD(dev)) {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+                       limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+               else
+                       limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
        } else {
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
        return limit;
 }
 
-static void intel_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in IGD, n is a ring counter */
+static void igd_clock(int refclk, intel_clock_t *clock)
 {
+       clock->m = clock->m2 + 2;
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / clock->n;
+       clock->dot = clock->vco / clock->p;
+}
+
+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+{
+       if (IS_IGD(dev)) {
+               igd_clock(refclk, clock);
+               return;
+       }
        clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
        clock->p = clock->p1 * clock->p2;
        clock->vco = refclk * clock->m / (clock->n + 2);
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
 {
        const intel_limit_t *limit = intel_limit (crtc);
+       struct drm_device *dev = crtc->dev;
 
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
                INTELPllInvalid ("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid ("m1 out of range\n");
-       if (clock->m1 <= clock->m2)
+       if (clock->m1 <= clock->m2 && !IS_IGD(dev))
                INTELPllInvalid ("m1 <= m2\n");
        if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
                INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
        return true;
 }
 
-/**
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
-                               int refclk, intel_clock_t *best_clock)
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+                   int target, int refclk, intel_clock_t *best_clock)
+
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
-       const intel_limit_t *limit = intel_limit(crtc);
        int err = target;
 
        if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
        memset (best_clock, 0, sizeof (*best_clock));
 
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-               for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
-                            clock.m2 <= limit->m2.max; clock.m2++) {
+               for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
+                       /* m1 is always 0 in IGD */
+                       if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+                               break;
                        for (clock.n = limit->n.min; clock.n <= limit->n.max;
                             clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                     clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;
 
-                                       intel_clock(refclk, &clock);
+                                       intel_clock(dev, refclk, &clock);
 
                                        if (!intel_PLL_is_valid(crtc, &clock))
                                                continue;
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
        return (err != target);
 }
 
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+                       int target, int refclk, intel_clock_t *best_clock)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       intel_clock_t clock;
+       int max_n;
+       bool found;
+       /* approximately equals target * 0.00488 */
+       int err_most = (target >> 8) + (target >> 10);
+       found = false;
+
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP)
+                       clock.p2 = limit->p2.p2_fast;
+               else
+                       clock.p2 = limit->p2.p2_slow;
+       } else {
+               if (target < limit->p2.dot_limit)
+                       clock.p2 = limit->p2.p2_slow;
+               else
+                       clock.p2 = limit->p2.p2_fast;
+       }
+
+       memset(best_clock, 0, sizeof(*best_clock));
+       max_n = limit->n.max;
+       /* based on hardware requirement, prefer smaller n for precision */
+       for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+               /* based on hardware requirement, prefer larger m1, m2, p1 */
+               for (clock.m1 = limit->m1.max;
+                    clock.m1 >= limit->m1.min; clock.m1--) {
+                       for (clock.m2 = limit->m2.max;
+                            clock.m2 >= limit->m2.min; clock.m2--) {
+                               for (clock.p1 = limit->p1.max;
+                                    clock.p1 >= limit->p1.min; clock.p1--) {
+                                       int this_err;
+
+                                       intel_clock(dev, refclk, &clock);
+                                       if (!intel_PLL_is_valid(crtc, &clock))
+                                               continue;
+                                       this_err = abs(clock.dot - target) ;
+                                       if (this_err < err_most) {
+                                               *best_clock = clock;
+                                               err_most = this_err;
+                                               max_n = clock.n;
+                                               found = true;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       return found;
+}
+
 void
 intel_wait_for_vblank(struct drm_device *dev)
 {
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
                return 400000;
        else if (IS_I915G(dev))
                return 333000;
-       else if (IS_I945GM(dev) || IS_845G(dev))
+       else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
                return 200000;
        else if (IS_I915GM(dev)) {
                u16 gcfgc = 0;
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        bool is_crt = false, is_lvds = false, is_tv = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
+       const intel_limit_t *limit;
        int ret;
 
        drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                refclk = 48000;
        }
 
-       ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
+       /*
+        * Returns a set of divisors for the desired target clock with the given
+        * refclk, or FALSE.  The returned values represent the clock equation:
+        * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+        */
+       limit = intel_limit(crtc);
+       ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }
 
-       fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+       if (IS_IGD(dev))
+               fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
+       else
+               fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 
        dpll = DPLL_VGA_MODE_DIS;
        if (IS_I9XX(dev)) {
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                }
 
                /* compute bitmask from p1 value */
-               dpll |= (1 << (clock.p1 - 1)) << 16;
+               if (IS_IGD(dev))
+                       dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+               else
+                       dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                switch (clock.p2) {
                case 5:
                        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
 
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-       clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
-       clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+       if (IS_IGD(dev)) {
+               clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+               clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       } else {
+               clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+               clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       }
+
        if (IS_I9XX(dev)) {
-               clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+               if (IS_IGD(dev))
+                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
+                               DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+               else
+                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);
 
                switch (dpll & DPLL_MODE_MASK) {
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                }
 
                /* XXX: Handle the 100Mhz refclk */
-               intel_clock(96000, &clock);
+               intel_clock(dev, 96000, &clock);
        } else {
                bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                        if ((dpll & PLL_REF_INPUT_MASK) ==
                            PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                                /* XXX: might not be 66MHz */
-                               intel_clock(66000, &clock);
+                               intel_clock(dev, 66000, &clock);
                        } else
-                               intel_clock(48000, &clock);
+                               intel_clock(dev, 48000, &clock);
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                        else
                                clock.p2 = 2;
 
-                       intel_clock(48000, &clock);
+                       intel_clock(dev, 48000, &clock);
                }
        }
 
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
 
        if (IS_I9XX(dev)) {
                int found;
+               u32 reg;
 
                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        found = intel_sdvo_init(dev, SDVOB);
                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
                                intel_hdmi_init(dev, SDVOB);
                }
-               if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
+
+               /* Before G4X SDVOC doesn't have its own detect register */
+               if (IS_G4X(dev))
+                       reg = SDVOC;
+               else
+                       reg = SDVOB;
+
+               if (I915_READ(reg) & SDVO_DETECTED) {
                        found = intel_sdvo_init(dev, SDVOC);
                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
                                intel_hdmi_init(dev, SDVOC);
index 0d211af98854c18debd6af4f1ea6a246af16871d..6619f26e46a576eb643defb81673aea2c2d88d72 100644 (file)
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
                pfit_control = 0;
 
        if (!IS_I965G(dev)) {
-               if (dev_priv->panel_wants_dither)
+               if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
                        pfit_control |= PANEL_8TO6_DITHER_ENABLE;
        }
        else
index 56485d67369b8dcf913b28b17afa9bb3bd3b3de8..ceca9471a75a2df08780e395c024d0a84fe587a4 100644 (file)
@@ -217,8 +217,8 @@ static const u32 filter_table[] = {
  */
 static const struct color_conversion ntsc_m_csc_composite = {
        .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
-       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
-       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
 };
 
 static const struct color_conversion ntsc_m_csc_svideo = {
-       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
-       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
-       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
 
 static const struct color_conversion ntsc_j_csc_composite = {
        .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
-       .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
-       .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
+       .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+       .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
 
 static const struct color_conversion ntsc_j_csc_svideo = {
        .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
-       .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
-       .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
+       .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+       .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
 
 static const struct color_conversion pal_csc_composite = {
        .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
-       .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
-       .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
+       .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+       .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
 };
 
 static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
 
 static const struct color_conversion pal_csc_svideo = {
        .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
-       .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
-       .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
+       .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+       .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
 };
 
 static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
 
 static const struct color_conversion pal_m_csc_composite = {
        .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
-       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
-       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
 };
 
 static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
 };
 
 static const struct color_conversion pal_m_csc_svideo = {
-       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
-       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
-       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
 };
 
 static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
 
 static const struct color_conversion pal_n_csc_composite = {
        .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
-       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
-       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
 };
 
 static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
 };
 
 static const struct color_conversion pal_n_csc_svideo = {
-       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
-       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
-       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
 };
 
 static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
  * Component connections
  */
 static const struct color_conversion sdtv_csc_yprpb = {
-       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
-       .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
-       .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+       .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+       .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
 };
 
 static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
 };
 
 static const struct color_conversion hdtv_csc_yprpb = {
-       .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
-       .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
-       .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
+       .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+       .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+       .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
 };
 
 static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@ struct tv_mode {
 static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC-M",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 29970,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
                .vburst_start_f4 = 10,              .vburst_end_f4      = 240,
 
                /* desired 3.5800000 actual 3.5800000 clock 107.52 */
-               .dda1_inc       =    136,
-               .dda2_inc       =   7624,           .dda2_size          =  20013,
+               .dda1_inc       =    135,
+               .dda2_inc       =  20800,           .dda2_size          =  27456,
                .dda3_inc       =      0,           .dda3_size          =      0,
                .sc_reset       = TV_SC_RESET_EVERY_4,
                .pal_burst      = false,
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name           = "NTSC-443",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 29970,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
 
                /* desired 4.4336180 actual 4.4336180 clock 107.52 */
                .dda1_inc       =    168,
-               .dda2_inc       =  18557,       .dda2_size      =  20625,
-               .dda3_inc       =      0,       .dda3_size      =      0,
-               .sc_reset   = TV_SC_RESET_EVERY_8,
-               .pal_burst  = true,
+               .dda2_inc       =   4093,       .dda2_size      =  27456,
+               .dda3_inc       =    310,       .dda3_size      =    525,
+               .sc_reset   = TV_SC_RESET_NEVER,
+               .pal_burst  = false,
 
                .composite_levels = &ntsc_m_levels_composite,
                .composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name           = "NTSC-J",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 29970,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
                .vburst_start_f4 = 10,              .vburst_end_f4      = 240,
 
                /* desired 3.5800000 actual 3.5800000 clock 107.52 */
-               .dda1_inc       =    136,
-               .dda2_inc       =   7624,           .dda2_size          =  20013,
+               .dda1_inc       =    135,
+               .dda2_inc       =  20800,           .dda2_size          =  27456,
                .dda3_inc       =      0,           .dda3_size          =      0,
                .sc_reset       = TV_SC_RESET_EVERY_4,
                .pal_burst      = false,
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
        },
        {
                .name           = "PAL-M",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 29970,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
                .vburst_start_f4 = 10,              .vburst_end_f4      = 240,
 
                /* desired 3.5800000 actual 3.5800000 clock 107.52 */
-               .dda1_inc       =    136,
-               .dda2_inc       =    7624,          .dda2_size          =  20013,
+               .dda1_inc       =    135,
+               .dda2_inc       =  16704,           .dda2_size          =  27456,
                .dda3_inc       =      0,           .dda3_size          =      0,
-               .sc_reset       = TV_SC_RESET_EVERY_4,
-               .pal_burst  = false,
+               .sc_reset       = TV_SC_RESET_EVERY_8,
+               .pal_burst  = true,
 
                .composite_levels = &pal_m_levels_composite,
                .composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
        {
                /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
                .name       = "PAL-N",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 25000,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
 
 
                /* desired 4.4336180 actual 4.4336180 clock 107.52 */
-               .dda1_inc       =    168,
-               .dda2_inc       =  18557,       .dda2_size      =  20625,
-               .dda3_inc       =      0,       .dda3_size      =      0,
+               .dda1_inc       =    135,
+               .dda2_inc       =  23578,       .dda2_size      =  27648,
+               .dda3_inc       =    134,       .dda3_size      =    625,
                .sc_reset   = TV_SC_RESET_EVERY_8,
                .pal_burst  = true,
 
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
        {
                /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
                .name       = "PAL",
-               .clock          = 107520,
+               .clock          = 108000,
                .refresh        = 25000,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
-               .hsync_end      = 64,               .hblank_end         = 128,
+               .hsync_end      = 64,               .hblank_end         = 142,
                .hblank_start   = 844,      .htotal             = 863,
 
                .progressive    = false,    .trilevel_sync = false,
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
 
                /* desired 4.4336180 actual 4.4336180 clock 107.52 */
                .dda1_inc       =    168,
-               .dda2_inc       =  18557,       .dda2_size      =  20625,
-               .dda3_inc       =      0,       .dda3_size      =      0,
+               .dda2_inc       =   4122,       .dda2_size      =  27648,
+               .dda3_inc       =     67,       .dda3_size      =    625,
                .sc_reset   = TV_SC_RESET_EVERY_8,
                .pal_burst  = true,
 
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
                .veq_ena        = false,
 
                .vi_end_f1      = 44,               .vi_end_f2          = 44,
-               .nbr_end        = 496,
+               .nbr_end        = 479,
 
                .burst_ena      = false,
 
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
                .veq_ena        = false,
 
                .vi_end_f1      = 44,               .vi_end_f2          = 44,
-               .nbr_end        = 496,
+               .nbr_end        = 479,
 
                .burst_ena      = false,
 
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
                .component_only = 1,
 
                .hsync_end      = 88,               .hblank_end         = 235,
-               .hblank_start   = 2155,             .htotal             = 2200,
+               .hblank_start   = 2155,             .htotal             = 2201,
 
                .progressive    = false,            .trilevel_sync = true,
 
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
 
        /* Ensure TV refresh is close to desired refresh */
-       if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1)
+       if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
                return MODE_OK;
        return MODE_CLOCK_RANGE;
 }
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        if (!tv_mode)
                return; /* can't happen (mode_prepare prevents this) */
 
-       tv_ctl = 0;
+       tv_ctl = I915_READ(TV_CTL);
+       tv_ctl &= TV_CTL_SAVE;
 
        switch (tv_priv->type) {
        default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        /* dda1 implies valid video levels */
        if (tv_mode->dda1_inc) {
                scctl1 |= TV_SC_DDA1_EN;
-               scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
        }
 
        if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                scctl1 |= TV_SC_DDA3_EN;
 
        scctl1 |= tv_mode->sc_reset;
+       scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
        scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
 
        scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                           color_conversion->av);
        }
 
-       I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+       if (IS_I965G(dev))
+               I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+       else
+               I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
        if (video_levels)
                I915_WRITE(TV_CLR_LEVEL,
                           ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
                tv_dac = I915_READ(TV_DAC);
                I915_WRITE(TV_DAC, save_tv_dac);
                I915_WRITE(TV_CTL, save_tv_ctl);
+               intel_wait_for_vblank(dev);
        }
        /*
         *  A B C
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
        mode = reported_modes[0];
        drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
-       if (encoder->crtc) {
+       if (encoder->crtc && encoder->crtc->enabled) {
                type = intel_tv_detect_type(encoder->crtc, intel_output);
        } else {
                crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
                        type = -1;
        }
 
+       tv_priv->type = type;
+
        if (type < 0)
                return connector_status_disconnected;
 
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
        struct drm_display_mode *mode_ptr;
        struct intel_output *intel_output = to_intel_output(connector);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
-       int j;
+       int j, count = 0;
+       u64 tmp;
 
        for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
             j++) {
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
                                        && !tv_mode->component_only))
                        continue;
 
-               mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
-                                     DRM_MEM_DRIVER);
+               mode_ptr = drm_mode_create(connector->dev);
+               if (!mode_ptr)
+                       continue;
                strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
 
                mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
                        mode_ptr->vsync_end = mode_ptr->vsync_start  + 1;
                mode_ptr->vtotal = vactive_s + 33;
 
-               mode_ptr->clock = (int) (tv_mode->refresh *
-                                        mode_ptr->vtotal *
-                                        mode_ptr->htotal / 1000) / 1000;
+               tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+               tmp *= mode_ptr->htotal;
+               tmp = div_u64(tmp, 1000000);
+               mode_ptr->clock = (int) tmp;
 
                mode_ptr->type = DRM_MODE_TYPE_DRIVER;
                drm_mode_probed_add(connector, mode_ptr);
+               count++;
        }
 
-       return 0;
+       return count;
 }
 
 static void
index e5f4ae989abf15bd0b7ead6428840584cede0f85..c19a93c3be85f0755515812aaf9916ce59a0e587 100644 (file)
@@ -758,6 +758,8 @@ struct drm_driver {
 
        int (*proc_init)(struct drm_minor *minor);
        void (*proc_cleanup)(struct drm_minor *minor);
+       int (*debugfs_init)(struct drm_minor *minor);
+       void (*debugfs_cleanup)(struct drm_minor *minor);
 
        /**
         * Driver-specific constructor for drm_gem_objects, to set up
@@ -793,6 +795,48 @@ struct drm_driver {
 #define DRM_MINOR_CONTROL 2
 #define DRM_MINOR_RENDER 3
 
+
+/**
+ * debugfs node list. This structure represents a debugfs file to
+ * be created by the drm core
+ */
+struct drm_debugfs_list {
+       const char *name; /** file name */
+       int (*show)(struct seq_file*, void*); /** show callback */
+       u32 driver_features; /**< Required driver features for this entry */
+};
+
+/**
+ * debugfs node structure. This structure represents a debugfs file.
+ */
+struct drm_debugfs_node {
+       struct list_head list;
+       struct drm_minor *minor;
+       struct drm_debugfs_list *debugfs_ent;
+       struct dentry *dent;
+};
+
+/**
+ * Info file list entry. This structure represents a debugfs or proc file to
+ * be created by the drm core
+ */
+struct drm_info_list {
+       const char *name; /** file name */
+       int (*show)(struct seq_file*, void*); /** show callback */
+       u32 driver_features; /**< Required driver features for this entry */
+       void *data;
+};
+
+/**
+ * debugfs node structure. This structure represents a debugfs file.
+ */
+struct drm_info_node {
+       struct list_head list;
+       struct drm_minor *minor;
+       struct drm_info_list *info_ent;
+       struct dentry *dent;
+};
+
 /**
  * DRM minor structure. This structure represents a drm minor number.
  */
@@ -802,7 +846,12 @@ struct drm_minor {
        dev_t device;                   /**< Device number for mknod */
        struct device kdev;             /**< Linux device */
        struct drm_device *dev;
-       struct proc_dir_entry *dev_root;  /**< proc directory entry */
+
+       struct proc_dir_entry *proc_root;  /**< proc directory entry */
+       struct drm_info_node proc_nodes;
+       struct dentry *debugfs_root;
+       struct drm_info_node debugfs_nodes;
+
        struct drm_master *master; /* currently active master for this node */
        struct list_head master_list;
        struct drm_mode_group mode_group;
@@ -1258,6 +1307,7 @@ extern unsigned int drm_debug;
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
+extern struct dentry *drm_debugfs_root;
 
 extern struct idr drm_minors_idr;
 
@@ -1268,6 +1318,31 @@ extern int drm_proc_init(struct drm_minor *minor, int minor_id,
                         struct proc_dir_entry *root);
 extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
 
+                               /* Debugfs support */
+#if defined(CONFIG_DEBUG_FS)
+extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+                           struct dentry *root);
+extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
+                                   struct dentry *root, struct drm_minor *minor);
+extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+                                    struct drm_minor *minor);
+extern int drm_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+                               /* Info file support */
+extern int drm_name_info(struct seq_file *m, void *data);
+extern int drm_vm_info(struct seq_file *m, void *data);
+extern int drm_queues_info(struct seq_file *m, void *data);
+extern int drm_bufs_info(struct seq_file *m, void *data);
+extern int drm_vblank_info(struct seq_file *m, void *data);
+extern int drm_clients_info(struct seq_file *m, void* data);
+extern int drm_gem_name_info(struct seq_file *m, void *data);
+extern int drm_gem_object_info(struct seq_file *m, void* data);
+
+#if DRM_DEBUG_CODE
+extern int drm_vma_info(struct seq_file *m, void *data);
+#endif
+
                                /* Scatter Gather Support (drm_scatter.h) */
 extern void drm_sg_cleanup(struct drm_sg_mem * entry);
 extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
index 5165f240aa68e3f9aa7f9cab84ab6c098640eaa7..76c4c8243038388b3843e6470c256afbe1e125c8 100644 (file)
        {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
        {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
        {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+       {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+       {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
        {0, 0, 0}