1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Sentinel "no PID" value: all bits set. */
34 #define MAX_NOPID ((u32)~0)
36 /** These are the interrupts used by the driver */
37 #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
38 I915_ASLE_INTERRUPT | \
39 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
40 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
43 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
45 if ((dev_priv->irq_mask_reg & mask) != 0) {
46 dev_priv->irq_mask_reg &= ~mask;
47 I915_WRITE(IMR, dev_priv->irq_mask_reg);
48 (void) I915_READ(IMR);
53 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
55 if ((dev_priv->irq_mask_reg & mask) != mask) {
56 dev_priv->irq_mask_reg |= mask;
57 I915_WRITE(IMR, dev_priv->irq_mask_reg);
58 (void) I915_READ(IMR);
63 * i915_get_pipe - return the pipe associated with a given plane
65 * @plane: plane to look for
67 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
68 * rather than a pipe number, since they may not always be equal. This routine
69 * maps the given @plane back to a pipe number.
72 i915_get_pipe(struct drm_device *dev, int plane)
74 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
77 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
79 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
83 * i915_get_plane - return the plane associated with a given pipe
85 * @pipe: pipe to look for
87 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
88 * rather than a pipe number, since they may not always be equal. This routine
89 * maps the given @pipe back to a plane number.
/* Inverse of i915_get_pipe(): return the plane driven by @pipe.
 * With only two planes/pipes, if plane 0 is on @pipe the answer is 0,
 * otherwise it must be plane 1. */
static int
i915_get_plane(struct drm_device *dev, int pipe)
{
	return (i915_get_pipe(dev, 0) == pipe) ? 0 : 1;
}
100 * i915_pipe_enabled - check if a pipe is enabled
102 * @pipe: pipe to check
104 * Reading certain registers when the pipe is disabled can hang the chip.
105 * Use this routine to make sure the PLL is running and the pipe is active
106 * before reading such registers if unsure.
109 i915_pipe_enabled(struct drm_device *dev, int pipe)
111 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
112 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
114 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
121 * Emit blits for scheduled buffer swaps.
123 * This function will be called with the HW lock held.
124 * Because this function must grab the ring mutex (dev->struct_mutex),
125 * it can no longer run at soft irq time. We'll fix this when we do
126 * the DRI2 swap buffer work.
128 static void i915_vblank_tasklet(struct drm_device *dev)
/* Emit blits for every buffer swap whose target vblank has passed.
 * Called with the HW lock held (see comment above); takes
 * dev->struct_mutex for ring access.
 * NOTE(review): this extract is missing lines relative to the full
 * function (closing braces, counter[]/nhits/pipe/num_rects declarations,
 * BEGIN/ADVANCE_LP_RING calls) -- read against the full file before
 * relying on exact control flow. */
130 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
131 unsigned long irqflags;
132 struct list_head *list, *tmp, hits, *hit;
133 int nhits, nrects, slice[2], upper[2], lower[2], i;
135 struct drm_drawable_info *drw;
136 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
137 u32 cpp = dev_priv->cpp;
/* 32bpp blits also write alpha/RGB; other depths use the plain blit. */
138 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
139 XY_SRC_COPY_BLT_WRITE_ALPHA |
140 XY_SRC_COPY_BLT_WRITE_RGB)
141 : XY_SRC_COPY_BLT_CMD;
142 u32 src_pitch = sarea_priv->pitch * cpp;
143 u32 dst_pitch = sarea_priv->pitch * cpp;
/* ROP 0xcc = source copy; colour depth encoded in bits 24+. */
144 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
147 mutex_lock(&dev->struct_mutex);
/* On 965, tiled front/back buffers need tiling flags in the blit cmd. */
149 if (IS_I965G(dev) && sarea_priv->front_tiled) {
150 cmd |= XY_SRC_COPY_BLT_DST_TILED;
153 if (IS_I965G(dev) && sarea_priv->back_tiled) {
154 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
/* Snapshot the current vblank counts, indexed by pipe. */
158 counter[0] = drm_vblank_count(dev, i915_get_plane(dev, 0));
159 counter[1] = drm_vblank_count(dev, i915_get_plane(dev, 1));
163 INIT_LIST_HEAD(&hits);
167 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
169 /* Find buffer swaps scheduled for this vertical blank */
170 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
171 drm_i915_vbl_swap_t *vbl_swap =
172 list_entry(list, drm_i915_vbl_swap_t, head);
173 int pipe = vbl_swap->pipe;
/* Wraparound-safe compare: skip swaps whose sequence is still ahead. */
175 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
179 dev_priv->swaps_pending--;
/* Drop the vblank reference taken when the swap was queued. */
180 drm_vblank_put(dev, pipe);
182 spin_unlock(&dev_priv->swaps_lock);
183 spin_lock(&dev->drw_lock);
185 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
/* Insertion-sort this hit into the list by top edge (rects[0].y1). */
187 list_for_each(hit, &hits) {
188 drm_i915_vbl_swap_t *swap_cmp =
189 list_entry(hit, drm_i915_vbl_swap_t, head);
190 struct drm_drawable_info *drw_cmp =
191 drm_get_drawable_info(dev, swap_cmp->drw_id);
193 /* Make sure both drawables are still
194 * around and have some rectangles before
195 * we look inside to order them for the
198 if (drw_cmp && drw_cmp->num_rects > 0 &&
199 drw && drw->num_rects > 0 &&
200 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
201 list_add_tail(list, hit);
206 spin_unlock(&dev->drw_lock);
208 /* List of hits was empty, or we reached the end of it */
210 list_add_tail(list, hits.prev);
214 spin_lock(&dev_priv->swaps_lock);
218 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
219 mutex_unlock(&dev->struct_mutex);
223 spin_unlock(&dev_priv->swaps_lock);
225 i915_kernel_lost_context(dev);
/* 965 uses a different DRAWRECT_INFO packet layout than older chips. */
230 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
232 OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
238 OUT_RING(GFX_OP_DRAWRECT_INFO);
241 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
242 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
248 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
/* Partition each output vertically into nhits slices (see comment below). */
250 upper[0] = upper[1] = 0;
251 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
252 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
253 lower[0] = sarea_priv->pipeA_y + slice[0];
/* NOTE(review): slice[0] is used for pipe B's lower bound here -- looks
 * like it should be slice[1]; confirm against upstream before changing. */
254 lower[1] = sarea_priv->pipeB_y + slice[0];
256 spin_lock(&dev->drw_lock);
258 /* Emit blits for buffer swaps, partitioning both outputs into as many
259 * slices as there are buffer swaps scheduled in order to avoid tearing
260 * (based on the assumption that a single buffer swap would always
261 * complete before scanout starts).
263 for (i = 0; i++ < nhits;
264 upper[0] = lower[0], lower[0] += slice[0],
265 upper[1] = lower[1], lower[1] += slice[1]) {
267 lower[0] = lower[1] = sarea_priv->height;
269 list_for_each(hit, &hits) {
270 drm_i915_vbl_swap_t *swap_hit =
271 list_entry(hit, drm_i915_vbl_swap_t, head);
272 struct drm_clip_rect *rect;
274 unsigned short top, bottom;
276 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
278 /* The drawable may have been destroyed since
279 * the vblank swap was queued
285 pipe = swap_hit->pipe;
287 bottom = lower[pipe];
/* Clip each drawable rect to the current slice and emit one blit each. */
289 for (num_rects = drw->num_rects; num_rects--; rect++) {
290 int y1 = max(rect->y1, top);
291 int y2 = min(rect->y2, bottom);
299 OUT_RING(ropcpp | dst_pitch);
300 OUT_RING((y1 << 16) | rect->x1);
301 OUT_RING((y2 << 16) | rect->x2);
302 OUT_RING(sarea_priv->front_offset);
303 OUT_RING((y1 << 16) | rect->x1);
305 OUT_RING(sarea_priv->back_offset);
312 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
313 mutex_unlock(&dev->struct_mutex);
/* All swaps processed: free the queued request structures. */
315 list_for_each_safe(hit, tmp, &hits) {
316 drm_i915_vbl_swap_t *swap_hit =
317 list_entry(hit, drm_i915_vbl_swap_t, head);
321 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
325 u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
/* Return the hardware frame counter for the pipe driving @plane
 * (24 bits total -- see max_vblank_count = 0xffffff in postinstall).
 * NOTE(review): extract is missing lines (the pipe declaration, the
 * "do {" opener, the early return in the disabled-pipe branch and the
 * final return of count). */
327 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
328 unsigned long high_frame;
329 unsigned long low_frame;
330 u32 high1, high2, low, count;
333 pipe = i915_get_pipe(dev, plane);
334 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
335 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
/* Reading these registers on a disabled pipe can hang the chip
 * (see i915_pipe_enabled's comment). */
337 if (!i915_pipe_enabled(dev, pipe)) {
338 DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
343 * High & low register fields aren't synchronized, so make sure
344 * we get a low value that's stable across two reads of the high
/* Loop until the high field reads the same before and after the low read. */
348 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
349 PIPE_FRAME_HIGH_SHIFT);
350 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
351 PIPE_FRAME_LOW_SHIFT);
352 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
353 PIPE_FRAME_HIGH_SHIFT);
354 } while (high1 != high2);
/* Combine the stable high field with the low frame-count field. */
356 count = (high1 << 8) | low;
362 i915_vblank_work_handler(struct work_struct *work)
/* Process-context worker that runs the vblank tasklet, honouring the HW
 * lock when one exists.  NOTE(review): extract is missing the return-type
 * line, braces and the returns after the early-exit branches. */
364 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
366 struct drm_device *dev = dev_priv->dev;
367 unsigned long irqflags;
/* No HW lock in play: run the tasklet immediately. */
369 if (dev->lock.hw_lock == NULL) {
370 i915_vblank_tasklet(dev);
/* Publish the tasklet so a current lock holder runs it on unlock. */
374 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
375 dev->locked_tasklet_func = i915_vblank_tasklet;
376 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
378 /* Try to get the lock now, if this fails, the lock
379 * holder will execute the tasklet during unlock
381 if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
384 dev->lock.lock_time = jiffies;
385 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
/* We won the lock ourselves: retract the deferred func and run it here. */
387 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
388 dev->locked_tasklet_func = NULL;
389 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
391 i915_vblank_tasklet(dev);
392 drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
395 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
/* Top-level interrupt handler: acks IIR/PIPESTAT, dispatches vblank,
 * user-interrupt (breadcrumb/GEM seqno) and ASLE events.
 * NOTE(review): extract is missing lines (iir/vblank declarations,
 * IRQ_NONE/IRQ_HANDLED returns, several closing braces). */
397 struct drm_device *dev = (struct drm_device *) arg;
398 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
400 u32 pipea_stats, pipeb_stats;
403 atomic_inc(&dev_priv->irq_received);
/* With MSI, mask everything while we process to avoid re-entry. */
405 if (dev->pdev->msi_enabled)
407 iir = I915_READ(IIR);
410 if (dev->pdev->msi_enabled) {
411 I915_WRITE(IMR, dev_priv->irq_mask_reg);
412 (void) I915_READ(IMR);
418 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
419 * we may get extra interrupts.
421 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
422 pipea_stats = I915_READ(PIPEASTAT);
/* If pipe A vblank wasn't requested, drop its enable bits before ack. */
423 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
424 pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
425 PIPE_VBLANK_INTERRUPT_ENABLE);
426 else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
427 PIPE_VBLANK_INTERRUPT_STATUS)) {
429 drm_handle_vblank(dev, i915_get_plane(dev, 0));
/* Writing the status bits back acks them. */
432 I915_WRITE(PIPEASTAT, pipea_stats);
434 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
435 pipeb_stats = I915_READ(PIPEBSTAT);
437 I915_WRITE(PIPEBSTAT, pipeb_stats);
439 /* The vblank interrupt gets enabled even if we didn't ask for
440 it, so make sure it's shut down again */
441 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
442 pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
443 PIPE_VBLANK_INTERRUPT_ENABLE);
444 else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
445 PIPE_VBLANK_INTERRUPT_STATUS)) {
447 drm_handle_vblank(dev, i915_get_plane(dev, 1));
/* Legacy backlight event on pipe B feeds the opregion ASLE handler. */
450 if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
451 opregion_asle_intr(dev);
452 I915_WRITE(PIPEBSTAT, pipeb_stats);
455 I915_WRITE(IIR, iir);
456 if (dev->pdev->msi_enabled)
457 I915_WRITE(IMR, dev_priv->irq_mask_reg);
458 (void) I915_READ(IIR); /* Flush posted writes */
/* Mirror the breadcrumb into the sarea for userspace. */
460 if (dev_priv->sarea_priv)
461 dev_priv->sarea_priv->last_dispatch =
462 READ_BREADCRUMB(dev_priv);
464 if (iir & I915_USER_INTERRUPT) {
465 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
466 DRM_WAKEUP(&dev_priv->irq_queue);
469 if (iir & I915_ASLE_INTERRUPT)
470 opregion_asle_intr(dev);
/* Swap emission needs process context -- defer to the vblank worker. */
472 if (vblank && dev_priv->swaps_pending > 0)
473 schedule_work(&dev_priv->vblank_work);
478 static int i915_emit_irq(struct drm_device * dev)
/* Advance the breadcrumb counter, store it in the status page via
 * MI_STORE_DWORD_INDEX and emit MI_USER_INTERRUPT; returns the new
 * counter value.  Caller must hold struct_mutex (touches the ring).
 * NOTE(review): extract is missing lines (RING_LOCALS, the counter
 * increment, BEGIN/ADVANCE_LP_RING). */
480 drm_i915_private_t *dev_priv = dev->dev_private;
483 i915_kernel_lost_context(dev);
/* Wrap before the counter overflows as a signed 32-bit value. */
488 if (dev_priv->counter > 0x7FFFFFFFUL)
489 dev_priv->counter = 1;
490 if (dev_priv->sarea_priv)
491 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
/* Write the counter to status-page dword 5, then raise the interrupt. */
494 OUT_RING(MI_STORE_DWORD_INDEX);
495 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
496 OUT_RING(dev_priv->counter);
499 OUT_RING(MI_USER_INTERRUPT);
502 return dev_priv->counter;
505 void i915_user_irq_get(struct drm_device *dev)
507 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
508 unsigned long irqflags;
510 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
511 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
512 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
513 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
516 void i915_user_irq_put(struct drm_device *dev)
518 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
519 unsigned long irqflags;
521 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
522 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
523 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
524 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
525 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
528 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
/* Block (up to 3s) until the breadcrumb reaches @irq_nr, keeping the
 * user interrupt enabled while waiting.
 * NOTE(review): extract is missing lines (ret declaration/init, the
 * early "return 0", the ret == -EBUSY test and final return). */
530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
533 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
534 READ_BREADCRUMB(dev_priv));
/* Fast path: already satisfied, just sync the sarea breadcrumb. */
536 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
537 if (dev_priv->sarea_priv) {
538 dev_priv->sarea_priv->last_dispatch =
539 READ_BREADCRUMB(dev_priv);
544 if (dev_priv->sarea_priv)
545 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
/* Hold a user-irq reference so the wakeup interrupt stays unmasked. */
547 i915_user_irq_get(dev);
548 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
549 READ_BREADCRUMB(dev_priv) >= irq_nr);
550 i915_user_irq_put(dev);
553 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
554 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
557 if (dev_priv->sarea_priv)
558 dev_priv->sarea_priv->last_dispatch =
559 READ_BREADCRUMB(dev_priv);
564 /* Needs the lock as it touches the ring.
566 int i915_irq_emit(struct drm_device *dev, void *data,
567 struct drm_file *file_priv)
/* IRQ_EMIT ioctl: emit a breadcrumb interrupt and copy the resulting
 * sequence number back to userspace.
 * NOTE(review): extract is missing lines (result declaration, the
 * !dev_priv guard's braces and the return statements). */
569 drm_i915_private_t *dev_priv = dev->dev_private;
570 drm_i915_irq_emit_t *emit = data;
573 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
576 DRM_ERROR("called with no initialization\n");
/* struct_mutex protects the ring while emitting. */
579 mutex_lock(&dev->struct_mutex);
580 result = i915_emit_irq(dev);
581 mutex_unlock(&dev->struct_mutex);
583 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
584 DRM_ERROR("copy_to_user\n");
591 /* Doesn't need the hardware lock.
593 int i915_irq_wait(struct drm_device *dev, void *data,
594 struct drm_file *file_priv)
/* IRQ_WAIT ioctl: block until the breadcrumb reaches irq_seq.
 * NOTE(review): extract is missing the !dev_priv guard's braces and
 * its error return. */
596 drm_i915_private_t *dev_priv = dev->dev_private;
597 drm_i915_irq_wait_t *irqwait = data;
600 DRM_ERROR("called with no initialization\n");
604 return i915_wait_irq(dev, irqwait->irq_seq);
607 int i915_enable_vblank(struct drm_device *dev, int plane)
/* Enable vblank interrupts for the pipe driving @plane: unmask the pipe
 * event in IMR first, then enable+ack the vblank bits in PIPESTAT.
 * NOTE(review): extract is missing lines (interrupt/pipestat
 * declarations, the switch(pipe)/case labels, braces and returns). */
609 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
610 int pipe = i915_get_pipe(dev, plane);
611 u32 pipestat_reg = 0;
614 unsigned long irqflags;
/* Pick the PIPESTAT register and IIR event bit for this pipe. */
618 pipestat_reg = PIPEASTAT;
619 interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
622 pipestat_reg = PIPEBSTAT;
623 interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
626 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
631 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
632 /* Enabling vblank events in IMR comes before PIPESTAT write, or
633 * there's a race where the PIPESTAT vblank bit gets set to 1, so
634 * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
635 * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
636 * IMR masks it. It doesn't ever get set after we clear the masking
637 * in IMR because the ISR bit is edge, not level-triggered, on the
638 * OR of PIPESTAT bits.
640 i915_enable_irq(dev_priv, interrupt);
641 pipestat = I915_READ(pipestat_reg);
643 pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
645 pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
646 /* Clear any stale interrupt status */
647 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
648 PIPE_VBLANK_INTERRUPT_STATUS);
649 I915_WRITE(pipestat_reg, pipestat);
650 (void) I915_READ(pipestat_reg); /* Posting read */
651 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
656 void i915_disable_vblank(struct drm_device *dev, int plane)
/* Disable vblank interrupts for the pipe driving @plane: mask the pipe
 * event in IMR, then drop the enable bits and ack any pending status in
 * PIPESTAT.  NOTE(review): extract is missing lines (interrupt/pipestat
 * declarations, the switch(pipe)/case labels, braces and returns). */
658 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
659 int pipe = i915_get_pipe(dev, plane);
660 u32 pipestat_reg = 0;
663 unsigned long irqflags;
/* Pick the PIPESTAT register and IIR event bit for this pipe. */
667 pipestat_reg = PIPEASTAT;
668 interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
671 pipestat_reg = PIPEBSTAT;
672 interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
675 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
681 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
682 i915_disable_irq(dev_priv, interrupt);
683 pipestat = I915_READ(pipestat_reg);
684 pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
685 PIPE_VBLANK_INTERRUPT_ENABLE);
686 /* Clear any stale interrupt status */
687 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
688 PIPE_VBLANK_INTERRUPT_STATUS);
689 I915_WRITE(pipestat_reg, pipestat);
690 (void) I915_READ(pipestat_reg); /* Posting read */
691 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
694 /* Set the vblank monitor pipe
696 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
697 struct drm_file *file_priv)
/* VBLANK_PIPE_SET ioctl.  Only the initialization check is visible in
 * this extract; braces and return statements are missing -- presumably
 * the body ignores the requested pipe mask, but confirm in the full file. */
699 drm_i915_private_t *dev_priv = dev->dev_private;
702 DRM_ERROR("called with no initialization\n");
709 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
710 struct drm_file *file_priv)
/* VBLANK_PIPE_GET ioctl: report both pipes as vblank-capable.
 * NOTE(review): extract is missing the !dev_priv guard's braces and the
 * return statements. */
712 drm_i915_private_t *dev_priv = dev->dev_private;
713 drm_i915_vblank_pipe_t *pipe = data;
716 DRM_ERROR("called with no initialization\n");
720 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
726 * Schedule buffer swap at given vertical blank.
728 int i915_vblank_swap(struct drm_device *dev, void *data,
729 struct drm_file *file_priv)
/* VBLANK_SWAP ioctl: validate the request, take a vblank reference and
 * queue a drm_i915_vbl_swap_t for the vblank worker to execute.
 * NOTE(review): extract is missing lines (the ret declaration, several
 * braces, and the return statements after each error path). */
731 drm_i915_private_t *dev_priv = dev->dev_private;
732 drm_i915_vblank_swap_t *swap = data;
733 drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
734 unsigned int pipe, seqtype, curseq, plane;
735 unsigned long irqflags;
736 struct list_head *list;
739 if (!dev_priv || !dev_priv->sarea_priv) {
740 DRM_ERROR("%s called with no initialization\n", __func__);
744 if (dev_priv->sarea_priv->rotation) {
745 DRM_DEBUG("Rotation not supported\n");
/* Reject any sequence-type flag we don't understand. */
749 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
750 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
751 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
/* SECONDARY selects plane 1; translate plane to hardware pipe. */
755 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
756 pipe = i915_get_pipe(dev, plane);
758 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
760 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
761 DRM_ERROR("Invalid pipe %d\n", pipe);
/* The drawable must exist at queue time (checked under drw_lock). */
765 spin_lock_irqsave(&dev->drw_lock, irqflags);
767 if (!drm_get_drawable_info(dev, swap->drawable)) {
768 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
769 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
773 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
776 * We take the ref here and put it when the swap actually completes
779 ret = drm_vblank_get(dev, pipe);
782 curseq = drm_vblank_count(dev, pipe);
784 if (seqtype == _DRM_VBLANK_RELATIVE)
785 swap->sequence += curseq;
/* Wraparound-safe: target already passed? */
787 if ((curseq - swap->sequence) <= (1<<23)) {
788 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
789 swap->sequence = curseq + 1;
791 DRM_DEBUG("Missed target sequence\n");
792 drm_vblank_put(dev, pipe);
797 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
800 DRM_ERROR("Failed to allocate memory to queue swap\n");
801 drm_vblank_put(dev, pipe);
805 vbl_swap->drw_id = swap->drawable;
806 vbl_swap->pipe = pipe;
807 vbl_swap->sequence = swap->sequence;
809 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* Duplicate requests are dropped (reference and allocation released). */
811 list_for_each(list, &dev_priv->vbl_swaps.head) {
812 vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
814 if (vbl_old->drw_id == swap->drawable &&
815 vbl_old->pipe == pipe &&
816 vbl_old->sequence == swap->sequence) {
817 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
818 drm_vblank_put(dev, pipe);
819 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
820 DRM_DEBUG("Already scheduled\n");
/* Cap the queue at 10 outstanding swaps. */
825 if (dev_priv->swaps_pending >= 10) {
826 DRM_DEBUG("Too many swaps queued\n");
827 DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
828 drm_vblank_count(dev, i915_get_plane(dev, 0)),
829 drm_vblank_count(dev, i915_get_plane(dev, 1)));
831 list_for_each(list, &dev_priv->vbl_swaps.head) {
832 vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
833 DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
834 vbl_old->drw_id, vbl_old->pipe,
837 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
838 drm_vblank_put(dev, pipe);
839 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
843 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
844 dev_priv->swaps_pending++;
846 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
853 void i915_driver_irq_preinstall(struct drm_device * dev)
855 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
857 I915_WRITE(HWSTAM, 0xeffe);
858 I915_WRITE(IMR, 0xffffffff);
859 I915_WRITE(IER, 0x0);
862 int i915_driver_irq_postinstall(struct drm_device *dev)
/* Finish IRQ setup: initialise swap bookkeeping and the vblank worker,
 * set up DRM vblank support, unmask the selected interrupts and enable
 * them in IER.  NOTE(review): extract is missing the drm_vblank_init
 * error check and the final return. */
864 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
865 int ret, num_pipes = 2;
867 spin_lock_init(&dev_priv->swaps_lock);
868 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
869 INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
870 dev_priv->swaps_pending = 0;
872 /* Set initial unmasked IRQs to just the selected vblank pipes. */
873 dev_priv->irq_mask_reg = ~0;
875 ret = drm_vblank_init(dev, num_pipes);
879 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
880 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
881 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
883 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
/* Never unmask anything outside the driver's interrupt-enable mask. */
885 dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
887 I915_WRITE(IMR, dev_priv->irq_mask_reg);
888 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
/* Posting read flushes the IER write. */
889 (void) I915_READ(IER);
/* Enable ACPI opregion ASLE events; init the breadcrumb wait queue. */
891 opregion_enable_asle(dev);
892 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
897 void i915_driver_irq_uninstall(struct drm_device * dev)
/* Tear down interrupts: mask everything, disable generation, and ack any
 * pending status by writing the read-back values.
 * NOTE(review): extract is missing lines (a guard/temp declaration, and
 * the function may continue past the end of this view). */
899 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
905 dev_priv->vblank_pipe = 0;
907 I915_WRITE(HWSTAM, 0xffffffff);
908 I915_WRITE(IMR, 0xffffffff);
909 I915_WRITE(IER, 0x0);
/* Read-then-write-back acks pending bits (same pattern the IRQ handler
 * uses to clear PIPESTAT/IIR). */
911 temp = I915_READ(PIPEASTAT);
912 I915_WRITE(PIPEASTAT, temp);
913 temp = I915_READ(PIPEBSTAT);
914 I915_WRITE(PIPEBSTAT, temp);
915 temp = I915_READ(IIR);
916 I915_WRITE(IIR, temp);