1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Bits of the 16-bit interrupt identity/enable registers
 * (I915REG_INT_IDENTITY_R / I915REG_INT_ENABLE_R) used below.
 * NOTE(review): bit positions inferred from their use in this file;
 * confirm against the hardware register documentation. */
34 #define USER_INT_FLAG (1<<1)
35 #define VSYNC_PIPEB_FLAG (1<<5)
36 #define VSYNC_PIPEA_FLAG (1<<7)
/* All-ones u32 sentinel; name suggests "no PID" — not used in the
 * code visible here. */
38 #define MAX_NOPID ((u32)~0)
41 * Emit blits for scheduled buffer swaps.
43 * This function will be called with the HW lock held.
/* Emit front<->back blits for every buffer swap whose target vblank
 * sequence has been reached on its pipe.  Runs as a locked tasklet, so
 * the HW lock is held (per the comment above this function).
 * NOTE(review): several lines (if-conditions, braces, some assignments)
 * appear to be elided from this view of the file; comments below only
 * describe what is visible. */
45 static void i915_vblank_tasklet(struct drm_device *dev)
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
48 unsigned long irqflags;
49 struct list_head *list, *tmp, hits, *hit;
50 int nhits, nrects, slice[2], upper[2], lower[2], i;
/* Snapshot of the per-pipe vblank counters; index 0 = pipe A,
 * index 1 = pipe B (vbl_received2). */
51 unsigned counter[2] = { atomic_read(&dev->vbl_received),
52 atomic_read(&dev->vbl_received2) };
53 struct drm_drawable_info *drw;
54 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
55 u32 cpp = dev_priv->cpp;
/* 32bpp blits also write alpha; other depths copy RGB only. */
56 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
57 XY_SRC_COPY_BLT_WRITE_ALPHA |
58 XY_SRC_COPY_BLT_WRITE_RGB)
59 : XY_SRC_COPY_BLT_CMD;
/* Packed blit parameter dword: pitch in bytes, ROP 0xcc (SRCCOPY),
 * plus depth-related bits.  NOTE(review): exact meaning of bits 23/24
 * should be confirmed against the blitter documentation. */
60 u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
61 (cpp << 23) | (1 << 24);
66 INIT_LIST_HEAD(&hits);
70 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
72 /* Find buffer swaps scheduled for this vertical blank */
73 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
74 drm_i915_vbl_swap_t *vbl_swap =
75 list_entry(list, drm_i915_vbl_swap_t, head);
/* Wrap-safe sequence test: a swap is "due" when the counter has
 * passed its sequence, i.e. the unsigned difference is <= 2^23.
 * The body for the not-yet-due case is elided in this view
 * (presumably a continue). */
77 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
81 dev_priv->swaps_pending--;
/* Drop swaps_lock before taking drw_lock (irqs stay disabled from
 * the irqsave above). */
83 spin_unlock(&dev_priv->swaps_lock);
84 spin_lock(&dev->drw_lock);
86 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
/* Drawable lookup failure path (condition elided): free the stale
 * swap record and resume scanning. */
89 spin_unlock(&dev->drw_lock);
90 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
91 spin_lock(&dev_priv->swaps_lock);
/* Insertion sort into `hits`, ordered by the top edge (y1) of each
 * drawable's first clip rect, so blits below are emitted roughly in
 * scanout order. */
95 list_for_each(hit, &hits) {
96 drm_i915_vbl_swap_t *swap_cmp =
97 list_entry(hit, drm_i915_vbl_swap_t, head);
98 struct drm_drawable_info *drw_cmp =
99 drm_get_drawable_info(dev, swap_cmp->drw_id);
102 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
103 list_add_tail(list, hit);
108 spin_unlock(&dev->drw_lock);
110 /* List of hits was empty, or we reached the end of it */
112 list_add_tail(list, hits.prev);
116 spin_lock(&dev_priv->swaps_lock);
120 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/* Early-out path when nothing hit (condition elided in this view). */
124 spin_unlock(&dev_priv->swaps_lock);
126 i915_kernel_lost_context(dev);
/* Set the drawing rectangle to full screen.  i965 uses the newer
 * DRAWRECT_INFO_I965 packet with inclusive max coordinates; older
 * chips use GFX_OP_DRAWRECT_INFO (the width|height dword is emitted
 * twice in that packet). */
131 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
133 OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
139 OUT_RING(GFX_OP_DRAWRECT_INFO);
142 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
143 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
/* The kernel now owns the ring context. */
149 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
/* Partition each pipe's visible area into `nhits` horizontal slices;
 * slice height is at least 1 line. */
151 upper[0] = upper[1] = 0;
152 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
153 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
154 lower[0] = sarea_priv->pipeA_y + slice[0];
/* NOTE(review): slice[0] is used here for pipe B's first lower bound;
 * looks like it should be slice[1] — confirm against upstream. */
155 lower[1] = sarea_priv->pipeB_y + slice[0];
157 spin_lock(&dev->drw_lock);
159 /* Emit blits for buffer swaps, partitioning both outputs into as many
160 * slices as there are buffer swaps scheduled in order to avoid tearing
161 * (based on the assumption that a single buffer swap would always
162 * complete before scanout starts).
/* Advance both pipes' slice bounds each iteration; the last slice is
 * extended to the bottom of the screen (line 168, condition elided). */
164 for (i = 0; i++ < nhits;
165 upper[0] = lower[0], lower[0] += slice[0],
166 upper[1] = lower[1], lower[1] += slice[1]) {
168 lower[0] = lower[1] = sarea_priv->height;
170 list_for_each(hit, &hits) {
171 drm_i915_vbl_swap_t *swap_hit =
172 list_entry(hit, drm_i915_vbl_swap_t, head);
173 struct drm_clip_rect *rect;
175 unsigned short top, bottom;
177 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
183 pipe = swap_hit->pipe;
185 bottom = lower[pipe];
/* For each clip rect, clamp to the current slice and emit one
 * back->front XY_SRC_COPY blit.  Degenerate (y1 >= y2) rects are
 * presumably skipped by an elided test. */
187 for (num_rects = drw->num_rects; num_rects--; rect++) {
188 int y1 = max(rect->y1, top);
189 int y2 = min(rect->y2, bottom);
197 OUT_RING(pitchropcpp);
198 OUT_RING((y1 << 16) | rect->x1);
199 OUT_RING((y2 << 16) | rect->x2);
200 OUT_RING(sarea_priv->front_offset);
201 OUT_RING((y1 << 16) | rect->x1);
202 OUT_RING(pitchropcpp & 0xffff);
203 OUT_RING(sarea_priv->back_offset);
210 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
/* All blits emitted; free the swap records collected in `hits`. */
212 list_for_each_safe(hit, tmp, &hits) {
213 drm_i915_vbl_swap_t *swap_hit =
214 list_entry(hit, drm_i915_vbl_swap_t, head);
218 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
/* Top-half interrupt handler: acknowledges the chip, updates the
 * dispatch breadcrumb, wakes waiters, counts vblanks per pipe, and
 * schedules the vblank tasklet when swaps are pending.
 * NOTE(review): some lines (declarations of `temp`, early-return for
 * temp == 0, status-clear bit names) are elided from this view. */
222 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
224 struct drm_device *dev = (struct drm_device *) arg;
225 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
227 u32 pipea_stats, pipeb_stats;
/* Read per-pipe status so the enable bits can be written back with
 * the status bits cleared at the bottom of the handler. */
229 pipea_stats = I915_READ(I915REG_PIPEASTAT);
230 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
232 temp = I915_READ16(I915REG_INT_IDENTITY_R);
/* Only the interrupt sources this driver handles. */
234 temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
236 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
/* Ack the handled sources; the read-back flushes the posted write
 * before the memory barrier. */
241 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
242 (void) I915_READ16(I915REG_INT_IDENTITY_R);
243 DRM_READMEMORYBARRIER();
/* Publish the latest completed breadcrumb to userspace via the sarea. */
245 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
247 if (temp & USER_INT_FLAG)
248 DRM_WAKEUP(&dev_priv->irq_queue);
250 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
251 int vblank_pipe = dev_priv->vblank_pipe;
/* With both pipes enabled, count each pipe on its own counter;
 * with a single pipe enabled, fold its events into vbl_received. */
254 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
255 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
256 if (temp & VSYNC_PIPEA_FLAG)
257 atomic_inc(&dev->vbl_received);
258 if (temp & VSYNC_PIPEB_FLAG)
259 atomic_inc(&dev->vbl_received2);
260 } else if (((temp & VSYNC_PIPEA_FLAG) &&
261 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
262 ((temp & VSYNC_PIPEB_FLAG) &&
263 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
264 atomic_inc(&dev->vbl_received);
266 DRM_WAKEUP(&dev->vbl_queue);
267 drm_vbl_send_signals(dev);
/* Defer swap blits to process-ish context via the locked tasklet. */
269 if (dev_priv->swaps_pending > 0)
270 drm_locked_tasklet(dev, i915_vblank_tasklet);
/* Write back pipe status with vblank interrupt kept enabled
 * (trailing status-clear bits elided in this view). */
271 I915_WRITE(I915REG_PIPEASTAT,
272 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
274 I915_WRITE(I915REG_PIPEBSTAT,
275 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
/* Emit a breadcrumb store plus a user-interrupt command into the ring
 * and return the new sequence number.  Caller must hold the HW lock
 * (it touches the ring).  NOTE(review): BEGIN_RING/ADVANCE_RING and
 * some OUT_RING dwords are elided from this view. */
282 static int i915_emit_irq(struct drm_device * dev)
284 drm_i915_private_t *dev_priv = dev->dev_private;
287 i915_kernel_lost_context(dev);
/* Bump the software sequence counter and mirror it into the sarea. */
291 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
/* Keep the counter in positive signed-32-bit range; wrap back to 1. */
293 if (dev_priv->counter > 0x7FFFFFFFUL)
294 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
/* Store the counter at the breadcrumb index, then raise a user IRQ. */
297 OUT_RING(CMD_STORE_DWORD_IDX);
299 OUT_RING(dev_priv->counter);
302 OUT_RING(GFX_OP_USER_INTERRUPT);
305 return dev_priv->counter;
/* Block until the hardware breadcrumb reaches irq_nr, with a 3 second
 * timeout.  Returns 0-ish on success or the DRM_WAIT_ON error code
 * (declaration of `ret` and the return statements are elided here). */
308 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
310 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
313 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
314 READ_BREADCRUMB(dev_priv));
/* Fast path: already completed, no need to sleep. */
316 if (READ_BREADCRUMB(dev_priv) >= irq_nr)
/* Mark the wait in the sarea perf boxes for userspace profiling. */
319 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
321 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
322 READ_BREADCRUMB(dev_priv) >= irq_nr);
/* Timed out (condition elided): report received vs. emitted sequence. */
325 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
326 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
329 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
/* Common implementation for the two vblank-wait entry points below:
 * sleep (up to 3 s) until the given atomic vblank counter passes
 * *sequence, using the same wrap-safe <= 2^23 comparison as the
 * tasklet, then return the current count through *sequence.
 * NOTE(review): the `counter` parameter, `ret`, and the returns are on
 * lines elided from this view. */
333 static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
336 drm_i915_private_t *dev_priv = dev->dev_private;
337 unsigned int cur_vblank;
/* Guard: driver private not set up yet (condition elided). */
341 DRM_ERROR("called with no initialization\n");
345 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
346 (((cur_vblank = atomic_read(counter))
347 - *sequence) <= (1<<23)));
349 *sequence = cur_vblank;
/* Wait for a vblank on the primary pipe's counter (vbl_received). */
355 int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
357 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
/* Wait for a vblank on the secondary pipe's counter (vbl_received2). */
360 int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
362 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
365 /* Needs the lock as it touches the ring.
/* IRQ_EMIT ioctl: emit a user interrupt and copy the new sequence
 * number back to userspace.  Needs the HW lock (per the comment above;
 * enforced by LOCK_TEST_WITH_RETURN). */
367 int i915_irq_emit(struct drm_device *dev, void *data,
368 struct drm_file *file_priv)
370 drm_i915_private_t *dev_priv = dev->dev_private;
371 drm_i915_irq_emit_t *emit = data;
374 LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Guard: driver not initialized (condition elided in this view). */
377 DRM_ERROR("called with no initialization\n");
381 result = i915_emit_irq(dev);
/* Hand the emitted sequence number back to the caller. */
383 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
384 DRM_ERROR("copy_to_user\n");
391 /* Doesn't need the hardware lock.
/* IRQ_WAIT ioctl: block until the requested sequence number has been
 * dispatched.  Does not require the HW lock (per the comment above). */
393 int i915_irq_wait(struct drm_device *dev, void *data,
394 struct drm_file *file_priv)
396 drm_i915_private_t *dev_priv = dev->dev_private;
397 drm_i915_irq_wait_t *irqwait = data;
/* Guard: driver not initialized (condition elided in this view). */
400 DRM_ERROR("called with no initialization\n");
404 return i915_wait_irq(dev, irqwait->irq_seq);
/* Program the interrupt enable register: user interrupts always on,
 * plus the vsync bit for each pipe selected in dev_priv->vblank_pipe.
 * NOTE(review): the declaration/initialization of `flag` is elided
 * from this view. */
407 static void i915_enable_interrupt (struct drm_device *dev)
409 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
413 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
414 flag |= VSYNC_PIPEA_FLAG;
415 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
416 flag |= VSYNC_PIPEB_FLAG;
418 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
421 /* Set the vblank monitor pipe
/* VBLANK_PIPE_SET ioctl: select which pipe(s) generate vblank
 * interrupts, then reprogram the interrupt enable register. */
423 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
424 struct drm_file *file_priv)
426 drm_i915_private_t *dev_priv = dev->dev_private;
427 drm_i915_vblank_pipe_t *pipe = data;
/* Guard: driver not initialized (condition elided in this view). */
430 DRM_ERROR("called with no initialization\n");
/* Reject any bits other than the two known pipe selectors. */
434 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
435 DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
439 dev_priv->vblank_pipe = pipe->pipe;
441 i915_enable_interrupt (dev);
/* VBLANK_PIPE_GET ioctl: report which pipes currently have vblank
 * interrupts enabled, read back from the hardware enable register
 * rather than the cached dev_priv->vblank_pipe. */
446 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
447 struct drm_file *file_priv)
449 drm_i915_private_t *dev_priv = dev->dev_private;
450 drm_i915_vblank_pipe_t *pipe = data;
/* Guard: driver not initialized (condition elided in this view). */
454 DRM_ERROR("called with no initialization\n");
458 flag = I915_READ(I915REG_INT_ENABLE_R);
/* Translate hardware vsync bits back into the DRM pipe flags. */
460 if (flag & VSYNC_PIPEA_FLAG)
461 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
462 if (flag & VSYNC_PIPEB_FLAG)
463 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
469 * Schedule buffer swap at given vertical blank.
/* VBLANK_SWAP ioctl: queue a buffer swap for a drawable at a given
 * vblank sequence; the swap itself is performed later by
 * i915_vblank_tasklet().  NOTE(review): several return statements and
 * if-conditions are elided from this view. */
471 int i915_vblank_swap(struct drm_device *dev, void *data,
472 struct drm_file *file_priv)
474 drm_i915_private_t *dev_priv = dev->dev_private;
475 drm_i915_vblank_swap_t *swap = data;
476 drm_i915_vbl_swap_t *vbl_swap;
477 unsigned int pipe, seqtype, curseq;
478 unsigned long irqflags;
479 struct list_head *list;
/* Guard: driver not initialized (condition elided in this view). */
482 DRM_ERROR("%s called with no initialization\n", __func__);
/* The blit path in the tasklet cannot handle rotated framebuffers. */
486 if (dev_priv->sarea_priv->rotation) {
487 DRM_DEBUG("Rotation not supported\n");
/* Only the four known seqtype bits are accepted. */
491 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
492 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
493 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
/* SECONDARY selects pipe B (1), otherwise pipe A (0). */
497 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
499 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
/* The requested pipe must have vblank interrupts enabled.
 * NOTE(review): relies on DRM_I915_VBLANK_PIPE_A/B being 1<<0 / 1<<1
 * so that (1 << pipe) matches — confirm against the header. */
501 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
502 DRM_ERROR("Invalid pipe %d\n", pipe);
/* Validate the drawable ID under drw_lock. */
506 spin_lock_irqsave(&dev->drw_lock, irqflags);
508 if (!drm_get_drawable_info(dev, swap->drawable)) {
509 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
510 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
514 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
516 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
/* Relative requests are converted to absolute against the current
 * counter value. */
518 if (seqtype == _DRM_VBLANK_RELATIVE)
519 swap->sequence += curseq;
/* Target already passed (wrap-safe test): either retarget to the next
 * vblank (NEXTONMISS) or report the miss. */
521 if ((curseq - swap->sequence) <= (1<<23)) {
522 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
523 swap->sequence = curseq + 1;
525 DRM_DEBUG("Missed target sequence\n");
530 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* Coalesce duplicates: an identical (drawable, pipe, sequence) entry
 * already queued means there is nothing to do. */
532 list_for_each(list, &dev_priv->vbl_swaps.head) {
533 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
535 if (vbl_swap->drw_id == swap->drawable &&
536 vbl_swap->pipe == pipe &&
537 vbl_swap->sequence == swap->sequence) {
538 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
539 DRM_DEBUG("Already scheduled\n");
544 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/* Hard cap on queued swaps to bound kernel memory usage. */
546 if (dev_priv->swaps_pending >= 100) {
547 DRM_DEBUG("Too many swaps queued\n");
551 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
/* Allocation-failure branch (condition elided in this view). */
554 DRM_ERROR("Failed to allocate memory to queue swap\n");
560 vbl_swap->drw_id = swap->drawable;
561 vbl_swap->pipe = pipe;
562 vbl_swap->sequence = swap->sequence;
/* Queue it; the interrupt handler checks swaps_pending to decide
 * whether to schedule the tasklet. */
564 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
566 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
567 dev_priv->swaps_pending++;
569 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/* Called before the IRQ line is requested: mask and disable all
 * interrupt sources except bit 0 of HWSTAM (0xfffe) so nothing fires
 * while the handler is not yet installed. */
576 void i915_driver_irq_preinstall(struct drm_device * dev)
578 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
580 I915_WRITE16(I915REG_HWSTAM, 0xfffe);
581 I915_WRITE16(I915REG_INT_MASK_R, 0x0);
582 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/* Called after the IRQ line is live: initialize the swap queue state,
 * default to pipe A vblanks if userspace has not chosen a pipe, enable
 * interrupts, and set up the user-interrupt wait queue. */
585 void i915_driver_irq_postinstall(struct drm_device * dev)
587 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
589 spin_lock_init(&dev_priv->swaps_lock);
590 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
591 dev_priv->swaps_pending = 0;
593 if (!dev_priv->vblank_pipe)
594 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
595 i915_enable_interrupt(dev);
596 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/* Called when the IRQ line is released: mask everything, disable all
 * sources, then ack any interrupt still latched in the identity
 * register so it cannot fire after teardown.  NOTE(review): the
 * declaration of `temp` and a null-check on dev_priv appear to be on
 * lines elided from this view. */
599 void i915_driver_irq_uninstall(struct drm_device * dev)
601 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
607 I915_WRITE16(I915REG_HWSTAM, 0xffff);
608 I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
609 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/* Write the latched bits back to clear them. */
611 temp = I915_READ16(I915REG_INT_IDENTITY_R);
612 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);