1 /* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
2 /*
3  * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4  * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5  * Copyright 2007 Advanced Micro Devices, Inc.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25  * DEALINGS IN THE SOFTWARE.
26  *
27  * Authors:
28  *    Kevin E. Martin <martin@valinux.com>
29  *    Gareth Hughes <gareth@valinux.com>
30  */
31
32 #include "drmP.h"
33 #include "drm.h"
34 #include "radeon_drm.h"
35 #include "radeon_drv.h"
36 #include "r300_reg.h"
37
38 #include "radeon_microcode.h"
39
40 #define RADEON_FIFO_DEBUG       0
41
42 static int radeon_do_cleanup_cp(struct drm_device * dev);
43 static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
44
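/* The memory controller registers on these chips are reached indirectly:
 * write the register number to an index register, then access the value
 * through the matching data register.
 */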
45 static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
46 {
47         u32 ret;
48         RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
49         ret = RADEON_READ(R520_MC_IND_DATA);
50         RADEON_WRITE(R520_MC_IND_INDEX, 0);
51         return ret;
52 }
53
54 static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
55 {
56         u32 ret;
57         RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
58         ret = RADEON_READ(RS480_NB_MC_DATA);
59         RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
60         return ret;
61 }
62
63 static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
64 {
65         u32 ret;
66         RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
67         ret = RADEON_READ(RS690_MC_DATA);
68         RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
69         return ret;
70 }
71
72 static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
73 {
74         if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
75             ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
76                 return RS690_READ_MCIND(dev_priv, addr);
77         else
78                 return RS480_READ_MCIND(dev_priv, addr);
79 }
80
81 u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
82 {
83
84         if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
85                 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
86         else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
87                  ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
88                 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
89         else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
90                 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
91         else
92                 return RADEON_READ(RADEON_MC_FB_LOCATION);
93 }
94
95 static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
96 {
97         if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
98                 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
99         else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
100                  ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
101                 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
102         else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
103                 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
104         else
105                 RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
106 }
107
108 static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
109 {
110         if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
111                 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
112         else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
113                  ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
114                 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
115         else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
116                 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
117         else
118                 RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
119 }
120
121 static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
122 {
123         u32 agp_base_hi = upper_32_bits(agp_base);
124         u32 agp_base_lo = agp_base & 0xffffffff;
125
126         if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
127                 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
128                 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
129         } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
130                  ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
131                 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
132                 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
133         } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
134                 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
135                 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
136         } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) {
137                 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
138                 RADEON_WRITE(RS480_AGP_BASE_2, 0);
139         } else {
140                 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
141                 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
142                         RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
143         }
144 }
145
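/* The PLL and PCIE configuration registers use the same kind of
 * index/data access pattern.
 */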
146 static int RADEON_READ_PLL(struct drm_device * dev, int addr)
147 {
148         drm_radeon_private_t *dev_priv = dev->dev_private;
149
150         RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
151         return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
152 }
153
154 static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
155 {
156         RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
157         return RADEON_READ(RADEON_PCIE_DATA);
158 }
159
160 #if RADEON_FIFO_DEBUG
161 static void radeon_status(drm_radeon_private_t * dev_priv)
162 {
163         printk("%s:\n", __func__);
164         printk("RBBM_STATUS = 0x%08x\n",
165                (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
166         printk("CP_RB_RPTR = 0x%08x\n",
167                (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
168         printk("CP_RB_WPTR = 0x%08x\n",
169                (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
170         printk("AIC_CNTL = 0x%08x\n",
171                (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
172         printk("AIC_STAT = 0x%08x\n",
173                (unsigned int)RADEON_READ(RADEON_AIC_STAT));
174         printk("AIC_PT_BASE = 0x%08x\n",
175                (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
176         printk("TLB_ADDR = 0x%08x\n",
177                (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
178         printk("TLB_DATA = 0x%08x\n",
179                (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
180 }
181 #endif
182
183 /* ================================================================
184  * Engine, FIFO control
185  */
186
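/* Flush the destination cache on pre-R300 parts (up to RV280) and wait
 * for it to drain; newer parts skip the flush here since it can lock up.
 */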
187 static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
188 {
189         u32 tmp;
190         int i;
191
192         dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
193
194         if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
195                 tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
196                 tmp |= RADEON_RB3D_DC_FLUSH_ALL;
197                 RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
198
199                 for (i = 0; i < dev_priv->usec_timeout; i++) {
200                         if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
201                               & RADEON_RB3D_DC_BUSY)) {
202                                 return 0;
203                         }
204                         DRM_UDELAY(1);
205                 }
206         } else {
207                 /* don't flush or purge the cache here; doing so locks up the engine */
208                 return 0;
209         }
210
211 #if RADEON_FIFO_DEBUG
212         DRM_ERROR("failed!\n");
213         radeon_status(dev_priv);
214 #endif
215         return -EBUSY;
216 }
217
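/* Poll RBBM_STATUS until at least 'entries' free command FIFO slots are
 * reported, or the usec timeout expires.
 */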
218 static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
219 {
220         int i;
221
222         dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
223
224         for (i = 0; i < dev_priv->usec_timeout; i++) {
225                 int slots = (RADEON_READ(RADEON_RBBM_STATUS)
226                              & RADEON_RBBM_FIFOCNT_MASK);
227                 if (slots >= entries)
228                         return 0;
229                 DRM_UDELAY(1);
230         }
231         DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n",
232                  RADEON_READ(RADEON_RBBM_STATUS),
233                  RADEON_READ(R300_VAP_CNTL_STATUS));
234
235 #if RADEON_FIFO_DEBUG
236         DRM_ERROR("failed!\n");
237         radeon_status(dev_priv);
238 #endif
239         return -EBUSY;
240 }
241
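/* Wait for the engine to go fully idle: drain the command FIFO, poll until
 * RBBM_ACTIVE clears, then flush the pixel cache.
 */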
242 static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
243 {
244         int i, ret;
245
246         dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
247
248         ret = radeon_do_wait_for_fifo(dev_priv, 64);
249         if (ret)
250                 return ret;
251
252         for (i = 0; i < dev_priv->usec_timeout; i++) {
253                 if (!(RADEON_READ(RADEON_RBBM_STATUS)
254                       & RADEON_RBBM_ACTIVE)) {
255                         radeon_do_pixcache_flush(dev_priv);
256                         return 0;
257                 }
258                 DRM_UDELAY(1);
259         }
260         DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n",
261                  RADEON_READ(RADEON_RBBM_STATUS),
262                  RADEON_READ(R300_VAP_CNTL_STATUS));
263
264 #if RADEON_FIFO_DEBUG
265         DRM_ERROR("failed!\n");
266         radeon_status(dev_priv);
267 #endif
268         return -EBUSY;
269 }
270
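/* Work out how many GB (raster) pipes the chip has and program the tiling
 * and pipe configuration registers to match.
 */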
271 static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
272 {
273         uint32_t gb_tile_config, gb_pipe_sel = 0;
274
275         /* RS4xx/RS6xx/R4xx/R5xx */
276         if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
277                 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
278                 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
279         } else {
280                 /* R3xx */
281                 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
282                     ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
283                         dev_priv->num_gb_pipes = 2;
284                 } else {
285                         /* RV3xx */
286                         dev_priv->num_gb_pipes = 1;
287                 }
288         }
289         DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
290
291         gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
292
293         switch (dev_priv->num_gb_pipes) {
294         case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
295         case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
296         case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
297         default:
298         case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
299         }
300
301         if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
302                 RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
303                 RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
304         }
305         RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
306         radeon_do_wait_for_idle(dev_priv);
307         RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
308         RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
309                                                R300_DC_AUTOFLUSH_ENABLE |
310                                                R300_DC_DC_DISABLE_IGNORE_PE));
311
312
313 }
314
315 /* ================================================================
316  * CP control, initialization
317  */
318
319 /* Load the microcode for the CP */
320 static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
321 {
322         int i;
323         DRM_DEBUG("\n");
324
325         radeon_do_wait_for_idle(dev_priv);
326
327         RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
328         if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
329             ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
330             ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
331             ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
332             ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
333                 DRM_INFO("Loading R100 Microcode\n");
334                 for (i = 0; i < 256; i++) {
335                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
336                                      R100_cp_microcode[i][1]);
337                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
338                                      R100_cp_microcode[i][0]);
339                 }
340         } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
341                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
342                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
343                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
344                 DRM_INFO("Loading R200 Microcode\n");
345                 for (i = 0; i < 256; i++) {
346                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
347                                      R200_cp_microcode[i][1]);
348                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
349                                      R200_cp_microcode[i][0]);
350                 }
351         } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
352                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
353                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
354                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
355                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
356                 DRM_INFO("Loading R300 Microcode\n");
357                 for (i = 0; i < 256; i++) {
358                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
359                                      R300_cp_microcode[i][1]);
360                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
361                                      R300_cp_microcode[i][0]);
362                 }
363         } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
364                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
365                 DRM_INFO("Loading R400 Microcode\n");
366                 for (i = 0; i < 256; i++) {
367                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
368                                      R420_cp_microcode[i][1]);
369                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
370                                      R420_cp_microcode[i][0]);
371                 }
372         } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
373                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
374                 DRM_INFO("Loading RS690/RS740 Microcode\n");
375                 for (i = 0; i < 256; i++) {
376                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
377                                      RS690_cp_microcode[i][1]);
378                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
379                                      RS690_cp_microcode[i][0]);
380                 }
381         } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
382                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
383                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
384                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
385                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
386                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
387                 DRM_INFO("Loading R500 Microcode\n");
388                 for (i = 0; i < 256; i++) {
389                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
390                                      R520_cp_microcode[i][1]);
391                         RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
392                                      R520_cp_microcode[i][0]);
393                 }
394         }
395 }
396
397 /* Flush any pending commands to the CP.  This should only be used just
398  * prior to a wait for idle, as it informs the engine that the command
399  * stream is ending.
400  */
401 static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
402 {
403         DRM_DEBUG("\n");
404 #if 0
405         u32 tmp;
406
407         tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
408         RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
409 #endif
410 }
411
412 /* Wait for the CP to go idle.
413  */
414 int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
415 {
416         RING_LOCALS;
417         DRM_DEBUG("\n");
418
419         BEGIN_RING(6);
420
421         RADEON_PURGE_CACHE();
422         RADEON_PURGE_ZCACHE();
423         RADEON_WAIT_UNTIL_IDLE();
424
425         ADVANCE_RING();
426         COMMIT_RING();
427
428         return radeon_do_wait_for_idle(dev_priv);
429 }
430
431 /* Start the Command Processor.
432  */
433 static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
434 {
435         RING_LOCALS;
436         DRM_DEBUG("\n");
437
438         radeon_do_wait_for_idle(dev_priv);
439
440         RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
441
442         dev_priv->cp_running = 1;
443
444         BEGIN_RING(8);
445         /* on r5xx, ISYNC_CNTL can only be written through the CP, so write it here */
446         OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
447         OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
448                  RADEON_ISYNC_ANY3D_IDLE2D |
449                  RADEON_ISYNC_WAIT_IDLEGUI |
450                  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
451         RADEON_PURGE_CACHE();
452         RADEON_PURGE_ZCACHE();
453         RADEON_WAIT_UNTIL_IDLE();
454         ADVANCE_RING();
455         COMMIT_RING();
456
457         dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
458 }
459
460 /* Reset the Command Processor.  This will not flush any pending
461  * commands, so you must wait for the CP command stream to complete
462  * before calling this routine.
463  */
464 static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
465 {
466         u32 cur_read_ptr;
467         DRM_DEBUG("\n");
468
469         cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
470         RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
471         SET_RING_HEAD(dev_priv, cur_read_ptr);
472         dev_priv->ring.tail = cur_read_ptr;
473 }
474
475 /* Stop the Command Processor.  This will not flush any pending
476  * commands, so you must flush the command stream and wait for the CP
477  * to go idle before calling this routine.
478  */
479 static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
480 {
481         DRM_DEBUG("\n");
482
483         RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
484
485         dev_priv->cp_running = 0;
486 }
487
488 /* Reset the engine.  This will stop the CP if it is running.
489  */
490 static int radeon_do_engine_reset(struct drm_device * dev)
491 {
492         drm_radeon_private_t *dev_priv = dev->dev_private;
493         u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
494         DRM_DEBUG("\n");
495
496         radeon_do_pixcache_flush(dev_priv);
497
498         if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
499                 /* may need something similar for newer chips */
500                 clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
501                 mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
502
503                 RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
504                                                     RADEON_FORCEON_MCLKA |
505                                                     RADEON_FORCEON_MCLKB |
506                                                     RADEON_FORCEON_YCLKA |
507                                                     RADEON_FORCEON_YCLKB |
508                                                     RADEON_FORCEON_MC |
509                                                     RADEON_FORCEON_AIC));
510         }
511
512         rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
513
514         RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
515                                               RADEON_SOFT_RESET_CP |
516                                               RADEON_SOFT_RESET_HI |
517                                               RADEON_SOFT_RESET_SE |
518                                               RADEON_SOFT_RESET_RE |
519                                               RADEON_SOFT_RESET_PP |
520                                               RADEON_SOFT_RESET_E2 |
521                                               RADEON_SOFT_RESET_RB));
522         RADEON_READ(RADEON_RBBM_SOFT_RESET);
523         RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
524                                               ~(RADEON_SOFT_RESET_CP |
525                                                 RADEON_SOFT_RESET_HI |
526                                                 RADEON_SOFT_RESET_SE |
527                                                 RADEON_SOFT_RESET_RE |
528                                                 RADEON_SOFT_RESET_PP |
529                                                 RADEON_SOFT_RESET_E2 |
530                                                 RADEON_SOFT_RESET_RB)));
531         RADEON_READ(RADEON_RBBM_SOFT_RESET);
532
533         if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
534                 RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
535                 RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
536                 RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
537         }
538
539         /* setup the raster pipes */
540         if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
541             radeon_init_pipes(dev_priv);
542
543         /* Reset the CP ring */
544         radeon_do_cp_reset(dev_priv);
545
546         /* The CP is no longer running after an engine reset */
547         dev_priv->cp_running = 0;
548
549         /* Reset any pending vertex, indirect buffers */
550         radeon_freelist_reset(dev);
551
552         return 0;
553 }
554
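/* Program the CP ring buffer: base address, read/write pointers, ring size,
 * the read-pointer writeback address and the scratch register area, then
 * enable bus mastering.
 */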
555 static void radeon_cp_init_ring_buffer(struct drm_device * dev,
556                                        drm_radeon_private_t * dev_priv)
557 {
558         u32 ring_start, cur_read_ptr;
559         u32 tmp;
560
561         /* Initialize the memory controller. With the new memory map, the fb
562          * location is not changed here; it should already have been initialized
563          * properly. Part of the problem is that the code below is bogus: it assumes
564          * the GART is always appended to the fb, which is not necessarily the case.
565          */
566         if (!dev_priv->new_memmap)
567                 radeon_write_fb_location(dev_priv,
568                              ((dev_priv->gart_vm_start - 1) & 0xffff0000)
569                              | (dev_priv->fb_location >> 16));
570
571 #if __OS_HAS_AGP
572         if (dev_priv->flags & RADEON_IS_AGP) {
573                 radeon_write_agp_base(dev_priv, dev->agp->base);
574
575                 radeon_write_agp_location(dev_priv,
576                              (((dev_priv->gart_vm_start - 1 +
577                                 dev_priv->gart_size) & 0xffff0000) |
578                               (dev_priv->gart_vm_start >> 16)));
579
580                 ring_start = (dev_priv->cp_ring->offset
581                               - dev->agp->base
582                               + dev_priv->gart_vm_start);
583         } else
584 #endif
585                 ring_start = (dev_priv->cp_ring->offset
586                               - (unsigned long)dev->sg->virtual
587                               + dev_priv->gart_vm_start);
588
589         RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
590
591         /* Set the write pointer delay */
592         RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
593
594         /* Initialize the ring buffer's read and write pointers */
595         cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
596         RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
597         SET_RING_HEAD(dev_priv, cur_read_ptr);
598         dev_priv->ring.tail = cur_read_ptr;
599
600 #if __OS_HAS_AGP
601         if (dev_priv->flags & RADEON_IS_AGP) {
602                 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
603                              dev_priv->ring_rptr->offset
604                              - dev->agp->base + dev_priv->gart_vm_start);
605         } else
606 #endif
607         {
608                 struct drm_sg_mem *entry = dev->sg;
609                 unsigned long tmp_ofs, page_ofs;
610
611                 tmp_ofs = dev_priv->ring_rptr->offset -
612                                 (unsigned long)dev->sg->virtual;
613                 page_ofs = tmp_ofs >> PAGE_SHIFT;
614
615                 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
616                 DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
617                           (unsigned long)entry->busaddr[page_ofs],
618                           entry->handle + tmp_ofs);
619         }
620
621         /* Set ring buffer size */
622 #ifdef __BIG_ENDIAN
623         RADEON_WRITE(RADEON_CP_RB_CNTL,
624                      RADEON_BUF_SWAP_32BIT |
625                      (dev_priv->ring.fetch_size_l2ow << 18) |
626                      (dev_priv->ring.rptr_update_l2qw << 8) |
627                      dev_priv->ring.size_l2qw);
628 #else
629         RADEON_WRITE(RADEON_CP_RB_CNTL,
630                      (dev_priv->ring.fetch_size_l2ow << 18) |
631                      (dev_priv->ring.rptr_update_l2qw << 8) |
632                      dev_priv->ring.size_l2qw);
633 #endif
634
635
636         /* Initialize the scratch register pointer.  This will cause
637          * the scratch register values to be written out to memory
638          * whenever they are updated.
639          *
640  * We simply put this behind the ring read pointer; this works
641  * with PCI GART as well as (whatever kind of) AGP GART.
642          */
643         RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
644                      + RADEON_SCRATCH_REG_OFFSET);
645
646         dev_priv->scratch = ((__volatile__ u32 *)
647                              dev_priv->ring_rptr->handle +
648                              (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
649
650         RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
651
652         /* Turn on bus mastering */
653         tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
654         RADEON_WRITE(RADEON_BUS_CNTL, tmp);
655
656         dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
657         RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
658
659         dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
660         RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
661                      dev_priv->sarea_priv->last_dispatch);
662
663         dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
664         RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
665
666         radeon_do_wait_for_idle(dev_priv);
667
668         /* Sync everything up */
669         RADEON_WRITE(RADEON_ISYNC_CNTL,
670                      (RADEON_ISYNC_ANY2D_IDLE3D |
671                       RADEON_ISYNC_ANY3D_IDLE2D |
672                       RADEON_ISYNC_WAIT_IDLEGUI |
673                       RADEON_ISYNC_CPSCRATCH_IDLEGUI));
674
675 }
676
677 static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
678 {
679         u32 tmp;
680
681         /* Start by assuming that writeback doesn't work */
682         dev_priv->writeback_works = 0;
683
684         /* Writeback doesn't seem to work everywhere; test it here and only
685          * enable it if it appears to work
686          */
687         DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
688         RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
689
690         for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
691                 if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
692                     0xdeadbeef)
693                         break;
694                 DRM_UDELAY(1);
695         }
696
697         if (tmp < dev_priv->usec_timeout) {
698                 dev_priv->writeback_works = 1;
699                 DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
700         } else {
701                 dev_priv->writeback_works = 0;
702                 DRM_INFO("writeback test failed\n");
703         }
704         if (radeon_no_wb == 1) {
705                 dev_priv->writeback_works = 0;
706                 DRM_INFO("writeback forced off\n");
707         }
708
709         if (!dev_priv->writeback_works) {
710                 /* Disable writeback to avoid unnecessary bus master transfers */
711                 RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
712                              RADEON_RB_NO_UPDATE);
713                 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
714         }
715 }
716
717 /* Enable or disable IGP GART on the chip */
718 static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
719 {
720         u32 temp;
721
722         if (on) {
723                 DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
724                           dev_priv->gart_vm_start,
725                           (long)dev_priv->gart_info.bus_addr,
726                           dev_priv->gart_size);
727
728                 temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
729                 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
730                     ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
731                         IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
732                                                              RS690_BLOCK_GFX_D3_EN));
733                 else
734                         IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
735
736                 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
737                                                                RS480_VA_SIZE_32MB));
738
739                 temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
740                 IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
741                                                         RS480_TLB_ENABLE |
742                                                         RS480_GTW_LAC_EN |
743                                                         RS480_1LEVEL_GART));
744
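                /* bits 31:12 of the GART table bus address are written directly;
                 * bits 39:32 are packed into bits 11:4 of the register
                 */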
745                 temp = dev_priv->gart_info.bus_addr & 0xfffff000;
746                 temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
747                 IGP_WRITE_MCIND(RS480_GART_BASE, temp);
748
749                 temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
750                 IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
751                                                       RS480_REQ_TYPE_SNOOP_DIS));
752
753                 radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
754
755                 dev_priv->gart_size = 32*1024*1024;
756                 temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
757                          0xffff0000) | (dev_priv->gart_vm_start >> 16));
758
759                 radeon_write_agp_location(dev_priv, temp);
760
761                 temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
762                 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
763                                                                RS480_VA_SIZE_32MB));
764
765                 do {
766                         temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
767                         if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
768                                 break;
769                         DRM_UDELAY(1);
770                 } while (1);
771
772                 IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
773                                 RS480_GART_CACHE_INVALIDATE);
774
775                 do {
776                         temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
777                         if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
778                                 break;
779                         DRM_UDELAY(1);
780                 } while (1);
781
782                 IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
783         } else {
784                 IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
785         }
786 }
787
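/* Enable or disable the PCIE GART: program the table base and the aperture
 * start/end through the PCIE index/data interface.
 */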
788 static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
789 {
790         u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
791         if (on) {
792
793                 DRM_DEBUG("programming pcie %08X %08lX %08X\n",
794                           dev_priv->gart_vm_start,
795                           (long)dev_priv->gart_info.bus_addr,
796                           dev_priv->gart_size);
797                 RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
798                                   dev_priv->gart_vm_start);
799                 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
800                                   dev_priv->gart_info.bus_addr);
801                 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
802                                   dev_priv->gart_vm_start);
803                 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
804                                   dev_priv->gart_vm_start +
805                                   dev_priv->gart_size - 1);
806
807                 radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
808
809                 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
810                                   RADEON_PCIE_TX_GART_EN);
811         } else {
812                 RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
813                                   tmp & ~RADEON_PCIE_TX_GART_EN);
814         }
815 }
816
817 /* Enable or disable PCI GART on the chip */
818 static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
819 {
820         u32 tmp;
821
822         if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
823             ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
824             (dev_priv->flags & RADEON_IS_IGPGART)) {
825                 radeon_set_igpgart(dev_priv, on);
826                 return;
827         }
828
829         if (dev_priv->flags & RADEON_IS_PCIE) {
830                 radeon_set_pciegart(dev_priv, on);
831                 return;
832         }
833
834         tmp = RADEON_READ(RADEON_AIC_CNTL);
835
836         if (on) {
837                 RADEON_WRITE(RADEON_AIC_CNTL,
838                              tmp | RADEON_PCIGART_TRANSLATE_EN);
839
840                 /* set PCI GART page-table base address
841                  */
842                 RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
843
844                 /* set address range for PCI address translate
845                  */
846                 RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
847                 RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
848                              + dev_priv->gart_size - 1);
849
850                 /* Turn off AGP aperture -- is this required for PCI GART?
851                  */
852                 radeon_write_agp_location(dev_priv, 0xffffffc0);
853                 RADEON_WRITE(RADEON_AGP_COMMAND, 0);    /* clear AGP_COMMAND */
854         } else {
855                 RADEON_WRITE(RADEON_AIC_CNTL,
856                              tmp & ~RADEON_PCIGART_TRANSLATE_EN);
857         }
858 }
859
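/* One-time CP initialisation from the CP init ioctl: validate the parameters,
 * look up and map the SAREA, ring and buffer regions, work out the memory map
 * and GART placement, then load the microcode and initialise the ring buffer.
 */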
860 static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
861 {
862         drm_radeon_private_t *dev_priv = dev->dev_private;
863
864         DRM_DEBUG("\n");
865
866         /* if we require the new memory map but don't have it, fail */
867         if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
868                 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
869                 radeon_do_cleanup_cp(dev);
870                 return -EINVAL;
871         }
872
873         if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
874                 DRM_DEBUG("Forcing AGP card to PCI mode\n");
875                 dev_priv->flags &= ~RADEON_IS_AGP;
876         } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
877                    && !init->is_pci) {
878                 DRM_DEBUG("Restoring AGP flag\n");
879                 dev_priv->flags |= RADEON_IS_AGP;
880         }
881
882         if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
883                 DRM_ERROR("PCI GART memory not allocated!\n");
884                 radeon_do_cleanup_cp(dev);
885                 return -EINVAL;
886         }
887
888         dev_priv->usec_timeout = init->usec_timeout;
889         if (dev_priv->usec_timeout < 1 ||
890             dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
891                 DRM_DEBUG("TIMEOUT problem!\n");
892                 radeon_do_cleanup_cp(dev);
893                 return -EINVAL;
894         }
895
896         /* Enable vblank on CRTC1 for older X servers
897          */
898         dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
899
900         switch(init->func) {
901         case RADEON_INIT_R200_CP:
902                 dev_priv->microcode_version = UCODE_R200;
903                 break;
904         case RADEON_INIT_R300_CP:
905                 dev_priv->microcode_version = UCODE_R300;
906                 break;
907         default:
908                 dev_priv->microcode_version = UCODE_R100;
909         }
910
911         dev_priv->do_boxes = 0;
912         dev_priv->cp_mode = init->cp_mode;
913
914         /* We don't support anything other than bus-mastering ring mode,
915          * but the ring can be in either AGP or PCI space for the ring
916          * read pointer.
917          */
918         if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
919             (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
920                 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
921                 radeon_do_cleanup_cp(dev);
922                 return -EINVAL;
923         }
924
925         switch (init->fb_bpp) {
926         case 16:
927                 dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
928                 break;
929         case 32:
930         default:
931                 dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
932                 break;
933         }
934         dev_priv->front_offset = init->front_offset;
935         dev_priv->front_pitch = init->front_pitch;
936         dev_priv->back_offset = init->back_offset;
937         dev_priv->back_pitch = init->back_pitch;
938
939         switch (init->depth_bpp) {
940         case 16:
941                 dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
942                 break;
943         case 32:
944         default:
945                 dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
946                 break;
947         }
948         dev_priv->depth_offset = init->depth_offset;
949         dev_priv->depth_pitch = init->depth_pitch;
950
951         /* Hardware state for depth clears.  Remove this if/when we no
952          * longer clear the depth buffer with a 3D rectangle.  Hard-code
953          * all values to prevent unwanted 3D state from slipping through
954          * and screwing with the clear operation.
955          */
956         dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
957                                            (dev_priv->color_fmt << 10) |
958                                            (dev_priv->microcode_version ==
959                                             UCODE_R100 ? RADEON_ZBLOCK16 : 0));
960
961         dev_priv->depth_clear.rb3d_zstencilcntl =
962             (dev_priv->depth_fmt |
963              RADEON_Z_TEST_ALWAYS |
964              RADEON_STENCIL_TEST_ALWAYS |
965              RADEON_STENCIL_S_FAIL_REPLACE |
966              RADEON_STENCIL_ZPASS_REPLACE |
967              RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
968
969         dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
970                                          RADEON_BFACE_SOLID |
971                                          RADEON_FFACE_SOLID |
972                                          RADEON_FLAT_SHADE_VTX_LAST |
973                                          RADEON_DIFFUSE_SHADE_FLAT |
974                                          RADEON_ALPHA_SHADE_FLAT |
975                                          RADEON_SPECULAR_SHADE_FLAT |
976                                          RADEON_FOG_SHADE_FLAT |
977                                          RADEON_VTX_PIX_CENTER_OGL |
978                                          RADEON_ROUND_MODE_TRUNC |
979                                          RADEON_ROUND_PREC_8TH_PIX);
980
981
982         dev_priv->ring_offset = init->ring_offset;
983         dev_priv->ring_rptr_offset = init->ring_rptr_offset;
984         dev_priv->buffers_offset = init->buffers_offset;
985         dev_priv->gart_textures_offset = init->gart_textures_offset;
986
987         dev_priv->sarea = drm_getsarea(dev);
988         if (!dev_priv->sarea) {
989                 DRM_ERROR("could not find sarea!\n");
990                 radeon_do_cleanup_cp(dev);
991                 return -EINVAL;
992         }
993
994         dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
995         if (!dev_priv->cp_ring) {
996                 DRM_ERROR("could not find cp ring region!\n");
997                 radeon_do_cleanup_cp(dev);
998                 return -EINVAL;
999         }
1000         dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
1001         if (!dev_priv->ring_rptr) {
1002                 DRM_ERROR("could not find ring read pointer!\n");
1003                 radeon_do_cleanup_cp(dev);
1004                 return -EINVAL;
1005         }
1006         dev->agp_buffer_token = init->buffers_offset;
1007         dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1008         if (!dev->agp_buffer_map) {
1009                 DRM_ERROR("could not find dma buffer region!\n");
1010                 radeon_do_cleanup_cp(dev);
1011                 return -EINVAL;
1012         }
1013
1014         if (init->gart_textures_offset) {
1015                 dev_priv->gart_textures =
1016                     drm_core_findmap(dev, init->gart_textures_offset);
1017                 if (!dev_priv->gart_textures) {
1018                         DRM_ERROR("could not find GART texture region!\n");
1019                         radeon_do_cleanup_cp(dev);
1020                         return -EINVAL;
1021                 }
1022         }
1023
1024         dev_priv->sarea_priv =
1025             (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
1026                                     init->sarea_priv_offset);
1027
1028 #if __OS_HAS_AGP
1029         if (dev_priv->flags & RADEON_IS_AGP) {
1030                 drm_core_ioremap(dev_priv->cp_ring, dev);
1031                 drm_core_ioremap(dev_priv->ring_rptr, dev);
1032                 drm_core_ioremap(dev->agp_buffer_map, dev);
1033                 if (!dev_priv->cp_ring->handle ||
1034                     !dev_priv->ring_rptr->handle ||
1035                     !dev->agp_buffer_map->handle) {
1036                         DRM_ERROR("could not find ioremap agp regions!\n");
1037                         radeon_do_cleanup_cp(dev);
1038                         return -EINVAL;
1039                 }
1040         } else
1041 #endif
1042         {
1043                 dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
1044                 dev_priv->ring_rptr->handle =
1045                     (void *)dev_priv->ring_rptr->offset;
1046                 dev->agp_buffer_map->handle =
1047                     (void *)dev->agp_buffer_map->offset;
1048
1049                 DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
1050                           dev_priv->cp_ring->handle);
1051                 DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
1052                           dev_priv->ring_rptr->handle);
1053                 DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
1054                           dev->agp_buffer_map->handle);
1055         }
1056
1057         dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
1058         dev_priv->fb_size =
1059                 ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
1060                 - dev_priv->fb_location;
1061
1062         dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
1063                                         ((dev_priv->front_offset
1064                                           + dev_priv->fb_location) >> 10));
1065
1066         dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
1067                                        ((dev_priv->back_offset
1068                                          + dev_priv->fb_location) >> 10));
1069
1070         dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
1071                                         ((dev_priv->depth_offset
1072                                           + dev_priv->fb_location) >> 10));
1073
1074         dev_priv->gart_size = init->gart_size;
1075
1076         /* Now let's set the memory map ... */
1077         if (dev_priv->new_memmap) {
1078                 u32 base = 0;
1079
1080                 DRM_INFO("Setting GART location based on new memory map\n");
1081
1082                 /* If using AGP, try to locate the AGP aperture at the same
1083                  * location in the card and on the bus, though we have to
1084                  * align it down.
1085                  */
1086 #if __OS_HAS_AGP
1087                 if (dev_priv->flags & RADEON_IS_AGP) {
1088                         base = dev->agp->base;
1089                         /* Check if valid */
1090                         if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
1091                             base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
1092                                 DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
1093                                          dev->agp->base);
1094                                 base = 0;
1095                         }
1096                 }
1097 #endif
1098                 /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
1099                 if (base == 0) {
1100                         base = dev_priv->fb_location + dev_priv->fb_size;
1101                         if (base < dev_priv->fb_location ||
1102                             ((base + dev_priv->gart_size) & 0xfffffffful) < base)
1103                                 base = dev_priv->fb_location
1104                                         - dev_priv->gart_size;
1105                 }
1106                 dev_priv->gart_vm_start = base & 0xffc00000u;
1107                 if (dev_priv->gart_vm_start != base)
1108                         DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
1109                                  base, dev_priv->gart_vm_start);
1110         } else {
1111                 DRM_INFO("Setting GART location based on old memory map\n");
1112                 dev_priv->gart_vm_start = dev_priv->fb_location +
1113                         RADEON_READ(RADEON_CONFIG_APER_SIZE);
1114         }
1115
1116 #if __OS_HAS_AGP
1117         if (dev_priv->flags & RADEON_IS_AGP)
1118                 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1119                                                  - dev->agp->base
1120                                                  + dev_priv->gart_vm_start);
1121         else
1122 #endif
1123                 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1124                                         - (unsigned long)dev->sg->virtual
1125                                         + dev_priv->gart_vm_start);
1126
1127         DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
1128         DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
1129         DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
1130                   dev_priv->gart_buffers_offset);
1131
1132         dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
1133         dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
1134                               + init->ring_size / sizeof(u32));
1135         dev_priv->ring.size = init->ring_size;
1136         dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
1137
1138         dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
1139         dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
1140
1141         dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
1142         dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
1143         dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1144
1145         dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
1146
1147 #if __OS_HAS_AGP
1148         if (dev_priv->flags & RADEON_IS_AGP) {
1149                 /* Turn off PCI GART */
1150                 radeon_set_pcigart(dev_priv, 0);
1151         } else
1152 #endif
1153         {
1154                 dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
1155                 /* if we have an offset set from userspace */
1156                 if (dev_priv->pcigart_offset_set) {
1157                         dev_priv->gart_info.bus_addr =
1158                             dev_priv->pcigart_offset + dev_priv->fb_location;
1159                         dev_priv->gart_info.mapping.offset =
1160                             dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
1161                         dev_priv->gart_info.mapping.size =
1162                             dev_priv->gart_info.table_size;
1163
1164                         drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
1165                         dev_priv->gart_info.addr =
1166                             dev_priv->gart_info.mapping.handle;
1167
1168                         if (dev_priv->flags & RADEON_IS_PCIE)
1169                                 dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
1170                         else
1171                                 dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
1172                         dev_priv->gart_info.gart_table_location =
1173                             DRM_ATI_GART_FB;
1174
1175                         DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
1176                                   dev_priv->gart_info.addr,
1177                                   dev_priv->pcigart_offset);
1178                 } else {
1179                         if (dev_priv->flags & RADEON_IS_IGPGART)
1180                                 dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
1181                         else
1182                                 dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
1183                         dev_priv->gart_info.gart_table_location =
1184                             DRM_ATI_GART_MAIN;
1185                         dev_priv->gart_info.addr = NULL;
1186                         dev_priv->gart_info.bus_addr = 0;
1187                         if (dev_priv->flags & RADEON_IS_PCIE) {
1188                                 DRM_ERROR
1189                                     ("Cannot use PCI Express without GART in FB memory\n");
1190                                 radeon_do_cleanup_cp(dev);
1191                                 return -EINVAL;
1192                         }
1193                 }
1194
1195                 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
1196                         DRM_ERROR("failed to init PCI GART!\n");
1197                         radeon_do_cleanup_cp(dev);
1198                         return -ENOMEM;
1199                 }
1200
1201                 /* Turn on PCI GART */
1202                 radeon_set_pcigart(dev_priv, 1);
1203         }
1204
1205         radeon_cp_load_microcode(dev_priv);
1206         radeon_cp_init_ring_buffer(dev, dev_priv);
1207
1208         dev_priv->last_buf = 0;
1209
1210         radeon_do_engine_reset(dev);
1211         radeon_test_writeback(dev_priv);
1212
1213         return 0;
1214 }
1215
1216 static int radeon_do_cleanup_cp(struct drm_device * dev)
1217 {
1218         drm_radeon_private_t *dev_priv = dev->dev_private;
1219         DRM_DEBUG("\n");
1220
1221         /* Make sure interrupts are disabled here, because the uninstall ioctl
1222          * may not have been called from userspace; once dev_private
1223          * is freed, it's too late.
1224          */
1225         if (dev->irq_enabled)
1226                 drm_irq_uninstall(dev);
1227
1228 #if __OS_HAS_AGP
1229         if (dev_priv->flags & RADEON_IS_AGP) {
1230                 if (dev_priv->cp_ring != NULL) {
1231                         drm_core_ioremapfree(dev_priv->cp_ring, dev);
1232                         dev_priv->cp_ring = NULL;
1233                 }
1234                 if (dev_priv->ring_rptr != NULL) {
1235                         drm_core_ioremapfree(dev_priv->ring_rptr, dev);
1236                         dev_priv->ring_rptr = NULL;
1237                 }
1238                 if (dev->agp_buffer_map != NULL) {
1239                         drm_core_ioremapfree(dev->agp_buffer_map, dev);
1240                         dev->agp_buffer_map = NULL;
1241                 }
1242         } else
1243 #endif
1244         {
1245
1246                 if (dev_priv->gart_info.bus_addr) {
1247                         /* Turn off PCI GART */
1248                         radeon_set_pcigart(dev_priv, 0);
1249                         if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
1250                                 DRM_ERROR("failed to cleanup PCI GART!\n");
1251                 }
1252
1253                 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
1254                 {
1255                         drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
1256                         dev_priv->gart_info.addr = 0;
1257                 }
1258         }
1259         /* only clear to the start of flags */
1260         memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
1261
1262         return 0;
1263 }
1264
1265 /* This code will reinit the Radeon CP hardware after a resume from disc.
1266  * AFAIK, it would be very difficult to pickle the state at suspend time, so
1267  * here we make sure that all Radeon hardware initialisation is re-done without
1268  * affecting running applications.
1269  *
1270  * Charl P. Botha <http://cpbotha.net>
1271  */
1272 static int radeon_do_resume_cp(struct drm_device * dev)
1273 {
1274         drm_radeon_private_t *dev_priv = dev->dev_private;
1275
1276         if (!dev_priv) {
1277                 DRM_ERROR("Called with no initialization\n");
1278                 return -EINVAL;
1279         }
1280
1281         DRM_DEBUG("Starting radeon_do_resume_cp()\n");
1282
1283 #if __OS_HAS_AGP
1284         if (dev_priv->flags & RADEON_IS_AGP) {
1285                 /* Turn off PCI GART */
1286                 radeon_set_pcigart(dev_priv, 0);
1287         } else
1288 #endif
1289         {
1290                 /* Turn on PCI GART */
1291                 radeon_set_pcigart(dev_priv, 1);
1292         }
1293
1294         radeon_cp_load_microcode(dev_priv);
1295         radeon_cp_init_ring_buffer(dev, dev_priv);
1296
1297         radeon_do_engine_reset(dev);
1298         radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
1299
1300         DRM_DEBUG("radeon_do_resume_cp() complete\n");
1301
1302         return 0;
1303 }
1304
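/* Ioctl entry point for CP setup and teardown.  init->func selects one of
 * the per-family init paths (all of which end up in radeon_do_init_cp(),
 * with the R300 path priming the register flag table first) or the cleanup
 * path.  As a rough sketch of the caller side only - the identifiers below
 * come from radeon_drm.h and libdrm, not from this file - the DDX would do
 * something along these lines:
 *
 *      drm_radeon_init_t init = { 0 };
 *      init.func = RADEON_INIT_R200_CP;
 *      ... fill in ring, read-pointer and buffer offsets ...
 *      drmCommandWrite(fd, DRM_RADEON_CP_INIT, &init, sizeof(init));
 */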
1305 int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
1306 {
1307         drm_radeon_init_t *init = data;
1308
1309         LOCK_TEST_WITH_RETURN(dev, file_priv);
1310
1311         if (init->func == RADEON_INIT_R300_CP)
1312                 r300_init_reg_flags(dev);
1313
1314         switch (init->func) {
1315         case RADEON_INIT_CP:
1316         case RADEON_INIT_R200_CP:
1317         case RADEON_INIT_R300_CP:
1318                 return radeon_do_init_cp(dev, init);
1319         case RADEON_CLEANUP_CP:
1320                 return radeon_do_cleanup_cp(dev);
1321         }
1322
1323         return -EINVAL;
1324 }
1325
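/* Start the CP ring after initialisation.  Returns success without doing
 * anything if the CP is already running, or if the configured cp_mode is
 * RADEON_CSQ_PRIDIS_INDDIS, which the code treats as a bogus mode.
 */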
1326 int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
1327 {
1328         drm_radeon_private_t *dev_priv = dev->dev_private;
1329         DRM_DEBUG("\n");
1330
1331         LOCK_TEST_WITH_RETURN(dev, file_priv);
1332
1333         if (dev_priv->cp_running) {
1334                 DRM_DEBUG("while CP running\n");
1335                 return 0;
1336         }
1337         if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
1338                 DRM_DEBUG("called with bogus CP mode (%d)\n",
1339                           dev_priv->cp_mode);
1340                 return 0;
1341         }
1342
1343         radeon_do_cp_start(dev_priv);
1344
1345         return 0;
1346 }
1347
1348 /* Stop the CP.  The engine must have been idled before calling this
1349  * routine.
1350  */
1351 int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
1352 {
1353         drm_radeon_private_t *dev_priv = dev->dev_private;
1354         drm_radeon_cp_stop_t *stop = data;
1355         int ret;
1356         DRM_DEBUG("\n");
1357
1358         LOCK_TEST_WITH_RETURN(dev, file_priv);
1359
1360         if (!dev_priv->cp_running)
1361                 return 0;
1362
1363         /* Flush any pending CP commands.  This ensures any outstanding
1364          * commands are executed by the engine before we turn it off.
1365          */
1366         if (stop->flush) {
1367                 radeon_do_cp_flush(dev_priv);
1368         }
1369
1370         /* If we fail to make the engine go idle, we return an error
1371          * code so that the DRM ioctl wrapper can try again.
1372          */
1373         if (stop->idle) {
1374                 ret = radeon_do_cp_idle(dev_priv);
1375                 if (ret)
1376                         return ret;
1377         }
1378
1379         /* Finally, we can turn off the CP.  If the engine isn't idle,
1380          * we will get some dropped triangles as they won't be fully
1381          * rendered before the CP is shut down.
1382          */
1383         radeon_do_cp_stop(dev_priv);
1384
1385         /* Reset the engine */
1386         radeon_do_engine_reset(dev);
1387
1388         return 0;
1389 }
1390
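/* Final teardown on device release: poll until the CP goes idle (yielding
 * the CPU between attempts), stop it and reset the engine, mask all
 * interrupts, clear the surface registers, free the GART and framebuffer
 * memory heaps and then run the normal CP cleanup path.
 */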
1391 void radeon_do_release(struct drm_device * dev)
1392 {
1393         drm_radeon_private_t *dev_priv = dev->dev_private;
1394         int i, ret;
1395
1396         if (dev_priv) {
1397                 if (dev_priv->cp_running) {
1398                         /* Stop the cp */
1399                         while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
1400                                 DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
1401 #ifdef __linux__
1402                                 schedule();
1403 #else
1404                                 tsleep(&ret, PZERO, "rdnrel", 1);
1405 #endif
1406                         }
1407                         radeon_do_cp_stop(dev_priv);
1408                         radeon_do_engine_reset(dev);
1409                 }
1410
1411                 /* Disable *all* interrupts */
1412                 if (dev_priv->mmio)     /* remove this after permanent addmaps */
1413                         RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
1414
1415                 if (dev_priv->mmio) {   /* remove all surfaces */
1416                         for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1417                                 RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
1418                                 RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
1419                                              16 * i, 0);
1420                                 RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
1421                                              16 * i, 0);
1422                         }
1423                 }
1424
1425                 /* Free memory heap structures */
1426                 radeon_mem_takedown(&(dev_priv->gart_heap));
1427                 radeon_mem_takedown(&(dev_priv->fb_heap));
1428
1429                 /* deallocate kernel resources */
1430                 radeon_do_cleanup_cp(dev);
1431         }
1432 }
1433
1434 /* Just reset the CP ring.  Called as part of an X Server engine reset.
1435  */
1436 int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1437 {
1438         drm_radeon_private_t *dev_priv = dev->dev_private;
1439         DRM_DEBUG("\n");
1440
1441         LOCK_TEST_WITH_RETURN(dev, file_priv);
1442
1443         if (!dev_priv) {
1444                 DRM_DEBUG("called before init done\n");
1445                 return -EINVAL;
1446         }
1447
1448         radeon_do_cp_reset(dev_priv);
1449
1450         /* The CP is no longer running after an engine reset */
1451         dev_priv->cp_running = 0;
1452
1453         return 0;
1454 }
1455
1456 int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
1457 {
1458         drm_radeon_private_t *dev_priv = dev->dev_private;
1459         DRM_DEBUG("\n");
1460
1461         LOCK_TEST_WITH_RETURN(dev, file_priv);
1462
1463         return radeon_do_cp_idle(dev_priv);
1464 }
1465
1466 /* Added by Charl P. Botha to call radeon_do_resume_cp().
1467  */
1468 int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
1469 {
1470
1471         return radeon_do_resume_cp(dev);
1472 }
1473
1474 int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1475 {
1476         DRM_DEBUG("\n");
1477
1478         LOCK_TEST_WITH_RETURN(dev, file_priv);
1479
1480         return radeon_do_engine_reset(dev);
1481 }
1482
1483 /* ================================================================
1484  * Fullscreen mode
1485  */
1486
1487 /* KW: Deprecated to say the least:
1488  */
1489 int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
1490 {
1491         return 0;
1492 }
1493
1494 /* ================================================================
1495  * Freelist management
1496  */
1497
1498 /* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
1499  *   bufs until freelist code is used.  Note this hides a problem with
1500  *   the scratch register (used to keep track of last buffer
1501  *   completed) being written to before the last buffer has actually
1502  *   completed rendering.
1503  *
1504  * KW:  It's also a good way to find free buffers quickly.
1505  *
1506  * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
1507  * sleep.  However, bugs in older versions of radeon_accel.c mean that
1508  * we essentially have to do this, else old clients will break.
1509  *
1510  * However, it does leave open a potential deadlock where all the
1511  * buffers are held by other clients, which can't release them because
1512  * they can't get the lock.
1513  */
1514
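/* Scan for a free buffer, starting just past the one handed out last time.
 * A buffer is usable if it has no owner, or if it is pending and its age is
 * not newer than the "done age" the CP has written back to scratch register
 * 1.  The scan is retried, one microsecond apart, for up to usec_timeout
 * iterations before giving up and returning NULL.
 */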
1515 struct drm_buf *radeon_freelist_get(struct drm_device * dev)
1516 {
1517         struct drm_device_dma *dma = dev->dma;
1518         drm_radeon_private_t *dev_priv = dev->dev_private;
1519         drm_radeon_buf_priv_t *buf_priv;
1520         struct drm_buf *buf;
1521         int i, t;
1522         int start;
1523
1524         if (++dev_priv->last_buf >= dma->buf_count)
1525                 dev_priv->last_buf = 0;
1526
1527         start = dev_priv->last_buf;
1528
1529         for (t = 0; t < dev_priv->usec_timeout; t++) {
1530                 u32 done_age = GET_SCRATCH(1);
1531                 DRM_DEBUG("done_age = %d\n", done_age);
1532                 for (i = start; i < dma->buf_count; i++) {
1533                         buf = dma->buflist[i];
1534                         buf_priv = buf->dev_private;
1535                         if (buf->file_priv == NULL || (buf->pending &&
1536                                                        buf_priv->age <=
1537                                                        done_age)) {
1538                                 dev_priv->stats.requested_bufs++;
1539                                 buf->pending = 0;
1540                                 return buf;
1541                         }
1542                         start = 0;
1543                 }
1544
1545                 if (t) {
1546                         DRM_UDELAY(1);
1547                         dev_priv->stats.freelist_loops++;
1548                 }
1549         }
1550
1551         DRM_DEBUG("returning NULL!\n");
1552         return NULL;
1553 }
1554
1555 #if 0
1556 struct drm_buf *radeon_freelist_get(struct drm_device * dev)
1557 {
1558         struct drm_device_dma *dma = dev->dma;
1559         drm_radeon_private_t *dev_priv = dev->dev_private;
1560         drm_radeon_buf_priv_t *buf_priv;
1561         struct drm_buf *buf;
1562         int i, t;
1563         int start;
1564         u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
1565
1566         if (++dev_priv->last_buf >= dma->buf_count)
1567                 dev_priv->last_buf = 0;
1568
1569         start = dev_priv->last_buf;
1570         dev_priv->stats.freelist_loops++;
1571
1572         for (t = 0; t < 2; t++) {
1573                 for (i = start; i < dma->buf_count; i++) {
1574                         buf = dma->buflist[i];
1575                         buf_priv = buf->dev_private;
1576                         if (buf->file_priv == 0 || (buf->pending &&
1577                                                     buf_priv->age <=
1578                                                     done_age)) {
1579                                 dev_priv->stats.requested_bufs++;
1580                                 buf->pending = 0;
1581                                 return buf;
1582                         }
1583                 }
1584                 start = 0;
1585         }
1586
1587         return NULL;
1588 }
1589 #endif
1590
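/* Reset the freelist: mark every buffer as age 0 (idle) and restart the
 * round-robin scan from the first buffer.
 */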
1591 void radeon_freelist_reset(struct drm_device * dev)
1592 {
1593         struct drm_device_dma *dma = dev->dma;
1594         drm_radeon_private_t *dev_priv = dev->dev_private;
1595         int i;
1596
1597         dev_priv->last_buf = 0;
1598         for (i = 0; i < dma->buf_count; i++) {
1599                 struct drm_buf *buf = dma->buflist[i];
1600                 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1601                 buf_priv->age = 0;
1602         }
1603 }
1604
1605 /* ================================================================
1606  * CP command submission
1607  */
1608
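/* Wait until the ring has more than n bytes of free space, recomputing the
 * space from the hardware read pointer and restarting the timeout whenever
 * the head is seen to advance.  Returns -EBUSY on timeout, which (per the
 * FIXME below) BEGIN_RING currently ignores.  For illustration only, a
 * typical emitter built on the ring macros from radeon_drv.h looks roughly
 * like:
 *
 *      BEGIN_RING(2);
 *      RADEON_WAIT_UNTIL_IDLE();
 *      ADVANCE_RING();
 */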
1609 int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
1610 {
1611         drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
1612         int i;
1613         u32 last_head = GET_RING_HEAD(dev_priv);
1614
1615         for (i = 0; i < dev_priv->usec_timeout; i++) {
1616                 u32 head = GET_RING_HEAD(dev_priv);
1617
1618                 ring->space = (head - ring->tail) * sizeof(u32);
1619                 if (ring->space <= 0)
1620                         ring->space += ring->size;
1621                 if (ring->space > n)
1622                         return 0;
1623
1624                 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
1625
1626                 if (head != last_head)
1627                         i = 0;
1628                 last_head = head;
1629
1630                 DRM_UDELAY(1);
1631         }
1632
1633         /* FIXME: This return value is ignored in the BEGIN_RING macro! */
1634 #if RADEON_FIFO_DEBUG
1635         radeon_status(dev_priv);
1636         DRM_ERROR("failed!\n");
1637 #endif
1638         return -EBUSY;
1639 }
1640
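/* Hand out up to d->request_count buffers from the freelist, assigning each
 * one to the requesting file and copying its index and size back to the
 * user's request arrays.  Running out of buffers returns -EBUSY, which
 * normally indicates a broken client (see the NOTE below).
 */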
1641 static int radeon_cp_get_buffers(struct drm_device *dev,
1642                                  struct drm_file *file_priv,
1643                                  struct drm_dma * d)
1644 {
1645         int i;
1646         struct drm_buf *buf;
1647
1648         for (i = d->granted_count; i < d->request_count; i++) {
1649                 buf = radeon_freelist_get(dev);
1650                 if (!buf)
1651                         return -EBUSY;  /* NOTE: broken client */
1652
1653                 buf->file_priv = file_priv;
1654
1655                 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
1656                                      sizeof(buf->idx)))
1657                         return -EFAULT;
1658                 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
1659                                      sizeof(buf->total)))
1660                         return -EFAULT;
1661
1662                 d->granted_count++;
1663         }
1664         return 0;
1665 }
1666
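/* DMA ioctl: buffers are only ever granted to userspace, never accepted
 * from it, so any non-zero send_count is rejected.  The request count is
 * validated against the total buffer count before handing anything out.
 */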
1667 int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
1668 {
1669         struct drm_device_dma *dma = dev->dma;
1670         int ret = 0;
1671         struct drm_dma *d = data;
1672
1673         LOCK_TEST_WITH_RETURN(dev, file_priv);
1674
1675         /* Please don't send us buffers.
1676          */
1677         if (d->send_count != 0) {
1678                 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1679                           DRM_CURRENTPID, d->send_count);
1680                 return -EINVAL;
1681         }
1682
1683         /* We'll send you buffers.
1684          */
1685         if (d->request_count < 0 || d->request_count > dma->buf_count) {
1686                 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1687                           DRM_CURRENTPID, d->request_count, dma->buf_count);
1688                 return -EINVAL;
1689         }
1690
1691         d->granted_count = 0;
1692
1693         if (d->request_count) {
1694                 ret = radeon_cp_get_buffers(dev, file_priv, d);
1695         }
1696
1697         return ret;
1698 }
1699
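/* First-stage driver load: allocate and zero dev_priv, stash the probe
 * flags, mark the chip families that support a hierarchical Z buffer and
 * record whether the device is AGP, PCIE or plain PCI.
 */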
1700 int radeon_driver_load(struct drm_device *dev, unsigned long flags)
1701 {
1702         drm_radeon_private_t *dev_priv;
1703         int ret = 0;
1704
1705         dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
1706         if (dev_priv == NULL)
1707                 return -ENOMEM;
1708
1709         memset(dev_priv, 0, sizeof(drm_radeon_private_t));
1710         dev->dev_private = (void *)dev_priv;
1711         dev_priv->flags = flags;
1712
1713         switch (flags & RADEON_FAMILY_MASK) {
1714         case CHIP_R100:
1715         case CHIP_RV200:
1716         case CHIP_R200:
1717         case CHIP_R300:
1718         case CHIP_R350:
1719         case CHIP_R420:
1720         case CHIP_RV410:
1721         case CHIP_RV515:
1722         case CHIP_R520:
1723         case CHIP_RV570:
1724         case CHIP_R580:
1725                 dev_priv->flags |= RADEON_HAS_HIERZ;
1726                 break;
1727         default:
1728                 /* all other chips have no hierarchical z buffer */
1729                 break;
1730         }
1731
1732         if (drm_device_is_agp(dev))
1733                 dev_priv->flags |= RADEON_IS_AGP;
1734         else if (drm_device_is_pcie(dev))
1735                 dev_priv->flags |= RADEON_IS_PCIE;
1736         else
1737                 dev_priv->flags |= RADEON_IS_PCI;
1738
1739         DRM_DEBUG("%s card detected\n",
1740                   ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
1741         return ret;
1742 }
1743
1744 /* Create mappings for registers and framebuffer so userland doesn't necessarily
1745  * have to find them.
1746  */
1747 int radeon_driver_firstopen(struct drm_device *dev)
1748 {
1749         int ret;
1750         drm_local_map_t *map;
1751         drm_radeon_private_t *dev_priv = dev->dev_private;
1752
1753         dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
1754
1755         ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
1756                          drm_get_resource_len(dev, 2), _DRM_REGISTERS,
1757                          _DRM_READ_ONLY, &dev_priv->mmio);
1758         if (ret != 0)
1759                 return ret;
1760
1761         dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
1762         ret = drm_addmap(dev, dev_priv->fb_aper_offset,
1763                          drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
1764                          _DRM_WRITE_COMBINING, &map);
1765         if (ret != 0)
1766                 return ret;
1767
1768         return 0;
1769 }
1770
1771 int radeon_driver_unload(struct drm_device *dev)
1772 {
1773         drm_radeon_private_t *dev_priv = dev->dev_private;
1774
1775         DRM_DEBUG("\n");
1776         drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
1777
1778         dev->dev_private = NULL;
1779         return 0;
1780 }