2 * sata_mv.c - Marvell SATA support
4 * Copyright 2008: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 * --> Errata workaround for NCQ device errors.
30 * --> More errata workarounds for PCI-X.
32 * --> Complete a full errata audit for all chipsets to identify others.
34 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
38 * --> Develop a low-power-consumption strategy, and implement it.
40 * --> [Experiment, low priority] Investigate interrupt coalescing.
41 * Quite often, especially with PCI Message Signalled Interrupts (MSI),
42 * the overhead reduced by interrupt mitigation is quite often not
43 * worth the latency cost.
45 * --> [Experiment, Marvell value added] Is it possible to use target
46 * mode to cross-connect two Linux boxes with Marvell cards? If so,
47 * creating LibATA target mode support would be very interesting.
49 * Target mode, for those without docs, is the ability to directly
50 * connect two SATA ports.
53 #include <linux/kernel.h>
54 #include <linux/module.h>
55 #include <linux/pci.h>
56 #include <linux/init.h>
57 #include <linux/blkdev.h>
58 #include <linux/delay.h>
59 #include <linux/interrupt.h>
60 #include <linux/dmapool.h>
61 #include <linux/dma-mapping.h>
62 #include <linux/device.h>
63 #include <linux/platform_device.h>
64 #include <linux/ata_platform.h>
65 #include <linux/mbus.h>
66 #include <linux/bitops.h>
67 #include <scsi/scsi_host.h>
68 #include <scsi/scsi_cmnd.h>
69 #include <scsi/scsi_device.h>
70 #include <linux/libata.h>
72 #define DRV_NAME "sata_mv"
73 #define DRV_VERSION "1.24"
76 /* BAR's are enumerated in terms of pci_resource_start() terms */
77 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
78 MV_IO_BAR = 2, /* offset 0x18: IO space */
79 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
81 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
82 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
85 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
86 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
87 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
88 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
89 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
90 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
92 MV_SATAHC0_REG_BASE = 0x20000,
93 MV_FLASH_CTL_OFS = 0x1046c,
94 MV_GPIO_PORT_CTL_OFS = 0x104f0,
95 MV_RESET_CFG_OFS = 0x180d8,
97 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
98 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
99 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
100 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
103 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
105 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
106 * CRPB needs alignment on a 256B boundary. Size == 256B
107 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
109 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
110 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
112 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
114 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
115 MV_PORT_HC_SHIFT = 2,
116 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
117 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
118 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
121 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
122 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
124 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
125 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
126 ATA_FLAG_PIO_POLLING,
128 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
130 MV_GENIIE_FLAGS = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
131 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
132 ATA_FLAG_NCQ | ATA_FLAG_AN,
134 CRQB_FLAG_READ = (1 << 0),
136 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
137 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
138 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
139 CRQB_CMD_ADDR_SHIFT = 8,
140 CRQB_CMD_CS = (0x2 << 11),
141 CRQB_CMD_LAST = (1 << 15),
143 CRPB_FLAG_STATUS_SHIFT = 8,
144 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
145 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
147 EPRD_FLAG_END_OF_TBL = (1 << 31),
149 /* PCI interface registers */
151 PCI_COMMAND_OFS = 0xc00,
152 PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
154 PCI_MAIN_CMD_STS_OFS = 0xd30,
155 STOP_PCI_MASTER = (1 << 2),
156 PCI_MASTER_EMPTY = (1 << 3),
157 GLOB_SFT_RST = (1 << 4),
159 MV_PCI_MODE_OFS = 0xd00,
160 MV_PCI_MODE_MASK = 0x30,
162 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
163 MV_PCI_DISC_TIMER = 0xd04,
164 MV_PCI_MSI_TRIGGER = 0xc38,
165 MV_PCI_SERR_MASK = 0xc28,
166 MV_PCI_XBAR_TMOUT_OFS = 0x1d04,
167 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
168 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
169 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
170 MV_PCI_ERR_COMMAND = 0x1d50,
172 PCI_IRQ_CAUSE_OFS = 0x1d58,
173 PCI_IRQ_MASK_OFS = 0x1d5c,
174 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
176 PCIE_IRQ_CAUSE_OFS = 0x1900,
177 PCIE_IRQ_MASK_OFS = 0x1910,
178 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
180 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
181 PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
182 PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
183 SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
184 SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
185 ERR_IRQ = (1 << 0), /* shift by port # */
186 DONE_IRQ = (1 << 1), /* shift by port # */
187 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
188 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
190 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
191 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
192 PORTS_0_3_COAL_DONE = (1 << 8),
193 PORTS_4_7_COAL_DONE = (1 << 17),
194 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
195 GPIO_INT = (1 << 22),
196 SELF_INT = (1 << 23),
197 TWSI_INT = (1 << 24),
198 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
199 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
200 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
202 /* SATAHC registers */
205 HC_IRQ_CAUSE_OFS = 0x14,
206 DMA_IRQ = (1 << 0), /* shift by port # */
207 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
208 DEV_IRQ = (1 << 8), /* shift by port # */
210 /* Shadow block registers */
212 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
215 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
216 SATA_ACTIVE_OFS = 0x350,
217 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
218 SATA_FIS_IRQ_AN = (1 << 9), /* async notification */
221 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
225 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
226 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
227 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
228 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
231 SATA_IFCTL_OFS = 0x344,
232 SATA_TESTCTL_OFS = 0x348,
233 SATA_IFSTAT_OFS = 0x34c,
234 VENDOR_UNIQUE_FIS_OFS = 0x35c,
237 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
238 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
241 MV5_LTMODE_OFS = 0x30,
242 MV5_PHY_CTL_OFS = 0x0C,
243 SATA_INTERFACE_CFG_OFS = 0x050,
245 MV_M2_PREAMP_MASK = 0x7e0,
249 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
250 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
251 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
252 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
253 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
254 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
255 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
257 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
258 EDMA_ERR_IRQ_MASK_OFS = 0xc,
259 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
260 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
261 EDMA_ERR_DEV = (1 << 2), /* device error */
262 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
263 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
264 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
265 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
266 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
267 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
268 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
269 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
270 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
271 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
272 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
274 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
275 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
276 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
277 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
278 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
280 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
282 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
283 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
284 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
285 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
286 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
287 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
289 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
291 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
292 EDMA_ERR_OVERRUN_5 = (1 << 5),
293 EDMA_ERR_UNDERRUN_5 = (1 << 6),
295 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
296 EDMA_ERR_LNK_CTRL_RX_1 |
297 EDMA_ERR_LNK_CTRL_RX_3 |
298 EDMA_ERR_LNK_CTRL_TX,
300 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
310 EDMA_ERR_LNK_CTRL_RX_2 |
311 EDMA_ERR_LNK_DATA_RX |
312 EDMA_ERR_LNK_DATA_TX |
313 EDMA_ERR_TRANS_PROTO,
315 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
320 EDMA_ERR_UNDERRUN_5 |
321 EDMA_ERR_SELF_DIS_5 |
327 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
328 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
330 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
331 EDMA_REQ_Q_PTR_SHIFT = 5,
333 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
334 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
335 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
336 EDMA_RSP_Q_PTR_SHIFT = 3,
338 EDMA_CMD_OFS = 0x28, /* EDMA command register */
339 EDMA_EN = (1 << 0), /* enable EDMA */
340 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
341 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
343 EDMA_STATUS_OFS = 0x30, /* EDMA engine status */
344 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
345 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
347 EDMA_IORDY_TMOUT_OFS = 0x34,
348 EDMA_ARB_CFG_OFS = 0x38,
350 EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */
352 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
354 /* Host private flags (hp_flags) */
355 MV_HP_FLAG_MSI = (1 << 0),
356 MV_HP_ERRATA_50XXB0 = (1 << 1),
357 MV_HP_ERRATA_50XXB2 = (1 << 2),
358 MV_HP_ERRATA_60X1B2 = (1 << 3),
359 MV_HP_ERRATA_60X1C0 = (1 << 4),
360 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
361 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
362 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
363 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
364 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
365 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
367 /* Port private flags (pp_flags) */
368 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
369 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
370 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
371 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
374 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
375 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
376 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
377 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
378 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
380 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
381 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
384 /* DMA boundary 0xffff is required by the s/g splitting
385 * we need on /length/ in mv_fill-sg().
387 MV_DMA_BOUNDARY = 0xffffU,
389 /* mask of register bits containing lower 32 bits
390 * of EDMA request queue DMA address
392 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
394 /* ditto, for response queue */
395 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
409 /* Command ReQuest Block: 32B */
425 /* Command ResPonse Block: 8B */
432 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
440 struct mv_port_priv {
441 struct mv_crqb *crqb;
443 struct mv_crpb *crpb;
445 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
446 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
448 unsigned int req_idx;
449 unsigned int resp_idx;
452 unsigned int delayed_eh_pmp_map;
455 struct mv_port_signal {
460 struct mv_host_priv {
463 struct mv_port_signal signal[8];
464 const struct mv_hw_ops *ops;
467 void __iomem *main_irq_cause_addr;
468 void __iomem *main_irq_mask_addr;
473 * These consistent DMA memory pools give us guaranteed
474 * alignment for hardware-accessed data structures,
475 * and less memory waste in accomplishing the alignment.
477 struct dma_pool *crqb_pool;
478 struct dma_pool *crpb_pool;
479 struct dma_pool *sg_tbl_pool;
483 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
485 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
486 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
488 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
490 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
491 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
494 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
495 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
496 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
497 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
498 static int mv_port_start(struct ata_port *ap);
499 static void mv_port_stop(struct ata_port *ap);
500 static int mv_qc_defer(struct ata_queued_cmd *qc);
501 static void mv_qc_prep(struct ata_queued_cmd *qc);
502 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
503 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
504 static int mv_hardreset(struct ata_link *link, unsigned int *class,
505 unsigned long deadline);
506 static void mv_eh_freeze(struct ata_port *ap);
507 static void mv_eh_thaw(struct ata_port *ap);
508 static void mv6_dev_config(struct ata_device *dev);
510 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
512 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
513 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
515 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
517 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
518 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
520 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
522 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
523 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
525 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
527 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
528 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
530 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
532 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
533 void __iomem *mmio, unsigned int n_hc);
534 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
536 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
537 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
538 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
539 unsigned int port_no);
540 static int mv_stop_edma(struct ata_port *ap);
541 static int mv_stop_edma_engine(void __iomem *port_mmio);
542 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
544 static void mv_pmp_select(struct ata_port *ap, int pmp);
545 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
546 unsigned long deadline);
547 static int mv_softreset(struct ata_link *link, unsigned int *class,
548 unsigned long deadline);
549 static void mv_pmp_error_handler(struct ata_port *ap);
550 static void mv_process_crpb_entries(struct ata_port *ap,
551 struct mv_port_priv *pp);
553 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
554 * because we have to allow room for worst case splitting of
555 * PRDs for 64K boundaries in mv_fill_sg().
557 static struct scsi_host_template mv5_sht = {
558 ATA_BASE_SHT(DRV_NAME),
559 .sg_tablesize = MV_MAX_SG_CT / 2,
560 .dma_boundary = MV_DMA_BOUNDARY,
563 static struct scsi_host_template mv6_sht = {
564 ATA_NCQ_SHT(DRV_NAME),
565 .can_queue = MV_MAX_Q_DEPTH - 1,
566 .sg_tablesize = MV_MAX_SG_CT / 2,
567 .dma_boundary = MV_DMA_BOUNDARY,
570 static struct ata_port_operations mv5_ops = {
571 .inherits = &ata_sff_port_ops,
573 .qc_defer = mv_qc_defer,
574 .qc_prep = mv_qc_prep,
575 .qc_issue = mv_qc_issue,
577 .freeze = mv_eh_freeze,
579 .hardreset = mv_hardreset,
580 .error_handler = ata_std_error_handler, /* avoid SFF EH */
581 .post_internal_cmd = ATA_OP_NULL,
583 .scr_read = mv5_scr_read,
584 .scr_write = mv5_scr_write,
586 .port_start = mv_port_start,
587 .port_stop = mv_port_stop,
590 static struct ata_port_operations mv6_ops = {
591 .inherits = &mv5_ops,
592 .dev_config = mv6_dev_config,
593 .scr_read = mv_scr_read,
594 .scr_write = mv_scr_write,
596 .pmp_hardreset = mv_pmp_hardreset,
597 .pmp_softreset = mv_softreset,
598 .softreset = mv_softreset,
599 .error_handler = mv_pmp_error_handler,
602 static struct ata_port_operations mv_iie_ops = {
603 .inherits = &mv6_ops,
604 .dev_config = ATA_OP_NULL,
605 .qc_prep = mv_qc_prep_iie,
608 static const struct ata_port_info mv_port_info[] = {
610 .flags = MV_COMMON_FLAGS,
611 .pio_mask = 0x1f, /* pio0-4 */
612 .udma_mask = ATA_UDMA6,
613 .port_ops = &mv5_ops,
616 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
617 .pio_mask = 0x1f, /* pio0-4 */
618 .udma_mask = ATA_UDMA6,
619 .port_ops = &mv5_ops,
622 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
623 .pio_mask = 0x1f, /* pio0-4 */
624 .udma_mask = ATA_UDMA6,
625 .port_ops = &mv5_ops,
628 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
629 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
631 .pio_mask = 0x1f, /* pio0-4 */
632 .udma_mask = ATA_UDMA6,
633 .port_ops = &mv6_ops,
636 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
637 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
638 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
639 .pio_mask = 0x1f, /* pio0-4 */
640 .udma_mask = ATA_UDMA6,
641 .port_ops = &mv6_ops,
644 .flags = MV_GENIIE_FLAGS,
645 .pio_mask = 0x1f, /* pio0-4 */
646 .udma_mask = ATA_UDMA6,
647 .port_ops = &mv_iie_ops,
650 .flags = MV_GENIIE_FLAGS,
651 .pio_mask = 0x1f, /* pio0-4 */
652 .udma_mask = ATA_UDMA6,
653 .port_ops = &mv_iie_ops,
656 .flags = MV_GENIIE_FLAGS,
657 .pio_mask = 0x1f, /* pio0-4 */
658 .udma_mask = ATA_UDMA6,
659 .port_ops = &mv_iie_ops,
663 static const struct pci_device_id mv_pci_tbl[] = {
664 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
665 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
666 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
667 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
668 /* RocketRAID 1720/174x have different identifiers */
669 { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
670 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
671 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
673 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
674 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
675 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
676 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
677 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
679 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
682 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
684 /* Marvell 7042 support */
685 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
687 /* Highpoint RocketRAID PCIe series */
688 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
689 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
691 { } /* terminate list */
694 static const struct mv_hw_ops mv5xxx_ops = {
695 .phy_errata = mv5_phy_errata,
696 .enable_leds = mv5_enable_leds,
697 .read_preamp = mv5_read_preamp,
698 .reset_hc = mv5_reset_hc,
699 .reset_flash = mv5_reset_flash,
700 .reset_bus = mv5_reset_bus,
703 static const struct mv_hw_ops mv6xxx_ops = {
704 .phy_errata = mv6_phy_errata,
705 .enable_leds = mv6_enable_leds,
706 .read_preamp = mv6_read_preamp,
707 .reset_hc = mv6_reset_hc,
708 .reset_flash = mv6_reset_flash,
709 .reset_bus = mv_reset_pci_bus,
712 static const struct mv_hw_ops mv_soc_ops = {
713 .phy_errata = mv6_phy_errata,
714 .enable_leds = mv_soc_enable_leds,
715 .read_preamp = mv_soc_read_preamp,
716 .reset_hc = mv_soc_reset_hc,
717 .reset_flash = mv_soc_reset_flash,
718 .reset_bus = mv_soc_reset_bus,
725 static inline void writelfl(unsigned long data, void __iomem *addr)
728 (void) readl(addr); /* flush to avoid PCI posted write */
731 static inline unsigned int mv_hc_from_port(unsigned int port)
733 return port >> MV_PORT_HC_SHIFT;
736 static inline unsigned int mv_hardport_from_port(unsigned int port)
738 return port & MV_PORT_MASK;
742 * Consolidate some rather tricky bit shift calculations.
743 * This is hot-path stuff, so not a function.
744 * Simple code, with two return values, so macro rather than inline.
746 * port is the sole input, in range 0..7.
747 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
748 * hardport is the other output, in range 0..3.
750 * Note that port and hardport may be the same variable in some cases.
752 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
754 shift = mv_hc_from_port(port) * HC_SHIFT; \
755 hardport = mv_hardport_from_port(port); \
756 shift += hardport * 2; \
759 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
761 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
764 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
767 return mv_hc_base(base, mv_hc_from_port(port));
770 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
772 return mv_hc_base_from_port(base, port) +
773 MV_SATAHC_ARBTR_REG_SZ +
774 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
777 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
779 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
780 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
782 return hc_mmio + ofs;
785 static inline void __iomem *mv_host_base(struct ata_host *host)
787 struct mv_host_priv *hpriv = host->private_data;
791 static inline void __iomem *mv_ap_base(struct ata_port *ap)
793 return mv_port_base(mv_host_base(ap->host), ap->port_no);
796 static inline int mv_get_hc_count(unsigned long port_flags)
798 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
801 static void mv_set_edma_ptrs(void __iomem *port_mmio,
802 struct mv_host_priv *hpriv,
803 struct mv_port_priv *pp)
808 * initialize request queue
810 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
811 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
813 WARN_ON(pp->crqb_dma & 0x3ff);
814 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
815 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
816 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
817 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
820 * initialize response queue
822 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
823 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
825 WARN_ON(pp->crpb_dma & 0xff);
826 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
827 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
828 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
829 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
832 static void mv_set_main_irq_mask(struct ata_host *host,
833 u32 disable_bits, u32 enable_bits)
835 struct mv_host_priv *hpriv = host->private_data;
836 u32 old_mask, new_mask;
838 old_mask = hpriv->main_irq_mask;
839 new_mask = (old_mask & ~disable_bits) | enable_bits;
840 if (new_mask != old_mask) {
841 hpriv->main_irq_mask = new_mask;
842 writelfl(new_mask, hpriv->main_irq_mask_addr);
846 static void mv_enable_port_irqs(struct ata_port *ap,
847 unsigned int port_bits)
849 unsigned int shift, hardport, port = ap->port_no;
850 u32 disable_bits, enable_bits;
852 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
854 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
855 enable_bits = port_bits << shift;
856 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
860 * mv_start_dma - Enable eDMA engine
861 * @base: port base address
862 * @pp: port private data
864 * Verify the local cache of the eDMA state is accurate with a
868 * Inherited from caller.
870 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
871 struct mv_port_priv *pp, u8 protocol)
873 int want_ncq = (protocol == ATA_PROT_NCQ);
875 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
876 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
877 if (want_ncq != using_ncq)
880 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
881 struct mv_host_priv *hpriv = ap->host->private_data;
882 int hardport = mv_hardport_from_port(ap->port_no);
883 void __iomem *hc_mmio = mv_hc_base_from_port(
884 mv_host_base(ap->host), ap->port_no);
887 /* clear EDMA event indicators, if any */
888 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
890 /* clear pending irq events */
891 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
892 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
894 mv_edma_cfg(ap, want_ncq);
896 /* clear FIS IRQ Cause */
897 if (IS_GEN_IIE(hpriv))
898 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
900 mv_set_edma_ptrs(port_mmio, hpriv, pp);
901 mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
903 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
904 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
908 static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
910 void __iomem *port_mmio = mv_ap_base(ap);
911 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
912 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
916 * Wait for the EDMA engine to finish transactions in progress.
917 * No idea what a good "timeout" value might be, but measurements
918 * indicate that it often requires hundreds of microseconds
919 * with two drives in-use. So we use the 15msec value above
920 * as a rough guess at what even more drives might require.
922 for (i = 0; i < timeout; ++i) {
923 u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
924 if ((edma_stat & empty_idle) == empty_idle)
928 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
932 * mv_stop_edma_engine - Disable eDMA engine
933 * @port_mmio: io base address
936 * Inherited from caller.
938 static int mv_stop_edma_engine(void __iomem *port_mmio)
942 /* Disable eDMA. The disable bit auto clears. */
943 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
945 /* Wait for the chip to confirm eDMA is off. */
946 for (i = 10000; i > 0; i--) {
947 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
948 if (!(reg & EDMA_EN))
955 static int mv_stop_edma(struct ata_port *ap)
957 void __iomem *port_mmio = mv_ap_base(ap);
958 struct mv_port_priv *pp = ap->private_data;
960 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
962 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
963 mv_wait_for_edma_empty_idle(ap);
964 if (mv_stop_edma_engine(port_mmio)) {
965 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
972 static void mv_dump_mem(void __iomem *start, unsigned bytes)
975 for (b = 0; b < bytes; ) {
976 DPRINTK("%p: ", start + b);
977 for (w = 0; b < bytes && w < 4; w++) {
978 printk("%08x ", readl(start + b));
986 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
991 for (b = 0; b < bytes; ) {
992 DPRINTK("%02x: ", b);
993 for (w = 0; b < bytes && w < 4; w++) {
994 (void) pci_read_config_dword(pdev, b, &dw);
1002 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1003 struct pci_dev *pdev)
1006 void __iomem *hc_base = mv_hc_base(mmio_base,
1007 port >> MV_PORT_HC_SHIFT);
1008 void __iomem *port_base;
1009 int start_port, num_ports, p, start_hc, num_hcs, hc;
1012 start_hc = start_port = 0;
1013 num_ports = 8; /* shld be benign for 4 port devs */
1016 start_hc = port >> MV_PORT_HC_SHIFT;
1018 num_ports = num_hcs = 1;
1020 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1021 num_ports > 1 ? num_ports - 1 : start_port);
1024 DPRINTK("PCI config space regs:\n");
1025 mv_dump_pci_cfg(pdev, 0x68);
1027 DPRINTK("PCI regs:\n");
1028 mv_dump_mem(mmio_base+0xc00, 0x3c);
1029 mv_dump_mem(mmio_base+0xd00, 0x34);
1030 mv_dump_mem(mmio_base+0xf00, 0x4);
1031 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1032 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1033 hc_base = mv_hc_base(mmio_base, hc);
1034 DPRINTK("HC regs (HC %i):\n", hc);
1035 mv_dump_mem(hc_base, 0x1c);
1037 for (p = start_port; p < start_port + num_ports; p++) {
1038 port_base = mv_port_base(mmio_base, p);
1039 DPRINTK("EDMA regs (port %i):\n", p);
1040 mv_dump_mem(port_base, 0x54);
1041 DPRINTK("SATA regs (port %i):\n", p);
1042 mv_dump_mem(port_base+0x300, 0x60);
1047 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1051 switch (sc_reg_in) {
1055 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1058 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1067 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1069 unsigned int ofs = mv_scr_offset(sc_reg_in);
1071 if (ofs != 0xffffffffU) {
1072 *val = readl(mv_ap_base(link->ap) + ofs);
1078 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1080 unsigned int ofs = mv_scr_offset(sc_reg_in);
1082 if (ofs != 0xffffffffU) {
1083 writelfl(val, mv_ap_base(link->ap) + ofs);
1089 static void mv6_dev_config(struct ata_device *adev)
1092 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1094 * Gen-II does not support NCQ over a port multiplier
1095 * (no FIS-based switching).
1097 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1098 * See mv_qc_prep() for more info.
1100 if (adev->flags & ATA_DFLAG_NCQ) {
1101 if (sata_pmp_attached(adev->link->ap)) {
1102 adev->flags &= ~ATA_DFLAG_NCQ;
1103 ata_dev_printk(adev, KERN_INFO,
1104 "NCQ disabled for command-based switching\n");
1105 } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
1106 adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
1107 ata_dev_printk(adev, KERN_INFO,
1108 "max_sectors limited to %u for NCQ\n",
1114 static int mv_qc_defer(struct ata_queued_cmd *qc)
1116 struct ata_link *link = qc->dev->link;
1117 struct ata_port *ap = link->ap;
1118 struct mv_port_priv *pp = ap->private_data;
1121 * Don't allow new commands if we're in a delayed EH state
1122 * for NCQ and/or FIS-based switching.
1124 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1125 return ATA_DEFER_PORT;
1127 * If the port is completely idle, then allow the new qc.
1129 if (ap->nr_active_links == 0)
1133 * The port is operating in host queuing mode (EDMA) with NCQ
1134 * enabled, allow multiple NCQ commands. EDMA also allows
1135 * queueing multiple DMA commands but libata core currently
1138 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1139 (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
1142 return ATA_DEFER_PORT;
1145 static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
1147 u32 new_fiscfg, old_fiscfg;
1148 u32 new_ltmode, old_ltmode;
1149 u32 new_haltcond, old_haltcond;
1151 old_fiscfg = readl(port_mmio + FISCFG_OFS);
1152 old_ltmode = readl(port_mmio + LTMODE_OFS);
1153 old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
1155 new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1156 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1157 new_haltcond = old_haltcond | EDMA_ERR_DEV;
1160 new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
1161 new_ltmode = old_ltmode | LTMODE_BIT8;
1163 new_haltcond &= ~EDMA_ERR_DEV;
1165 new_fiscfg |= FISCFG_WAIT_DEV_ERR;
1168 if (new_fiscfg != old_fiscfg)
1169 writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
1170 if (new_ltmode != old_ltmode)
1171 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1172 if (new_haltcond != old_haltcond)
1173 writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
1176 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1178 struct mv_host_priv *hpriv = ap->host->private_data;
1181 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1182 old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
1184 new = old | (1 << 22);
1186 new = old & ~(1 << 22);
1188 writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
1191 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1194 struct mv_port_priv *pp = ap->private_data;
1195 struct mv_host_priv *hpriv = ap->host->private_data;
1196 void __iomem *port_mmio = mv_ap_base(ap);
1198 /* set up non-NCQ EDMA configuration */
1199 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1200 pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;
1202 if (IS_GEN_I(hpriv))
1203 cfg |= (1 << 8); /* enab config burst size mask */
1205 else if (IS_GEN_II(hpriv)) {
1206 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1207 mv_60x1_errata_sata25(ap, want_ncq);
1209 } else if (IS_GEN_IIE(hpriv)) {
1210 int want_fbs = sata_pmp_attached(ap);
1212 * Possible future enhancement:
1214 * The chip can use FBS with non-NCQ, if we allow it,
1215 * But first we need to have the error handling in place
1216 * for this mode (datasheet section 7.3.15.4.2.3).
1217 * So disallow non-NCQ FBS for now.
1219 want_fbs &= want_ncq;
1221 mv_config_fbs(port_mmio, want_ncq, want_fbs);
1224 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1225 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1228 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1229 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1231 cfg |= (1 << 18); /* enab early completion */
1232 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1233 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1237 cfg |= EDMA_CFG_NCQ;
1238 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1240 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1242 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1245 static void mv_port_free_dma_mem(struct ata_port *ap)
1247 struct mv_host_priv *hpriv = ap->host->private_data;
1248 struct mv_port_priv *pp = ap->private_data;
1252 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1256 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1260 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1261 * For later hardware, we have one unique sg_tbl per NCQ tag.
1263 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1264 if (pp->sg_tbl[tag]) {
1265 if (tag == 0 || !IS_GEN_I(hpriv))
1266 dma_pool_free(hpriv->sg_tbl_pool,
1268 pp->sg_tbl_dma[tag]);
1269 pp->sg_tbl[tag] = NULL;
1275 * mv_port_start - Port specific init/start routine.
1276 * @ap: ATA channel to manipulate
1278 * Allocate and point to DMA memory, init port private memory,
1282 * Inherited from caller.
1284 static int mv_port_start(struct ata_port *ap)
1286 struct device *dev = ap->host->dev;
1287 struct mv_host_priv *hpriv = ap->host->private_data;
1288 struct mv_port_priv *pp;
1291 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1294 ap->private_data = pp;
1296 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1299 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1301 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1303 goto out_port_free_dma_mem;
1304 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1306 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1307 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1308 ap->flags |= ATA_FLAG_AN;
1310 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1311 * For later hardware, we need one unique sg_tbl per NCQ tag.
1313 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1314 if (tag == 0 || !IS_GEN_I(hpriv)) {
1315 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1316 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1317 if (!pp->sg_tbl[tag])
1318 goto out_port_free_dma_mem;
1320 pp->sg_tbl[tag] = pp->sg_tbl[0];
1321 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1326 out_port_free_dma_mem:
1327 mv_port_free_dma_mem(ap);
1332 * mv_port_stop - Port specific cleanup/stop routine.
1333 * @ap: ATA channel to manipulate
1335 * Stop DMA, cleanup port memory.
1338 * This routine uses the host lock to protect the DMA stop.
1340 static void mv_port_stop(struct ata_port *ap)
1343 mv_enable_port_irqs(ap, 0);
1344 mv_port_free_dma_mem(ap);
1348 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1349 * @qc: queued command whose SG list to source from
1351 * Populate the SG list and mark the last entry.
1354 * Inherited from caller.
1356 static void mv_fill_sg(struct ata_queued_cmd *qc)
1358 struct mv_port_priv *pp = qc->ap->private_data;
1359 struct scatterlist *sg;
1360 struct mv_sg *mv_sg, *last_sg = NULL;
1363 mv_sg = pp->sg_tbl[qc->tag];
1364 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1365 dma_addr_t addr = sg_dma_address(sg);
1366 u32 sg_len = sg_dma_len(sg);
1369 u32 offset = addr & 0xffff;
1372 if ((offset + sg_len > 0x10000))
1373 len = 0x10000 - offset;
1375 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1376 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1377 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1387 if (likely(last_sg))
1388 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1391 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1393 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1394 (last ? CRQB_CMD_LAST : 0);
1395 *cmdw = cpu_to_le16(tmp);
1399 * mv_qc_prep - Host specific command preparation.
1400 * @qc: queued command to prepare
1402 * This routine simply redirects to the general purpose routine
1403 * if command is not DMA. Else, it handles prep of the CRQB
1404 * (command request block), does some sanity checking, and calls
1405 * the SG load routine.
1408 * Inherited from caller.
1410 static void mv_qc_prep(struct ata_queued_cmd *qc)
1412 struct ata_port *ap = qc->ap;
1413 struct mv_port_priv *pp = ap->private_data;
1415 struct ata_taskfile *tf;
1419 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1420 (qc->tf.protocol != ATA_PROT_NCQ))
1423 /* Fill in command request block
1425 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1426 flags |= CRQB_FLAG_READ;
1427 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1428 flags |= qc->tag << CRQB_TAG_SHIFT;
1429 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1431 /* get current queue index from software */
1432 in_index = pp->req_idx;
1434 pp->crqb[in_index].sg_addr =
1435 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1436 pp->crqb[in_index].sg_addr_hi =
1437 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1438 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1440 cw = &pp->crqb[in_index].ata_cmd[0];
1443 /* Sadly, the CRQB cannot accomodate all registers--there are
1444 * only 11 bytes...so we must pick and choose required
1445 * registers based on the command. So, we drop feature and
1446 * hob_feature for [RW] DMA commands, but they are needed for
1447 * NCQ. NCQ will drop hob_nsect.
1449 switch (tf->command) {
1451 case ATA_CMD_READ_EXT:
1453 case ATA_CMD_WRITE_EXT:
1454 case ATA_CMD_WRITE_FUA_EXT:
1455 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1457 case ATA_CMD_FPDMA_READ:
1458 case ATA_CMD_FPDMA_WRITE:
1459 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1460 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1463 /* The only other commands EDMA supports in non-queued and
1464 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1465 * of which are defined/used by Linux. If we get here, this
1466 * driver needs work.
1468 * FIXME: modify libata to give qc_prep a return value and
1469 * return error here.
1471 BUG_ON(tf->command);
1474 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1475 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1476 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1477 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1478 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1479 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1480 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1481 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1482 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1484 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1490 * mv_qc_prep_iie - Host specific command preparation.
1491 * @qc: queued command to prepare
1493 * This routine simply redirects to the general purpose routine
1494 * if command is not DMA. Else, it handles prep of the CRQB
1495 * (command request block), does some sanity checking, and calls
1496 * the SG load routine.
1499 * Inherited from caller.
1501 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1503 struct ata_port *ap = qc->ap;
1504 struct mv_port_priv *pp = ap->private_data;
1505 struct mv_crqb_iie *crqb;
1506 struct ata_taskfile *tf;
1510 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1511 (qc->tf.protocol != ATA_PROT_NCQ))
1514 /* Fill in Gen IIE command request block */
1515 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1516 flags |= CRQB_FLAG_READ;
1518 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1519 flags |= qc->tag << CRQB_TAG_SHIFT;
1520 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1521 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1523 /* get current queue index from software */
1524 in_index = pp->req_idx;
1526 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1527 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1528 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1529 crqb->flags = cpu_to_le32(flags);
1532 crqb->ata_cmd[0] = cpu_to_le32(
1533 (tf->command << 16) |
1536 crqb->ata_cmd[1] = cpu_to_le32(
1542 crqb->ata_cmd[2] = cpu_to_le32(
1543 (tf->hob_lbal << 0) |
1544 (tf->hob_lbam << 8) |
1545 (tf->hob_lbah << 16) |
1546 (tf->hob_feature << 24)
1548 crqb->ata_cmd[3] = cpu_to_le32(
1550 (tf->hob_nsect << 8)
1553 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1559 * mv_qc_issue - Initiate a command to the host
1560 * @qc: queued command to start
1562 * This routine simply redirects to the general purpose routine
1563 * if command is not DMA. Else, it sanity checks our local
1564 * caches of the request producer/consumer indices then enables
1565 * DMA and bumps the request producer index.
1568 * Inherited from caller.
1570 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1572 struct ata_port *ap = qc->ap;
1573 void __iomem *port_mmio = mv_ap_base(ap);
1574 struct mv_port_priv *pp = ap->private_data;
1577 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1578 (qc->tf.protocol != ATA_PROT_NCQ)) {
1579 static int limit_warnings = 10;
1581 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
1583 * Someday, we might implement special polling workarounds
1584 * for these, but it all seems rather unnecessary since we
1585 * normally use only DMA for commands which transfer more
1586 * than a single block of data.
1588 * Much of the time, this could just work regardless.
1589 * So for now, just log the incident, and allow the attempt.
1591 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
1593 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
1594 ": attempting PIO w/multiple DRQ: "
1595 "this may fail due to h/w errata\n");
1598 * We're about to send a non-EDMA capable command to the
1599 * port. Turn off EDMA so there won't be problems accessing
1600 * shadow block, etc registers.
1603 mv_enable_port_irqs(ap, ERR_IRQ);
1604 mv_pmp_select(ap, qc->dev->link->pmp);
1605 return ata_sff_qc_issue(qc);
1608 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1610 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1611 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1613 /* and write the request in pointer to kick the EDMA to life */
1614 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1615 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1620 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
1622 struct mv_port_priv *pp = ap->private_data;
1623 struct ata_queued_cmd *qc;
1625 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1627 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1628 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1633 static void mv_pmp_error_handler(struct ata_port *ap)
1635 unsigned int pmp, pmp_map;
1636 struct mv_port_priv *pp = ap->private_data;
1638 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
1640 * Perform NCQ error analysis on failed PMPs
1641 * before we freeze the port entirely.
1643 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
1645 pmp_map = pp->delayed_eh_pmp_map;
1646 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
1647 for (pmp = 0; pmp_map != 0; pmp++) {
1648 unsigned int this_pmp = (1 << pmp);
1649 if (pmp_map & this_pmp) {
1650 struct ata_link *link = &ap->pmp_link[pmp];
1651 pmp_map &= ~this_pmp;
1652 ata_eh_analyze_ncq_error(link);
1655 ata_port_freeze(ap);
1657 sata_pmp_error_handler(ap);
1660 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
1662 void __iomem *port_mmio = mv_ap_base(ap);
1664 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
1667 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
1669 struct ata_eh_info *ehi;
1673 * Initialize EH info for PMPs which saw device errors
1675 ehi = &ap->link.eh_info;
1676 for (pmp = 0; pmp_map != 0; pmp++) {
1677 unsigned int this_pmp = (1 << pmp);
1678 if (pmp_map & this_pmp) {
1679 struct ata_link *link = &ap->pmp_link[pmp];
1681 pmp_map &= ~this_pmp;
1682 ehi = &link->eh_info;
1683 ata_ehi_clear_desc(ehi);
1684 ata_ehi_push_desc(ehi, "dev err");
1685 ehi->err_mask |= AC_ERR_DEV;
1686 ehi->action |= ATA_EH_RESET;
1687 ata_link_abort(link);
1692 static int mv_req_q_empty(struct ata_port *ap)
1694 void __iomem *port_mmio = mv_ap_base(ap);
1695 u32 in_ptr, out_ptr;
1697 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
1698 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1699 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1700 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1701 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
1704 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
1706 struct mv_port_priv *pp = ap->private_data;
1708 unsigned int old_map, new_map;
1711 * Device error during FBS+NCQ operation:
1713 * Set a port flag to prevent further I/O being enqueued.
1714 * Leave the EDMA running to drain outstanding commands from this port.
1715 * Perform the post-mortem/EH only when all responses are complete.
1716 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
1718 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
1719 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
1720 pp->delayed_eh_pmp_map = 0;
1722 old_map = pp->delayed_eh_pmp_map;
1723 new_map = old_map | mv_get_err_pmp_map(ap);
1725 if (old_map != new_map) {
1726 pp->delayed_eh_pmp_map = new_map;
1727 mv_pmp_eh_prep(ap, new_map & ~old_map);
1729 failed_links = hweight16(new_map);
1731 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
1732 "failed_links=%d nr_active_links=%d\n",
1733 __func__, pp->delayed_eh_pmp_map,
1734 ap->qc_active, failed_links,
1735 ap->nr_active_links);
1737 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
1738 mv_process_crpb_entries(ap, pp);
1741 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
1742 return 1; /* handled */
1744 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
1745 return 1; /* handled */
1748 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
1751 * Possible future enhancement:
1753 * FBS+non-NCQ operation is not yet implemented.
1754 * See related notes in mv_edma_cfg().
1756 * Device error during FBS+non-NCQ operation:
1758 * We need to snapshot the shadow registers for each failed command.
1759 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
1761 return 0; /* not handled */
1764 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
1766 struct mv_port_priv *pp = ap->private_data;
1768 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1769 return 0; /* EDMA was not active: not handled */
1770 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
1771 return 0; /* FBS was not active: not handled */
1773 if (!(edma_err_cause & EDMA_ERR_DEV))
1774 return 0; /* non DEV error: not handled */
1775 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
1776 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
1777 return 0; /* other problems: not handled */
1779 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
1781 * EDMA should NOT have self-disabled for this case.
1782 * If it did, then something is wrong elsewhere,
1783 * and we cannot handle it here.
1785 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1786 ata_port_printk(ap, KERN_WARNING,
1787 "%s: err_cause=0x%x pp_flags=0x%x\n",
1788 __func__, edma_err_cause, pp->pp_flags);
1789 return 0; /* not handled */
1791 return mv_handle_fbs_ncq_dev_err(ap);
1794 * EDMA should have self-disabled for this case.
1795 * If it did not, then something is wrong elsewhere,
1796 * and we cannot handle it here.
1798 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
1799 ata_port_printk(ap, KERN_WARNING,
1800 "%s: err_cause=0x%x pp_flags=0x%x\n",
1801 __func__, edma_err_cause, pp->pp_flags);
1802 return 0; /* not handled */
1804 return mv_handle_fbs_non_ncq_dev_err(ap);
1806 return 0; /* not handled */
1809 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
1811 struct ata_eh_info *ehi = &ap->link.eh_info;
1812 char *when = "idle";
1814 ata_ehi_clear_desc(ehi);
1815 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
1817 } else if (edma_was_enabled) {
1818 when = "EDMA enabled";
1820 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
1821 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1824 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
1825 ehi->err_mask |= AC_ERR_OTHER;
1826 ehi->action |= ATA_EH_RESET;
1827 ata_port_freeze(ap);
1831 * mv_err_intr - Handle error interrupts on the port
1832 * @ap: ATA channel to manipulate
1834 * Most cases require a full reset of the chip's state machine,
1835 * which also performs a COMRESET.
1836 * Also, if the port disabled DMA, update our cached copy to match.
1839 * Inherited from caller.
1841 static void mv_err_intr(struct ata_port *ap)
1843 void __iomem *port_mmio = mv_ap_base(ap);
1844 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1846 struct mv_port_priv *pp = ap->private_data;
1847 struct mv_host_priv *hpriv = ap->host->private_data;
1848 unsigned int action = 0, err_mask = 0;
1849 struct ata_eh_info *ehi = &ap->link.eh_info;
1850 struct ata_queued_cmd *qc;
1854 * Read and clear the SError and err_cause bits.
1855 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
1856 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
1858 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1859 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1861 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1862 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
1863 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1864 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1866 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1868 if (edma_err_cause & EDMA_ERR_DEV) {
1870 * Device errors during FIS-based switching operation
1871 * require special handling.
1873 if (mv_handle_dev_err(ap, edma_err_cause))
1877 qc = mv_get_active_qc(ap);
1878 ata_ehi_clear_desc(ehi);
1879 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
1880 edma_err_cause, pp->pp_flags);
1882 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
1883 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
1884 if (fis_cause & SATA_FIS_IRQ_AN) {
1885 u32 ec = edma_err_cause &
1886 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
1887 sata_async_notification(ap);
1889 return; /* Just an AN; no need for the nukes */
1890 ata_ehi_push_desc(ehi, "SDB notify");
1894 * All generations share these EDMA error cause bits:
1896 if (edma_err_cause & EDMA_ERR_DEV) {
1897 err_mask |= AC_ERR_DEV;
1898 action |= ATA_EH_RESET;
1899 ata_ehi_push_desc(ehi, "dev error");
1901 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1902 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1903 EDMA_ERR_INTRL_PAR)) {
1904 err_mask |= AC_ERR_ATA_BUS;
1905 action |= ATA_EH_RESET;
1906 ata_ehi_push_desc(ehi, "parity error");
1908 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1909 ata_ehi_hotplugged(ehi);
1910 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1911 "dev disconnect" : "dev connect");
1912 action |= ATA_EH_RESET;
1916 * Gen-I has a different SELF_DIS bit,
1917 * different FREEZE bits, and no SERR bit:
1919 if (IS_GEN_I(hpriv)) {
1920 eh_freeze_mask = EDMA_EH_FREEZE_5;
1921 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1922 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1923 ata_ehi_push_desc(ehi, "EDMA self-disable");
1926 eh_freeze_mask = EDMA_EH_FREEZE;
1927 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1928 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1929 ata_ehi_push_desc(ehi, "EDMA self-disable");
1931 if (edma_err_cause & EDMA_ERR_SERR) {
1932 ata_ehi_push_desc(ehi, "SError=%08x", serr);
1933 err_mask |= AC_ERR_ATA_BUS;
1934 action |= ATA_EH_RESET;
1939 err_mask = AC_ERR_OTHER;
1940 action |= ATA_EH_RESET;
1943 ehi->serror |= serr;
1944 ehi->action |= action;
1947 qc->err_mask |= err_mask;
1949 ehi->err_mask |= err_mask;
1951 if (err_mask == AC_ERR_DEV) {
1953 * Cannot do ata_port_freeze() here,
1954 * because it would kill PIO access,
1955 * which is needed for further diagnosis.
1959 } else if (edma_err_cause & eh_freeze_mask) {
1961 * Note to self: ata_port_freeze() calls ata_port_abort()
1963 ata_port_freeze(ap);
1970 ata_link_abort(qc->dev->link);
1976 static void mv_process_crpb_response(struct ata_port *ap,
1977 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1979 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1983 u16 edma_status = le16_to_cpu(response->flags);
1985 * edma_status from a response queue entry:
1986 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
1987 * MSB is saved ATA status from command completion.
1990 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
1993 * Error will be seen/handled by mv_err_intr().
1994 * So do nothing at all here.
1999 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2000 if (!ac_err_mask(ata_status))
2001 ata_qc_complete(qc);
2002 /* else: leave it for mv_err_intr() */
2004 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2009 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2011 void __iomem *port_mmio = mv_ap_base(ap);
2012 struct mv_host_priv *hpriv = ap->host->private_data;
2014 bool work_done = false;
2015 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2017 /* Get the hardware queue position index */
2018 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
2019 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2021 /* Process new responses from since the last time we looked */
2022 while (in_index != pp->resp_idx) {
2024 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2026 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2028 if (IS_GEN_I(hpriv)) {
2029 /* 50xx: no NCQ, only one command active at a time */
2030 tag = ap->link.active_tag;
2032 /* Gen II/IIE: get command tag from CRPB entry */
2033 tag = le16_to_cpu(response->id) & 0x1f;
2035 mv_process_crpb_response(ap, response, tag, ncq_enabled);
2039 /* Update the software queue position index in hardware */
2041 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2042 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2043 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
2046 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2048 struct mv_port_priv *pp;
2049 int edma_was_enabled;
2051 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2052 mv_unexpected_intr(ap, 0);
2056 * Grab a snapshot of the EDMA_EN flag setting,
2057 * so that we have a consistent view for this port,
2058	 * even if one of the routines we call changes it.
2060 pp = ap->private_data;
2061 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2063 * Process completed CRPB response(s) before other events.
2065 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2066 mv_process_crpb_entries(ap, pp);
2067 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2068 mv_handle_fbs_ncq_dev_err(ap);
2071 * Handle chip-reported errors, or continue on to handle PIO.
2073 if (unlikely(port_cause & ERR_IRQ)) {
2075 } else if (!edma_was_enabled) {
2076 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2078 ata_sff_host_intr(ap, qc);
2080 mv_unexpected_intr(ap, edma_was_enabled);
2085 * mv_host_intr - Handle all interrupts on the given host controller
2086 * @host: host specific structure
2087 * @main_irq_cause: Main interrupt cause register for the chip.
2090 * Inherited from caller.
2092 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2094 struct mv_host_priv *hpriv = host->private_data;
2095 void __iomem *mmio = hpriv->base, *hc_mmio;
2096 unsigned int handled = 0, port;
2098 for (port = 0; port < hpriv->n_ports; port++) {
2099 struct ata_port *ap = host->ports[port];
2100 unsigned int p, shift, hardport, port_cause;
2102 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2104 * Each hc within the host has its own hc_irq_cause register,
2105	 * where the interrupting ports' bits get ack'd.
2107 if (hardport == 0) { /* first port on this hc ? */
2108 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2109 u32 port_mask, ack_irqs;
2111		 * Skip this entire hc if nothing is pending for any of its ports
2114 port += MV_PORTS_PER_HC - 1;
2118 * We don't need/want to read the hc_irq_cause register,
2119 * because doing so hurts performance, and
2120 * main_irq_cause already gives us everything we need.
2122 * But we do have to *write* to the hc_irq_cause to ack
2123 * the ports that we are handling this time through.
2125 * This requires that we create a bitmap for those
2126 * ports which interrupted us, and use that bitmap
2127 * to ack (only) those ports via hc_irq_cause.
2130 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2131 if ((port + p) >= hpriv->n_ports)
2133 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2134 if (hc_cause & port_mask)
2135 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2137 hc_mmio = mv_hc_base_from_port(mmio, port);
2138 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
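/*
 * main_irq_cause carries two bits (DONE_IRQ | ERR_IRQ) per port,
 * while hc_irq_cause carries one DMA_IRQ bit and one DEV_IRQ bit
 * per port at different positions within the register; hence the
 * "p * 2" versus "p" shifts above. The cause register clears bits
 * written as zero, so writing ~ack_irqs acks only the ports handled
 * on this pass and leaves the other ports' pending bits intact.
 */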
2142 * Handle interrupts signalled for this port:
2144 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2146 mv_port_intr(ap, port_cause);
2151 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2153 struct mv_host_priv *hpriv = host->private_data;
2154 struct ata_port *ap;
2155 struct ata_queued_cmd *qc;
2156 struct ata_eh_info *ehi;
2157 unsigned int i, err_mask, printed = 0;
2160 err_cause = readl(mmio + hpriv->irq_cause_ofs);
2162 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2165 DPRINTK("All regs @ PCI error\n");
2166 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2168 writelfl(0, mmio + hpriv->irq_cause_ofs);
2170 for (i = 0; i < host->n_ports; i++) {
2171 ap = host->ports[i];
2172 if (!ata_link_offline(&ap->link)) {
2173 ehi = &ap->link.eh_info;
2174 ata_ehi_clear_desc(ehi);
2176 ata_ehi_push_desc(ehi,
2177 "PCI err cause 0x%08x", err_cause);
2178 err_mask = AC_ERR_HOST_BUS;
2179 ehi->action = ATA_EH_RESET;
2180 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2182 qc->err_mask |= err_mask;
2184 ehi->err_mask |= err_mask;
2186 ata_port_freeze(ap);
2189 return 1; /* handled */
2193 * mv_interrupt - Main interrupt event handler
2195 * @dev_instance: private data; in this case the host structure
2197 * Read the read-only register to determine if any host
2198 * controllers have pending interrupts. If so, call lower level
2199 * routine to handle. Also check for PCI errors, which are only reported here.
2203 * This routine holds the host lock while processing pending interrupts.
2206 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2208 struct ata_host *host = dev_instance;
2209 struct mv_host_priv *hpriv = host->private_data;
2210 unsigned int handled = 0;
2211 u32 main_irq_cause, pending_irqs;
2213 spin_lock(&host->lock);
2214 main_irq_cause = readl(hpriv->main_irq_cause_addr);
2215 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
2217 * Deal with cases where we either have nothing pending, or have read
2218 * a bogus register value which can indicate HW removal or PCI fault.
2220 if (pending_irqs && main_irq_cause != 0xffffffffU) {
2221 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2222 handled = mv_pci_error(host, hpriv->base);
2224 handled = mv_host_intr(host, pending_irqs);
2226 spin_unlock(&host->lock);
2227 return IRQ_RETVAL(handled);
2230 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2234 switch (sc_reg_in) {
2238 ofs = sc_reg_in * sizeof(u32);
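/*
 * On 5xxx parts the supported SCRs (status/error/control) sit at
 * consecutive 32-bit offsets within the PHY block, so the offset is
 * simply reg * 4. Unsupported registers yield 0xffffffffU, which
 * the scr_read/scr_write wrappers below treat as "not present".
 */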
2247 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
2249 struct mv_host_priv *hpriv = link->ap->host->private_data;
2250 void __iomem *mmio = hpriv->base;
2251 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2252 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2254 if (ofs != 0xffffffffU) {
2255 *val = readl(addr + ofs);
2261 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
2263 struct mv_host_priv *hpriv = link->ap->host->private_data;
2264 void __iomem *mmio = hpriv->base;
2265 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2266 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2268 if (ofs != 0xffffffffU) {
2269 writelfl(val, addr + ofs);
2275 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
2277 struct pci_dev *pdev = to_pci_dev(host->dev);
2280 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
2283 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2285 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2288 mv_reset_pci_bus(host, mmio);
2291 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2293 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
2296 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
2299 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
2302 tmp = readl(phy_mmio + MV5_PHY_MODE);
2304 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
2305 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
2308 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2312 writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
2314 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
2316 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2318 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2321 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2324 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
2325 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
2327 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
2330 tmp = readl(phy_mmio + MV5_LTMODE_OFS);
2332 writel(tmp, phy_mmio + MV5_LTMODE_OFS);
2334 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
2337 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
2340 tmp = readl(phy_mmio + MV5_PHY_MODE);
2342 tmp |= hpriv->signal[port].pre;
2343 tmp |= hpriv->signal[port].amps;
2344 writel(tmp, phy_mmio + MV5_PHY_MODE);
2349 #define ZERO(reg) writel(0, port_mmio + (reg))
2350 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
2353 void __iomem *port_mmio = mv_port_base(mmio, port);
2355 mv_reset_channel(hpriv, mmio, port);
2357 ZERO(0x028); /* command */
2358 writel(0x11f, port_mmio + EDMA_CFG_OFS);
2359 ZERO(0x004); /* timer */
2360 ZERO(0x008); /* irq err cause */
2361 ZERO(0x00c); /* irq err mask */
2362 ZERO(0x010); /* rq bah */
2363 ZERO(0x014); /* rq inp */
2364 ZERO(0x018); /* rq outp */
2365 ZERO(0x01c); /* respq bah */
2366 ZERO(0x024); /* respq outp */
2367 ZERO(0x020); /* respq inp */
2368 ZERO(0x02c); /* test control */
2369 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
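/*
 * The sequence above parks the port in a known state: the EDMA
 * command register is cleared, the error cause/mask and all
 * request/response queue pointers are zeroed, and a sane EDMA
 * config and IORDY timeout are restored.
 */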
2373 #define ZERO(reg) writel(0, hc_mmio + (reg))
2374 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2377 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2385 tmp = readl(hc_mmio + 0x20);
2388 writel(tmp, hc_mmio + 0x20);
2392 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2395 unsigned int hc, port;
2397 for (hc = 0; hc < n_hc; hc++) {
2398 for (port = 0; port < MV_PORTS_PER_HC; port++)
2399 mv5_reset_hc_port(hpriv, mmio,
2400 (hc * MV_PORTS_PER_HC) + port);
2402 mv5_reset_one_hc(hpriv, mmio, hc);
2409 #define ZERO(reg) writel(0, mmio + (reg))
2410 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2412 struct mv_host_priv *hpriv = host->private_data;
2415 tmp = readl(mmio + MV_PCI_MODE_OFS);
2417 writel(tmp, mmio + MV_PCI_MODE_OFS);
2419 ZERO(MV_PCI_DISC_TIMER);
2420 ZERO(MV_PCI_MSI_TRIGGER);
2421 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
2422 ZERO(MV_PCI_SERR_MASK);
2423 ZERO(hpriv->irq_cause_ofs);
2424 ZERO(hpriv->irq_mask_ofs);
2425 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2426 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2427 ZERO(MV_PCI_ERR_ATTRIBUTE);
2428 ZERO(MV_PCI_ERR_COMMAND);
2432 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2436 mv5_reset_flash(hpriv, mmio);
2438 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
2440 tmp |= (1 << 5) | (1 << 6);
2441 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
2445 * mv6_reset_hc - Perform the 6xxx global soft reset
2446 * @mmio: base address of the HBA
2448 * This routine only applies to 6xxx parts.
2451 * Inherited from caller.
2453 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2456 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2460 /* Following procedure defined in PCI "main command and status
2464 writel(t | STOP_PCI_MASTER, reg);
2466 for (i = 0; i < 1000; i++) {
2469 if (PCI_MASTER_EMPTY & t)
2472 if (!(PCI_MASTER_EMPTY & t)) {
2473 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2481 writel(t | GLOB_SFT_RST, reg);
2484 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2486 if (!(GLOB_SFT_RST & t)) {
2487 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2492 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2495 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2498 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2500 if (GLOB_SFT_RST & t) {
2501 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2508 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2511 void __iomem *port_mmio;
2514 tmp = readl(mmio + MV_RESET_CFG_OFS);
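/*
 * Presumably bit 0 of the reset-config register indicates whether
 * the PHY settings were strapped at reset; when it is clear, fall
 * back to fixed amplitude/pre-emphasis defaults rather than reading
 * them out of PHY_MODE2 below.
 */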
2515 if ((tmp & (1 << 0)) == 0) {
2516 hpriv->signal[idx].amps = 0x7 << 8;
2517 hpriv->signal[idx].pre = 0x1 << 5;
2521 port_mmio = mv_port_base(mmio, idx);
2522 tmp = readl(port_mmio + PHY_MODE2);
2524 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2525 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2528 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2530 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
2533 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2536 void __iomem *port_mmio = mv_port_base(mmio, port);
2538 u32 hp_flags = hpriv->hp_flags;
2540 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2542 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2545 if (fix_phy_mode2) {
2546 m2 = readl(port_mmio + PHY_MODE2);
2549 writel(m2, port_mmio + PHY_MODE2);
2553 m2 = readl(port_mmio + PHY_MODE2);
2554 m2 &= ~((1 << 16) | (1 << 31));
2555 writel(m2, port_mmio + PHY_MODE2);
2561 * Gen-II/IIe PHY_MODE3 errata RM#2:
2562 * Achieves better receiver noise performance than the h/w default:
2564 m3 = readl(port_mmio + PHY_MODE3);
2565 m3 = (m3 & 0x1f) | (0x5555601 << 5);
2567 /* Guideline 88F5182 (GL# SATA-S11) */
2571 if (fix_phy_mode4) {
2572 u32 m4 = readl(port_mmio + PHY_MODE4);
2574 * Enforce reserved-bit restrictions on GenIIe devices only.
2575 * For earlier chipsets, force only the internal config field
2576 * (workaround for errata FEr SATA#10 part 1).
2578 if (IS_GEN_IIE(hpriv))
2579 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
2581 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
2582 writel(m4, port_mmio + PHY_MODE4);
2585 * Workaround for 60x1-B2 errata SATA#13:
2586 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
2587 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
2589 writel(m3, port_mmio + PHY_MODE3);
2591 /* Revert values of pre-emphasis and signal amps to the saved ones */
2592 m2 = readl(port_mmio + PHY_MODE2);
2594 m2 &= ~MV_M2_PREAMP_MASK;
2595 m2 |= hpriv->signal[port].amps;
2596 m2 |= hpriv->signal[port].pre;
2599 /* according to mvSata 3.6.1, some IIE values are fixed */
2600 if (IS_GEN_IIE(hpriv)) {
2605 writel(m2, port_mmio + PHY_MODE2);
2608 /* TODO: use the generic LED interface to configure the SATA Presence */
2609 /* & Activity LEDs on the board */
2610 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2616 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2619 void __iomem *port_mmio;
2622 port_mmio = mv_port_base(mmio, idx);
2623 tmp = readl(port_mmio + PHY_MODE2);
2625 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2626 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2630 #define ZERO(reg) writel(0, port_mmio + (reg))
2631 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2632 void __iomem *mmio, unsigned int port)
2634 void __iomem *port_mmio = mv_port_base(mmio, port);
2636 mv_reset_channel(hpriv, mmio, port);
2638 ZERO(0x028); /* command */
2639 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2640 ZERO(0x004); /* timer */
2641 ZERO(0x008); /* irq err cause */
2642 ZERO(0x00c); /* irq err mask */
2643 ZERO(0x010); /* rq bah */
2644 ZERO(0x014); /* rq inp */
2645 ZERO(0x018); /* rq outp */
2646 ZERO(0x01c); /* respq bah */
2647 ZERO(0x024); /* respq outp */
2648 ZERO(0x020); /* respq inp */
2649 ZERO(0x02c); /* test control */
2650 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
2655 #define ZERO(reg) writel(0, hc_mmio + (reg))
2656 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2659 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2669 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2670 void __iomem *mmio, unsigned int n_hc)
2674 for (port = 0; port < hpriv->n_ports; port++)
2675 mv_soc_reset_hc_port(hpriv, mmio, port);
2677 mv_soc_reset_one_hc(hpriv, mmio);
2682 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2688 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2693 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
2695 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
2697 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
2699 ifcfg |= (1 << 7); /* enable gen2i speed */
2700 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
2703 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2704 unsigned int port_no)
2706 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2709 * The datasheet warns against setting EDMA_RESET when EDMA is active
2710 * (but doesn't say what the problem might be). So we first try
2711 * to disable the EDMA engine before doing the EDMA_RESET operation.
2713 mv_stop_edma_engine(port_mmio);
2714 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
2716 if (!IS_GEN_I(hpriv)) {
2717 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
2718 mv_setup_ifcfg(port_mmio, 1);
2721 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
2722 * link, and physical layers. It resets all SATA interface registers
2723 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2725 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
2726 udelay(25); /* allow reset propagation */
2727 writelfl(0, port_mmio + EDMA_CMD_OFS);
2729 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2731 if (IS_GEN_I(hpriv))
2735 static void mv_pmp_select(struct ata_port *ap, int pmp)
2737 if (sata_pmp_supported(ap)) {
2738 void __iomem *port_mmio = mv_ap_base(ap);
2739 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2740 int old = reg & 0xf;
2743 reg = (reg & ~0xf) | pmp;
2744 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
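/*
 * The low nibble of SATA_IFCTL selects which port-multiplier device
 * (0-15) subsequent commands are routed to; everything else in the
 * register is preserved.
 */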
2749 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2750 unsigned long deadline)
2752 mv_pmp_select(link->ap, sata_srst_pmp(link));
2753 return sata_std_hardreset(link, class, deadline);
2756 static int mv_softreset(struct ata_link *link, unsigned int *class,
2757 unsigned long deadline)
2759 mv_pmp_select(link->ap, sata_srst_pmp(link));
2760 return ata_sff_softreset(link, class, deadline);
2763 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2764 unsigned long deadline)
2766 struct ata_port *ap = link->ap;
2767 struct mv_host_priv *hpriv = ap->host->private_data;
2768 struct mv_port_priv *pp = ap->private_data;
2769 void __iomem *mmio = hpriv->base;
2770 int rc, attempts = 0, extra = 0;
2774 mv_reset_channel(hpriv, mmio, ap->port_no);
2775 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2777 /* Workaround for errata FEr SATA#10 (part 2) */
2779 const unsigned long *timing =
2780 sata_ehc_deb_timing(&link->eh_context);
2782 rc = sata_link_hardreset(link, timing, deadline + extra,
2784 rc = online ? -EAGAIN : rc;
2787 sata_scr_read(link, SCR_STATUS, &sstatus);
2788 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2789 /* Force 1.5gb/s link speed and try again */
2790 mv_setup_ifcfg(mv_ap_base(ap), 0);
2791 if (time_after(jiffies + HZ, deadline))
2792 extra = HZ; /* only extend it once, max */
2794 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
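/*
 * SStatus decoding per the standard SCR0 layout (DET in bits 3:0,
 * SPD in bits 7:4, IPM in bits 11:8): 0x113/0x123 mean the link is
 * up at 1.5/3.0 Gb/s, while a persistent 0x121 means the device was
 * detected but phy communication never came up at 3.0 Gb/s, which
 * is what triggers the forced fallback to 1.5 Gb/s above.
 */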
2799 static void mv_eh_freeze(struct ata_port *ap)
2802 mv_enable_port_irqs(ap, 0);
2805 static void mv_eh_thaw(struct ata_port *ap)
2807 struct mv_host_priv *hpriv = ap->host->private_data;
2808 unsigned int port = ap->port_no;
2809 unsigned int hardport = mv_hardport_from_port(port);
2810 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
2811 void __iomem *port_mmio = mv_ap_base(ap);
2814 /* clear EDMA errors on this port */
2815 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2817 /* clear pending irq events */
2818 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
2819 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2821 mv_enable_port_irqs(ap, ERR_IRQ);
2825 * mv_port_init - Perform some early initialization on a single port.
2826 * @port: libata data structure storing shadow register addresses
2827 * @port_mmio: base address of the port
2829 * Initialize shadow register mmio addresses, clear outstanding
2830 * interrupts on the port, and unmask interrupts for the future
2831 * start of the port.
2834 * Inherited from caller.
2836 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2838 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2841 /* PIO related setup
2843 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2845 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2846 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2847 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2848 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2849 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2850 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2852 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2853 /* special case: control/altstatus doesn't have ATA_REG_ address */
2854 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
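/*
 * The chip exposes the ATA taskfile ("shadow") registers as 32-bit
 * wide MMIO slots, so each register lives at 4x its classic
 * taskfile index from shd_base; hence the sizeof(u32) stride above.
 */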
2857 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2859 /* Clear any currently outstanding port interrupt conditions */
2860 serr_ofs = mv_scr_offset(SCR_ERROR);
2861 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2862 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2864 /* unmask all non-transient EDMA error interrupts */
2865 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2867 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2868 readl(port_mmio + EDMA_CFG_OFS),
2869 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2870 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2873 static unsigned int mv_in_pcix_mode(struct ata_host *host)
2875 struct mv_host_priv *hpriv = host->private_data;
2876 void __iomem *mmio = hpriv->base;
2879 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
2880 return 0; /* not PCI-X capable */
2881 reg = readl(mmio + MV_PCI_MODE_OFS);
2882 if ((reg & MV_PCI_MODE_MASK) == 0)
2883 return 0; /* conventional PCI mode */
2884 return 1; /* chip is in PCI-X mode */
2887 static int mv_pci_cut_through_okay(struct ata_host *host)
2889 struct mv_host_priv *hpriv = host->private_data;
2890 void __iomem *mmio = hpriv->base;
2893 if (!mv_in_pcix_mode(host)) {
2894 reg = readl(mmio + PCI_COMMAND_OFS);
2895 if (reg & PCI_COMMAND_MRDTRIG)
2896 return 0; /* not okay */
2898 return 1; /* okay */
2901 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2903 struct pci_dev *pdev = to_pci_dev(host->dev);
2904 struct mv_host_priv *hpriv = host->private_data;
2905 u32 hp_flags = hpriv->hp_flags;
2907 switch (board_idx) {
2909 hpriv->ops = &mv5xxx_ops;
2910 hp_flags |= MV_HP_GEN_I;
2912 switch (pdev->revision) {
2914 hp_flags |= MV_HP_ERRATA_50XXB0;
2917 hp_flags |= MV_HP_ERRATA_50XXB2;
2920 dev_printk(KERN_WARNING, &pdev->dev,
2921 "Applying 50XXB2 workarounds to unknown rev\n");
2922 hp_flags |= MV_HP_ERRATA_50XXB2;
2929 hpriv->ops = &mv5xxx_ops;
2930 hp_flags |= MV_HP_GEN_I;
2932 switch (pdev->revision) {
2934 hp_flags |= MV_HP_ERRATA_50XXB0;
2937 hp_flags |= MV_HP_ERRATA_50XXB2;
2940 dev_printk(KERN_WARNING, &pdev->dev,
2941 "Applying B2 workarounds to unknown rev\n");
2942 hp_flags |= MV_HP_ERRATA_50XXB2;
2949 hpriv->ops = &mv6xxx_ops;
2950 hp_flags |= MV_HP_GEN_II;
2952 switch (pdev->revision) {
2954 hp_flags |= MV_HP_ERRATA_60X1B2;
2957 hp_flags |= MV_HP_ERRATA_60X1C0;
2960 dev_printk(KERN_WARNING, &pdev->dev,
2961 "Applying B2 workarounds to unknown rev\n");
2962 hp_flags |= MV_HP_ERRATA_60X1B2;
2968 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
2969 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2970 (pdev->device == 0x2300 || pdev->device == 0x2310))
2973 * Highpoint RocketRAID PCIe 23xx series cards:
2975 * Unconfigured drives are treated as "Legacy"
2976 * by the BIOS, and it overwrites sector 8 with
2977 * a "Lgcy" metadata block prior to Linux boot.
2979 * Configured drives (RAID or JBOD) leave sector 8
2980 * alone, but instead overwrite a high numbered
2981 * sector for the RAID metadata. This sector can
2982 * be determined exactly, by truncating the physical
2983 * drive capacity to a nice even GB value.
2985 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2987 * Warn the user, lest they think we're just buggy.
2989 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2990 " BIOS CORRUPTS DATA on all attached drives,"
2991 " regardless of if/how they are configured."
2993 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2994 " use sectors 8-9 on \"Legacy\" drives,"
2995 " and avoid the final two gigabytes on"
2996 " all RocketRAID BIOS initialized drives.\n");
3000 hpriv->ops = &mv6xxx_ops;
3001 hp_flags |= MV_HP_GEN_IIE;
3002 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3003 hp_flags |= MV_HP_CUT_THROUGH;
3005 switch (pdev->revision) {
3006 case 0x2: /* Rev.B0: the first/only public release */
3007 hp_flags |= MV_HP_ERRATA_60X1C0;
3010 dev_printk(KERN_WARNING, &pdev->dev,
3011 "Applying 60X1C0 workarounds to unknown rev\n");
3012 hp_flags |= MV_HP_ERRATA_60X1C0;
3017 hpriv->ops = &mv_soc_ops;
3018 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3019 MV_HP_ERRATA_60X1C0;
3023 dev_printk(KERN_ERR, host->dev,
3024 "BUG: invalid board index %u\n", board_idx);
3028 hpriv->hp_flags = hp_flags;
3029 if (hp_flags & MV_HP_PCIE) {
3030 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
3031 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
3032 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3034 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
3035 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
3036 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
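/*
 * PCIe parts report bus-level errors through a different register
 * block than conventional PCI/PCI-X parts. Caching the cause/mask
 * offsets and the unmask value here lets mv_pci_error() and
 * mv_reset_pci_bus() stay agnostic about which flavor is present.
 */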
3043 * mv_init_host - Perform some early initialization of the host.
3044 * @host: ATA host to initialize
3045 * @board_idx: controller index
3047 * If possible, do an early global reset of the host. Then do
3048 * our port init and clear/unmask all/relevant host interrupts.
3051 * Inherited from caller.
3053 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
3055 int rc = 0, n_hc, port, hc;
3056 struct mv_host_priv *hpriv = host->private_data;
3057 void __iomem *mmio = hpriv->base;
3059 rc = mv_chip_id(host, board_idx);
3063 if (IS_SOC(hpriv)) {
3064 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
3065 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
3067 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
3068 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
3071 /* global interrupt mask: 0 == mask everything */
3072 mv_set_main_irq_mask(host, ~0, 0);
3074 n_hc = mv_get_hc_count(host->ports[0]->flags);
3076 for (port = 0; port < host->n_ports; port++)
3077 hpriv->ops->read_preamp(hpriv, port, mmio);
3079 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3083 hpriv->ops->reset_flash(hpriv, mmio);
3084 hpriv->ops->reset_bus(host, mmio);
3085 hpriv->ops->enable_leds(hpriv, mmio);
3087 for (port = 0; port < host->n_ports; port++) {
3088 struct ata_port *ap = host->ports[port];
3089 void __iomem *port_mmio = mv_port_base(mmio, port);
3091 mv_port_init(&ap->ioaddr, port_mmio);
3094 if (!IS_SOC(hpriv)) {
3095 unsigned int offset = port_mmio - mmio;
3096 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
3097 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
3102 for (hc = 0; hc < n_hc; hc++) {
3103 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3105 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3106 "(before clear)=0x%08x\n", hc,
3107 readl(hc_mmio + HC_CFG_OFS),
3108 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
3110 /* Clear any currently outstanding hc interrupt conditions */
3111 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
3114 if (!IS_SOC(hpriv)) {
3115 /* Clear any currently outstanding host interrupt conditions */
3116 writelfl(0, mmio + hpriv->irq_cause_ofs);
3118 /* and unmask interrupt generation for host regs */
3119 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
3122 * enable only global host interrupts for now.
3123 * The per-port interrupts get done later as ports are set up.
3125 mv_set_main_irq_mask(host, 0, PCI_ERR);
3131 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3133 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3135 if (!hpriv->crqb_pool)
3138 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3140 if (!hpriv->crpb_pool)
3143 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3145 if (!hpriv->sg_tbl_pool)
3151 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3152 struct mbus_dram_target_info *dram)
3156 for (i = 0; i < 4; i++) {
3157 writel(0, hpriv->base + WINDOW_CTRL(i));
3158 writel(0, hpriv->base + WINDOW_BASE(i));
3161 for (i = 0; i < dram->num_cs; i++) {
3162 struct mbus_dram_window *cs = dram->cs + i;
3164 writel(((cs->size - 1) & 0xffff0000) |
3165 (cs->mbus_attr << 8) |
3166 (dram->mbus_dram_target_id << 4) | 1,
3167 hpriv->base + WINDOW_CTRL(i));
3168 writel(cs->base, hpriv->base + WINDOW_BASE(i));
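/*
 * Each enabled DRAM chip-select gets one address-decode window:
 * WINDOW_CTRL packs the size mask (64KB granularity), the mbus
 * attribute, the target id, and an enable bit, while WINDOW_BASE
 * holds the window's base address.
 */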
3173 * mv_platform_probe - handle a positive probe of an SoC Marvell
3175 * @pdev: platform device found
3178 * Inherited from caller.
3180 static int mv_platform_probe(struct platform_device *pdev)
3182 static int printed_version;
3183 const struct mv_sata_platform_data *mv_platform_data;
3184 const struct ata_port_info *ppi[] =
3185 { &mv_port_info[chip_soc], NULL };
3186 struct ata_host *host;
3187 struct mv_host_priv *hpriv;
3188 struct resource *res;
3191 if (!printed_version++)
3192 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3195	 * Simple resource validation.
3197 if (unlikely(pdev->num_resources != 2)) {
3198 dev_err(&pdev->dev, "invalid number of resources\n");
3203 * Get the register base first
3205 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3210 mv_platform_data = pdev->dev.platform_data;
3211 n_ports = mv_platform_data->n_ports;
3213 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3214 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3216 if (!host || !hpriv)
3218 host->private_data = hpriv;
3219 hpriv->n_ports = n_ports;
3222 hpriv->base = devm_ioremap(&pdev->dev, res->start,
3223 res->end - res->start + 1);
3224 hpriv->base -= MV_SATAHC0_REG_BASE;
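/*
 * The SoC resource maps the SATAHC0 block itself, so bias the base
 * pointer down by MV_SATAHC0_REG_BASE; this way the register-offset
 * arithmetic shared with the PCI parts (which is relative to the
 * start of the whole BAR) can be used unchanged.
 */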
3227 * (Re-)program MBUS remapping windows if we are asked to.
3229 if (mv_platform_data->dram != NULL)
3230 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
3232 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3236 /* initialize adapter */
3237 rc = mv_init_host(host, chip_soc);
3241 dev_printk(KERN_INFO, &pdev->dev,
3242 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
3245 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
3246 IRQF_SHARED, &mv6_sht);
3251 * mv_platform_remove - unplug a platform interface
3252 * @pdev: platform device
3254 * A platform bus SATA device has been unplugged. Perform the needed
3255 * cleanup. Also called on module unload for any active devices.
3257 static int __devexit mv_platform_remove(struct platform_device *pdev)
3259 struct device *dev = &pdev->dev;
3260 struct ata_host *host = dev_get_drvdata(dev);
3262 ata_host_detach(host);
3266 static struct platform_driver mv_platform_driver = {
3267 .probe = mv_platform_probe,
3268 .remove = __devexit_p(mv_platform_remove),
3271 .owner = THIS_MODULE,
3277 static int mv_pci_init_one(struct pci_dev *pdev,
3278 const struct pci_device_id *ent);
3281 static struct pci_driver mv_pci_driver = {
3283 .id_table = mv_pci_tbl,
3284 .probe = mv_pci_init_one,
3285 .remove = ata_pci_remove_one,
3291 static int msi;	/* Use PCI MSI; either zero (off, default) or non-zero */
3294 /* move to PCI layer or libata core? */
3295 static int pci_go_64(struct pci_dev *pdev)
3299 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3300 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3302 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3304 dev_printk(KERN_ERR, &pdev->dev,
3305 "64-bit DMA enable failed\n");
3310 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3312 dev_printk(KERN_ERR, &pdev->dev,
3313 "32-bit DMA enable failed\n");
3316 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3318 dev_printk(KERN_ERR, &pdev->dev,
3319 "32-bit consistent DMA enable failed\n");
3328 * mv_print_info - Dump key info to kernel log for perusal.
3329 * @host: ATA host to print info about
3331 * FIXME: complete this.
3334 * Inherited from caller.
3336 static void mv_print_info(struct ata_host *host)
3338 struct pci_dev *pdev = to_pci_dev(host->dev);
3339 struct mv_host_priv *hpriv = host->private_data;
3341 const char *scc_s, *gen;
3343 /* Use this to determine the HW stepping of the chip so we know
3344	 * which errata to work around
3346 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3349 else if (scc == 0x01)
3354 if (IS_GEN_I(hpriv))
3356 else if (IS_GEN_II(hpriv))
3358 else if (IS_GEN_IIE(hpriv))
3363 dev_printk(KERN_INFO, &pdev->dev,
3364 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3365 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3366 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3370 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3371 * @pdev: PCI device found
3372 * @ent: PCI device ID entry for the matched host
3375 * Inherited from caller.
3377 static int mv_pci_init_one(struct pci_dev *pdev,
3378 const struct pci_device_id *ent)
3380 static int printed_version;
3381 unsigned int board_idx = (unsigned int)ent->driver_data;
3382 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3383 struct ata_host *host;
3384 struct mv_host_priv *hpriv;
3387 if (!printed_version++)
3388 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3391 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3393 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3394 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3395 if (!host || !hpriv)
3397 host->private_data = hpriv;
3398 hpriv->n_ports = n_ports;
3400 /* acquire resources */
3401 rc = pcim_enable_device(pdev);
3405 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3407 pcim_pin_device(pdev);
3410 host->iomap = pcim_iomap_table(pdev);
3411 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3413 rc = pci_go_64(pdev);
3417 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3421 /* initialize adapter */
3422 rc = mv_init_host(host, board_idx);
3426 /* Enable interrupts */
3427 if (msi && pci_enable_msi(pdev))
3430 mv_dump_pci_cfg(pdev, 0x68);
3431 mv_print_info(host);
3433 pci_set_master(pdev);
3434 pci_try_set_mwi(pdev);
3435 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3436 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3440 static int mv_platform_probe(struct platform_device *pdev);
3441 static int __devexit mv_platform_remove(struct platform_device *pdev);
3443 static int __init mv_init(void)
3447 rc = pci_register_driver(&mv_pci_driver);
3451 rc = platform_driver_register(&mv_platform_driver);
3455 pci_unregister_driver(&mv_pci_driver);
3460 static void __exit mv_exit(void)
3463 pci_unregister_driver(&mv_pci_driver);
3465 platform_driver_unregister(&mv_platform_driver);
3468 MODULE_AUTHOR("Brett Russ");
3469 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3470 MODULE_LICENSE("GPL");
3471 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3472 MODULE_VERSION(DRV_VERSION);
3473 MODULE_ALIAS("platform:" DRV_NAME);
3476 module_param(msi, int, 0444);
3477 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3480 module_init(mv_init);
3481 module_exit(mv_exit);