/*
 * drivers/mtd/nand/omap-hw.c
 *
 * This is the MTD driver for the OMAP1710 internal HW NAND controller.
 *
 * Copyright (C) 2004-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and
 *	   Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING.  If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/errno.h>
31 #include <linux/sched.h>
32 #include <linux/types.h>
33 #include <linux/wait.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand.h>
38 #include <linux/mtd/partitions.h>
39 #include <linux/mtd/nand_ecc.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/clk.h>
45 #include <mach/board.h>
/*
 * OMAP1710 NAND controller: physical base and register offsets (byte
 * offsets from NAND_BASE).
 *
 * NOTE(review): this view of the file is missing a few definitions that
 * code below relies on (at least NND_CTRL and NND_FIFO) — they live in
 * elided lines; confirm against the full source before editing offsets.
 */
#define NAND_BASE 0xfffbcc00
#define NND_REVISION 0x00
#define NND_ACCESS 0x04
#define NND_ADDR_SRC 0x08
#define NND_STATUS 0x18
#define NND_READY 0x1c
#define NND_COMMAND 0x20
#define NND_COMMAND_SEC 0x24
#define NND_ECC_SELECT 0x28
#define NND_ECC_START 0x2c
#define NND_ECC_9 0x4c
#define NND_RESET 0x50
#define NND_FIFOCTRL 0x58
#define NND_PSC_CLK 0x5c
#define NND_SYSTEST 0x60
#define NND_SYSCFG 0x64
#define NND_SYSSTATUS 0x68
#define NND_FIFOTEST1 0x6c
#define NND_FIFOTEST2 0x70
#define NND_FIFOTEST3 0x74
#define NND_FIFOTEST4 0x78
#define NND_PSC1_CLK 0x8c
#define NND_PSC2_CLK 0x90

/* NAND flash opcodes written to the controller command registers. */
#define NND_CMD_READ1_LOWER 0x00
#define NND_CMD_WRITE1_LOWER 0x00
#define NND_CMD_READ1_UPPER 0x01
#define NND_CMD_WRITE1_UPPER 0x01
#define NND_CMD_PROGRAM_END 0x10
#define NND_CMD_READ2_SPARE 0x50
#define NND_CMD_WRITE2_SPARE 0x50
#define NND_CMD_ERASE 0x60
#define NND_CMD_STATUS 0x70
#define NND_CMD_PROGRAM 0x80
#define NND_CMD_READ_ID 0x90
#define NND_CMD_ERASE_END 0xD0
#define NND_CMD_RESET 0xFF

/*
 * Hamming-style ECC parity bits as laid out in the controller's ECC
 * result word: even parity in bits 0..11, odd parity in bits 16..27.
 */
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
#define NAND_Ecc_P8e (1 << 3)
#define NAND_Ecc_P16e (1 << 4)
#define NAND_Ecc_P32e (1 << 5)
#define NAND_Ecc_P64e (1 << 6)
#define NAND_Ecc_P128e (1 << 7)
#define NAND_Ecc_P256e (1 << 8)
#define NAND_Ecc_P512e (1 << 9)
#define NAND_Ecc_P1024e (1 << 10)
#define NAND_Ecc_P2048e (1 << 11)
#define NAND_Ecc_P1o (1 << 16)
#define NAND_Ecc_P2o (1 << 17)
#define NAND_Ecc_P4o (1 << 18)
#define NAND_Ecc_P8o (1 << 19)
#define NAND_Ecc_P16o (1 << 20)
#define NAND_Ecc_P32o (1 << 21)
#define NAND_Ecc_P64o (1 << 22)
#define NAND_Ecc_P128o (1 << 23)
#define NAND_Ecc_P256o (1 << 24)
#define NAND_Ecc_P512o (1 << 25)
#define NAND_Ecc_P1024o (1 << 26)
#define NAND_Ecc_P2048o (1 << 27)

/* Map a value to 1/0.  NOTE(review): the macro argument is not
 * parenthesized; callers below only pass simple `a & MASK` expressions,
 * which happen to be safe, but beware before reusing. */
#define TF(value) (value ? 1 : 0)

/* Extract parity bits from the raw ECC word into the bit positions used
 * by the smart-media ECC byte layout (see gen_true_ecc()). */
#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0 )
#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1 )
#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2 )
#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3 )
#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4 )
#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5 )
#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6 )
#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7 )
#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0 )
#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1 )
#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2 )
#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3 )
#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4 )
#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5 )
#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6 )
#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7 )
#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0 )
#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1 )
#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2 )
#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3 )
#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4 )
#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5 )
#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6 )
#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7 )

/* Same parity bits, alternate ("_s") byte layout. */
#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0 )
#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1 )
#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2 )
#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3 )
#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4 )
#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5 )
#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6 )
#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7 )
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0 )
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1 )
extern struct nand_oobinfo jffs2_oobinfo;

/*
 * MTD structure for OMAP board
 */
static struct mtd_info *omap_mtd;		/* the single MTD device we register */
static struct clk *omap_nand_clk;		/* functional clock ("armper_ck") */
static int omap_nand_dma_ch;			/* DMA channel, or -1 if unavailable */
static struct completion omap_nand_dma_comp;	/* signalled by nand_dma_cb() */
/* Virtual (CPU-mapped) base of the controller register block. */
static unsigned long omap_nand_base = OMAP1_IO_ADDRESS(NAND_BASE);
169 static inline u32 nand_read_reg(int idx)
171 return __raw_readl(omap_nand_base + idx);
174 static inline void nand_write_reg(int idx, u32 val)
176 __raw_writel(val, omap_nand_base + idx);
179 static inline u8 nand_read_reg8(int idx)
181 return __raw_readb(omap_nand_base + idx);
184 static inline void nand_write_reg8(int idx, u8 val)
186 __raw_writeb(val, omap_nand_base + idx);
/*
 * omap_nand_select_chip - nand_chip.select_chip hook.
 *
 * Toggles the chip-enable bits (bits 8/10/12/14) in NND_CTRL.  Setting all
 * of them appears to be the deselect path; the select path re-reads NND_CTRL
 * and writes it back.
 *
 * NOTE(review): this view of the file is incomplete — the switch on @chip
 * and the bit-clearing line for the select case are in elided lines, as is
 * the declaration of `l`.  Fragments below are kept verbatim.
 */
static void omap_nand_select_chip(struct mtd_info *mtd, int chip)
	l = nand_read_reg(NND_CTRL);
	l |= (1 << 8) | (1 << 10) | (1 << 12) | (1 << 14);
	nand_write_reg(NND_CTRL, l);
	/* Also CS1, CS2, CS4 would be available */
	l = nand_read_reg(NND_CTRL);
	nand_write_reg(NND_CTRL, l);
210 static void nand_dma_cb(int lch, u16 ch_status, void *data)
212 complete((struct completion *) data);
/*
 * omap_nand_dma_transfer - move data between @addr and the controller FIFO
 * using system DMA.
 * @mtd:       MTD device (not referenced in the visible code)
 * @addr:      memory buffer; cache-maintained before/after the transfer
 * @u32_count: length in 32-bit words (FIFO works in 16-byte blocks)
 * @is_write:  direction; write path sets POSTWRITE (bit 16), read path sets
 *             PREFETCH (bit 17) in NND_CTRL — both are cleared at the end
 *
 * Waits up to 1 s for the DMA completion and logs the worst-observed
 * latency on timeout.
 *
 * NOTE(review): this view of the file is incomplete — the if/else selecting
 * the read vs. write DMA setup, the tails of the omap_set_dma_* argument
 * lists, the `dma_ch` declaration and the timeout branch are in elided
 * lines.  Fragments below are kept verbatim.
 */
static void omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
		unsigned int u32_count, int is_write)
	const int block_size = 16;
	unsigned int block_count, len;
	unsigned long fifo_reg, timeout, jiffies_before, jiffies_spent;
	/* Worst completion latency seen so far, for the timeout message. */
	static unsigned long max_jiffies = 0;
	dma_ch = omap_nand_dma_ch;
	block_count = u32_count * 4 / block_size;
	/* Clear status bits, then program FIFO block size and count. */
	nand_write_reg(NND_STATUS, 0x0f);
	nand_write_reg(NND_FIFOCTRL, (block_size << 24) | block_count);
	/* Physical FIFO address — the DMA engine bypasses the CPU mapping. */
	fifo_reg = NAND_BASE + NND_FIFO;
	/* Write path: memory -> FIFO, FIFO address held constant. */
	omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_TIPB,
			OMAP_DMA_AMODE_CONSTANT, fifo_reg,
	omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC,
//	omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	/* Set POSTWRITE bit */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 16));
	/* Read path: FIFO -> memory. */
	omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_TIPB,
			OMAP_DMA_AMODE_CONSTANT, fifo_reg,
	omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC,
//	omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_8);
	/* Set PREFETCH bit */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 17));
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, block_size / 4,
			block_count, OMAP_DMA_SYNC_FRAME,
	init_completion(&omap_nand_dma_comp);
	len = u32_count << 2;
	dma_cache_maint(addr, len, DMA_TO_DEVICE);
	omap_start_dma(dma_ch);
	jiffies_before = jiffies;
	timeout = wait_for_completion_timeout(&omap_nand_dma_comp,
			msecs_to_jiffies(1000));
	jiffies_spent = (unsigned long)((long)jiffies - (long)jiffies_before);
	if (jiffies_spent > max_jiffies)
		max_jiffies = jiffies_spent;
	/* Presumably inside `if (timeout == 0)` — TODO confirm in full source. */
	printk(KERN_WARNING "omap-hw-nand: DMA timeout after %u ms, max. seen latency %u ms\n",
			jiffies_to_msecs(jiffies_spent),
			jiffies_to_msecs(max_jiffies));
	dma_cache_maint(addr, len, DMA_FROM_DEVICE);
	/* Clear both POSTWRITE and PREFETCH again. */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) & ~((1 << 16) | (1 << 17)));
/*
 * fifo_read - PIO fallback: drain the controller FIFO into @out.
 * @out: destination buffer
 * @len: length in 32-bit words on entry; rescaled to 16-byte block count
 *
 * Enables PREFETCH (bit 17 of NND_CTRL), then busy-waits on the status
 * register (bit 2) for each FIFO block before copying it out; PREFETCH is
 * cleared and status acknowledged (write 0x0f) when done.
 *
 * NOTE(review): this view of the file is incomplete — the declarations of
 * `i`/`c`, the outer per-block loop and the `*out++ = l;` store are in
 * elided lines.  Fragments below are kept verbatim.
 */
static void fifo_read(u32 *out, unsigned int len)
	const int block_size = 16;
	unsigned long status_reg, fifo_reg;
	status_reg = omap_nand_base + NND_STATUS;
	fifo_reg = omap_nand_base + NND_FIFO;
	/* Convert word count to number of 16-byte FIFO blocks. */
	len = len * 4 / block_size;
	nand_write_reg(NND_FIFOCTRL, (block_size << 24) | len);
	nand_write_reg(NND_STATUS, 0x0f);
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 17));
	/* Busy-wait until the FIFO signals a block is available. */
	while ((__raw_readl(status_reg) & (1 << 2)) == 0);
	__raw_writel(0x0f, status_reg);
	for (i = 0; i < c; i++) {
		u32 l = __raw_readl(fifo_reg);
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) & ~(1 << 17));
	nand_write_reg(NND_STATUS, 0x0f);
/*
 * omap_nand_read_buf - nand_chip.read_buf hook.
 *
 * Chooses the fastest available path: DMA for large word-aligned buffers,
 * FIFO copy as the DMA fallback, direct 32-bit register reads for small
 * aligned buffers, and byte-wise reads otherwise.
 *
 * NOTE(review): this view of the file is incomplete — the loop headers,
 * early `return`s after the DMA/FIFO paths and closing braces are in
 * elided lines.  Fragments below are kept verbatim.
 */
static void omap_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
	unsigned long access_reg;
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		int u32_count = len >> 2;
		u32 *dest = (u32 *) buf;
		/* If the transfer is big enough and the length divisible by
		 * 16, we try to use DMA transfer, or FIFO copy in case of
		 * DMA failure (e.g. all channels busy) */
		if (u32_count > 64 && (u32_count & 3) == 0) {
			if (omap_nand_dma_ch >= 0) {
				omap_nand_dma_transfer(mtd, buf, u32_count, 0);
			/* In case of an error, fallback to FIFO copy */
			fifo_read((u32 *) buf, u32_count);
		access_reg = omap_nand_base + NND_ACCESS;
		/* Small buffers we just read directly */
			*dest++ = __raw_readl(access_reg);
	/* If we're not word-aligned, we use byte copy */
	access_reg = omap_nand_base + NND_ACCESS;
		*buf++ = __raw_readb(access_reg);
/*
 * omap_nand_write_buf - nand_chip.write_buf hook.
 *
 * DMA for large word-aligned buffers, 32-bit register writes for small
 * aligned ones, byte writes otherwise.
 *
 * NOTE(review): omap_nand_dma_transfer() is declared void in this view of
 * the file, yet its result is compared with 0 below — either the prototype
 * or this call site is from a different revision; confirm in full source.
 * Loop headers and closing braces are also in elided lines; fragments
 * below are kept verbatim.
 */
static void omap_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		const u32 *src = (const u32 *) buf;
		/* If the transfer is big enough and length divisible by 16,
		 * we try to use DMA transfer. */
		if (len > 256 / 4 && (len & 3) == 0) {
			if (omap_nand_dma_transfer(mtd, (void *) buf, len, 1) == 0)
		/* In case of an error, fallback to CPU copy */
			nand_write_reg(NND_ACCESS, *src++);
		nand_write_reg8(NND_ACCESS, *buf++);
/*
 * omap_nand_verify_buf - nand_chip.verify_buf hook: compare @buf with data
 * streamed back from the chip through the access register.
 *
 * Returns 0 on match, nonzero (-EFAULT presumably) on mismatch — the
 * return statements are in elided lines, as are the loop headers; the
 * fragments below are kept verbatim.
 */
static int omap_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		const u32 *dest = (const u32 *) buf;
		/* Word-wise compare for aligned buffers... */
			if (*dest++ != nand_read_reg(NND_ACCESS))
	/* ...byte-wise otherwise. */
		if (*buf++ != nand_read_reg8(NND_ACCESS))
375 static u_char omap_nand_read_byte(struct mtd_info *mtd)
377 return nand_read_reg8(NND_ACCESS);
/*
 * omap_nand_dev_ready - nand_chip.dev_ready hook: sample the controller's
 * ready register.
 *
 * NOTE(review): the declaration of `l` and the return statement deriving
 * the ready flag from it are in elided lines; fragments kept verbatim.
 */
static int omap_nand_dev_ready(struct mtd_info *mtd)
	l = nand_read_reg(NND_READY);
/*
 * nand_write_command - issue one opcode to the flash via the controller.
 * @cmd:        NAND opcode
 * @addr:       address cycles to emit with the command
 * @addr_valid: nonzero when @addr should accompany @cmd; the address path
 *              uses NND_COMMAND, the address-less path NND_COMMAND_SEC
 *
 * Busy-waits until the device reports ready before returning.
 *
 * NOTE(review): the if/else around the two register pairs and the final
 * return are in elided lines; fragments below are kept verbatim.
 */
static int nand_write_command(u8 cmd, u32 addr, int addr_valid)
	nand_write_reg(NND_ADDR_SRC, addr);
	nand_write_reg8(NND_COMMAND, cmd);
	nand_write_reg(NND_ADDR_SRC, 0);
	nand_write_reg8(NND_COMMAND_SEC, cmd);
	while (!omap_nand_dev_ready(NULL));
/*
 * Send command to NAND device (small-page chips).
 *
 * omap_nand_command - nand_chip.cmdfunc hook for small-page devices.
 * For SEQIN, first positions the read pointer (READ0/READ1/READOOB)
 * according to @column, then dispatches the command with the appropriate
 * address cycles.
 *
 * NOTE(review): the switch header, break statements and default case are
 * in elided lines; fragments kept verbatim (including the odd `0XFF`
 * capitalization).
 */
static void omap_nand_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
	struct nand_chip *this = mtd->priv;
	/*
	 * Write out the command to the device.
	 */
	if (command == NAND_CMD_SEQIN) {
		if (column >= mtd->writesize) {
			/* OOB area --> READOOB */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		/* else: second half --> READ1 */
			readcmd = NAND_CMD_READ1;
		nand_write_command(readcmd, 0, 0);
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_ERASE2:
		nand_write_command(command, 0, 0);
	case NAND_CMD_ERASE1:
		/* Erase takes a block address: page bits shifted past the A8 gap. */
		nand_write_command(command, ((page_addr & 0xFFFFFF00) << 1) | (page_addr & 0XFF), 1);
		nand_write_command(command, (page_addr << this->page_shift) | column, 1);
/*
 * omap_nand_command_lp - nand_chip.cmdfunc hook for large-page (2 KiB)
 * devices.  READOOB is translated to READ0 at an offset past the page
 * data, and READ0 is completed with READSTART as large-page chips require.
 *
 * NOTE(review): the switch header, breaks and default case are in elided
 * lines; fragments below are kept verbatim.
 */
static void omap_nand_command_lp(struct mtd_info *mtd, unsigned command, int column, int page_addr)
	struct nand_chip *this = mtd->priv;
	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_ERASE2:
		nand_write_command(command, 0, 0);
	case NAND_CMD_ERASE1:
		/* Block address only: drop the in-page bits. */
		nand_write_command(command, page_addr << this->page_shift >> 11, 1);
		/* Default: row in the high half-word, column in the low. */
		nand_write_command(command, (page_addr << 16) | column, 1);
	if (command == NAND_CMD_READ0)
		nand_write_command(NAND_CMD_READSTART, 0, 0);
/*
 * Generate non-inverted ECC bytes.
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * nobody is trying to write data on the seemingly unused page.
 *
 * Reading an erased page will produce an ECC mismatch between
 * generated and read ECC bytes that has to be dealt with separately.
 *
 * NOTE(review): the declarations of `l`/`reg`, the per-256/512-byte-block
 * loop and the return are in elided lines; fragments kept verbatim.
 */
static int omap_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
	struct nand_chip *this = mtd->priv;
	/* Ex NAND_ECC_HW12_2048 */
	if ((this->ecc.mode == NAND_ECC_HW) && (this->ecc.size == 2048))
	/* Pack the controller's 32-bit ECC result into 3 bytes. */
	l = nand_read_reg(reg);
	*ecc_code++ = l;          // P128e, ..., P1e
	*ecc_code++ = l >> 16;    // P128o, ..., P1o
	// P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e
	*ecc_code++ = ((l >> 8) & 0x0f) | ((l >> 20) & 0xf0);
502 * This function will generate true ECC value, which can be used
503 * when correcting data read from NAND flash memory core
505 static void gen_true_ecc(u8 *ecc_buf)
507 u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) | ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
509 ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) | P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp) );
510 ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) | P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
511 ecc_buf[2] = ~( P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) | P1e(tmp) | P2048o(tmp) | P2048e(tmp));
/*
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 *
 * Classic single-bit Hamming ECC comparison: both 3-byte codes are
 * normalized with gen_true_ecc(), expanded bit-by-bit, and XORed.  The
 * number of differing bits determines the outcome: a specific pattern
 * (half the bits set) locates a correctable single-bit flip, which is
 * repaired in @page_data.
 *
 * NOTE(review): this view of the file is incomplete — the parameter tail
 * (@page_data), local declarations (i, ecc_bit[24], ecc_sum, find_byte,
 * find_bit, isEccFF), the erased-page (0xFFFFFF) early-outs, the
 * switch (ecc_sum) case labels and the find_byte continuation are all in
 * elided lines.  Fragments below are kept verbatim.
 */
static int omap_nand_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
				 u8 *ecc_data2,	/* read from register */
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	/* All-0xFF read ECC marks an erased page (no real ECC stored). */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);
	/* Work on the complemented codes from here on. */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	/* Explode each byte of both codes into per-bit arrays. */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	/* XOR the two codes into a 24-entry difference vector. */
	for (i = 0; i< 6; i++ )
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
	/* Count differing bits to classify the error. */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];
	/* case 0: codes identical. */
		/* Not reached because this function is not called if
		   ECC values are equal */
		/* Uncorrectable error */
		DEBUG (MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
	find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
	DEBUG (MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at offset: %d, bit: %d\n", find_byte, find_bit);
	/* Flip the located bit back. */
	page_data[find_byte] ^= (1 << find_bit);
	/* default: if the register-side ECC is all zero, treat as OK. */
	if (ecc_data2[0] == 0 && ecc_data2[1] == 0 && ecc_data2[2] == 0)
	DEBUG (MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
/*
 * omap_nand_correct_data - nand_chip.ecc.correct hook.
 *
 * Walks the 3-byte ECC tuples (block_count of them, 4 for the HW/2048
 * configuration) and calls omap_nand_compare_ecc() for each mismatching
 * tuple to attempt single-bit correction.
 *
 * NOTE(review): the assignment of `this = mtd->priv`, the block_count
 * selection, the pointer advancing and the returns are in elided lines;
 * fragments below are kept verbatim.
 */
static int omap_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
	struct nand_chip *this;
	int block_count = 0, i, r;
	/* Ex NAND_ECC_HW12_2048 */
	if ((this->ecc.mode == NAND_ECC_HW) && (this->ecc.size == 2048))
	for (i = 0; i < block_count; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			r = omap_nand_compare_ecc(read_ecc, calc_ecc, dat);
650 static void omap_nand_enable_hwecc(struct mtd_info *mtd, int mode)
652 nand_write_reg(NND_RESET, 0x01);
#ifdef CONFIG_MTD_CMDLINE_PARTS

extern int mtdpart_setup(char *);

/*
 * add_dynamic_parts - build the partition table from a board-provided
 * cmdline-style partition string (OMAP_TAG_FLASH_PART_STR), feed it to
 * the "cmdlinepart" parser, and register the resulting partitions.
 *
 * NOTE(review): error paths (allocation failure return, parse-failure
 * handling, kfree of part_str, final return) and several braces are in
 * elided lines; fragments below are kept verbatim.
 */
static int __init add_dynamic_parts(struct mtd_info *mtd)
	static const char *part_parsers[] = { "cmdlinepart", NULL };
	struct mtd_partition *parts;
	const struct omap_flash_part_str_config *cfg;
	char *part_str = NULL;
	cfg = omap_get_var_config(OMAP_TAG_FLASH_PART_STR, &part_str_len);
	/* Copy the board string so it can be NUL-terminated for the parser. */
	part_str = kmalloc(part_str_len + 1, GFP_KERNEL);
	if (part_str == NULL)
	memcpy(part_str, cfg->part_table, part_str_len);
	part_str[part_str_len] = '\0';
	mtdpart_setup(part_str);
	c = parse_mtd_partitions(omap_mtd, part_parsers, &parts, 0);
	if (part_str != NULL) {
	add_mtd_partitions(mtd, parts, c);

/* !CONFIG_MTD_CMDLINE_PARTS stub — body is in an elided line. */
static inline int add_dynamic_parts(struct mtd_info *mtd)
/*
 * calc_psc - convert a timing requirement to a clock-cycle count.
 * @ns:       required delay in nanoseconds
 * @cycle_ps: duration of one functional-clock cycle in picoseconds
 *
 * Returns the number of cycles covering @ns, rounded up (ceiling
 * division of ns*1000 by cycle_ps).
 * (Body braces restored; the extracted view had lost them.)
 */
static inline int calc_psc(int ns, int cycle_ps)
{
	return (ns * 1000 + (cycle_ps - 1)) / cycle_ps;
}
/*
 * set_psc_regs - program the three PSC timing registers from nanosecond
 * requirements, scaled by the current functional clock rate.
 *
 * The registers hold (cycles - 1); the loop presumably clamps each value
 * into the valid range (the lower clamp and the >256 branch body are in
 * elided lines, as are the declarations of `i` and `psc[]`) — TODO
 * confirm against the full source.
 */
static void set_psc_regs(int psc_ns, int psc1_ns, int psc2_ns)
	unsigned long rate, ps;
	/* Clock period in picoseconds. */
	rate = clk_get_rate(omap_nand_clk);
	ps = 1000000000 / (rate / 1000);
	psc[0] = calc_psc(psc_ns, ps);
	psc[1] = calc_psc(psc1_ns, ps);
	psc[2] = calc_psc(psc2_ns, ps);
	for (i = 0; i < 3; i++) {
		else if (psc[i] > 256)
	nand_write_reg(NND_PSC_CLK, psc[0] - 1);
	nand_write_reg(NND_PSC1_CLK, psc[1] - 1);
	nand_write_reg(NND_PSC2_CLK, psc[2] - 1);
	printk(KERN_INFO "omap-hw-nand: using PSC values %d, %d, %d\n", psc[0], psc[1], psc[2]);
/*
 * Main initialization routine.
 *
 * Clocks and resets the controller, allocates the mtd_info + nand_chip
 * pair in one block, optionally reserves a DMA channel, wires up all
 * nand_chip callbacks, scans for the chip, switches to large-page mode
 * (12-byte HW ECC over 2048) when page_shift == 11, runs the BBT scan and
 * registers partitions.
 *
 * NOTE(review): this view of the file is incomplete — error-path labels
 * and gotos, the NULL check after kmalloc, the CONFIG_ARCH_OMAP16XX
 * guard presumably around the DMA request, and several braces/returns
 * are in elided lines.  Fragments below are kept verbatim.
 */
static int __init omap_nand_init(void)
	struct nand_chip *this;
	omap_nand_clk = clk_get(NULL, "armper_ck");
	BUG_ON(omap_nand_clk == NULL);
	clk_enable(omap_nand_clk);
	l = nand_read_reg(NND_REVISION);
	printk(KERN_INFO "omap-hw-nand: OMAP NAND Controller rev. %d.%d\n", l>>4, l & 0xf);
	/* Reset the NAND Controller */
	nand_write_reg(NND_SYSCFG, 0x02);
	while ((nand_read_reg(NND_SYSSTATUS) & 0x01) == 0);
	/* No Prefetch, no postwrite, write prot & enable pairs disabled,
	   addres counter set to send 4 byte addresses to flash,
	   A8 is set not to be sent to flash (erase addre needs formatting),
	   choose little endian, enable 512 byte ECC logic, ... */
	nand_write_reg(NND_CTRL, 0xFF01);
	/* Allocate memory for MTD device structure and private data */
	omap_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
	printk(KERN_WARNING "omap-hw-nand: Unable to allocate OMAP NAND MTD device structure.\n");
	/* DMA is optional: fall back to PIO when the request fails. */
	err = omap_request_dma(OMAP_DMA_NAND, "NAND", nand_dma_cb,
			       &omap_nand_dma_comp, &omap_nand_dma_ch);
	printk(KERN_WARNING "omap-hw-nand: Unable to reserve DMA channel\n");
	omap_nand_dma_ch = -1;
	omap_nand_dma_ch = -1;
	/* Get pointer to private data */
	this = (struct nand_chip *) (&omap_mtd[1]);
	/* Initialize structures */
	memset((char *) omap_mtd, 0, sizeof(struct mtd_info));
	memset((char *) this, 0, sizeof(struct nand_chip));
	/* Link the private data with the MTD structure */
	omap_mtd->priv = this;
	omap_mtd->name = "omap-nand";
	this->options = NAND_SKIP_BBTSCAN;
	/* Used from chip select and nand_command() */
	this->read_byte = omap_nand_read_byte;
	this->select_chip = omap_nand_select_chip;
	this->dev_ready = omap_nand_dev_ready;
	this->chip_delay = 0;
	this->ecc.mode = NAND_ECC_HW;
	this->ecc.size = 512;
	this->cmdfunc = omap_nand_command;
	this->write_buf = omap_nand_write_buf;
	this->read_buf = omap_nand_read_buf;
	this->verify_buf = omap_nand_verify_buf;
	this->ecc.calculate = omap_nand_calculate_ecc;
	this->ecc.correct = omap_nand_correct_data;
	this->ecc.hwctl = omap_nand_enable_hwecc;
	nand_write_reg(NND_SYSCFG, 0x1); /* Enable auto idle */
	nand_write_reg(NND_PSC_CLK, 10);
	/* Scan to find existance of the device */
	if (nand_scan(omap_mtd, 1)) {
	set_psc_regs(25, 15, 35);
	/* Large-page (2 KiB) chip detected: switch command set and ECC. */
	if (this->page_shift == 11) {
		this->cmdfunc = omap_nand_command_lp;
		l = nand_read_reg(NND_CTRL);
		l |= 1 << 4; /* Set the A8 bit in CTRL reg */
		nand_write_reg(NND_CTRL, l);
		this->ecc.mode = NAND_ECC_HW;
		this->ecc.size = 2048;
		this->ecc.bytes = 12;
		nand_write_reg(NND_ECC_SELECT, 6);
	/* We have to do bbt scanning ourselves */
	if (this->scan_bbt (omap_mtd)) {
	err = add_dynamic_parts(omap_mtd);
	printk(KERN_ERR "omap-hw-nand: no partitions defined\n");
	nand_release(omap_mtd);
	/* Error unwinding: free DMA channel and drop the clock. */
	if (omap_nand_dma_ch >= 0)
		omap_free_dma(omap_nand_dma_ch);
	clk_put(omap_nand_clk);
module_init(omap_nand_init);

/*
 * Module exit: disable and release the clock, then unregister the MTD.
 *
 * NOTE(review): the lines elided after nand_release() likely include the
 * kfree of omap_mtd — confirm in full source.  Also note the clock is
 * disabled *before* nand_release(); verify nand_release() does not still
 * need the controller clocked.
 */
static void __exit omap_nand_cleanup (void)
	clk_disable(omap_nand_clk);
	clk_put(omap_nand_clk);
	nand_release(omap_mtd);

module_exit(omap_nand_cleanup);