2 * drivers/mtd/nand/omap-hw.c
4 * This is the MTD driver for OMAP1710 internal HW NAND controller.
6 * Copyright (C) 2004-2006 Nokia Corporation
8 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and
9 * Juha Yrjölä <juha.yrjola@nokia.com>
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * You should have received a copy of the GNU General Public License along with
21 * this program; see the file COPYING. If not, write to the Free Software
22 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/delay.h>
31 #include <linux/errno.h>
32 #include <linux/sched.h>
33 #include <linux/types.h>
34 #include <linux/wait.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/mtd/mtd.h>
38 #include <linux/mtd/nand.h>
39 #include <linux/mtd/partitions.h>
40 #include <linux/mtd/nand_ecc.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/clk.h>
46 #include <mach/board.h>
/*
 * OMAP NAND controller register offsets, relative to NAND_BASE.
 * NOTE(review): NND_CTRL, NND_ADDR_DST and NND_FIFO are used later in
 * this file but their #defines are not visible in this view — they are
 * presumably on lines elided from the extraction; confirm against the
 * full source.
 */
#define NAND_BASE	0xfffbcc00
#define NND_REVISION	0x00
#define NND_ACCESS	0x04
#define NND_ADDR_SRC	0x08
#define NND_STATUS	0x18
#define NND_READY	0x1c
#define NND_COMMAND	0x20
#define NND_COMMAND_SEC	0x24
#define NND_ECC_SELECT	0x28
#define NND_ECC_START	0x2c
#define NND_ECC_9	0x4c
#define NND_RESET	0x50
#define NND_FIFOCTRL	0x58
#define NND_PSC_CLK	0x5c
#define NND_SYSTEST	0x60
#define NND_SYSCFG	0x64
#define NND_SYSSTATUS	0x68
#define NND_FIFOTEST1	0x6c
#define NND_FIFOTEST2	0x70
#define NND_FIFOTEST3	0x74
#define NND_FIFOTEST4	0x78
#define NND_PSC1_CLK	0x8c
#define NND_PSC2_CLK	0x90

/* NAND flash command opcodes, issued through NND_COMMAND/NND_COMMAND_SEC */
#define NND_CMD_READ1_LOWER	0x00
#define NND_CMD_WRITE1_LOWER	0x00
#define NND_CMD_READ1_UPPER	0x01
#define NND_CMD_WRITE1_UPPER	0x01
#define NND_CMD_PROGRAM_END	0x10
#define NND_CMD_READ2_SPARE	0x50
#define NND_CMD_WRITE2_SPARE	0x50
#define NND_CMD_ERASE		0x60
#define NND_CMD_STATUS		0x70
#define NND_CMD_PROGRAM		0x80
#define NND_CMD_READ_ID		0x90
#define NND_CMD_ERASE_END	0xD0
#define NND_CMD_RESET		0xFF

/*
 * Bit positions of the even (Pxe) and odd (Pxo) parity bits in the
 * 32-bit hardware ECC result register: even parities in bits 0..11,
 * odd parities in bits 16..27.
 */
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

/* TF(): collapse a masked bit test to exactly 0 or 1 */
#define TF(value)	(value ? 1 : 0)

/* Repack register parity bits into ECC byte 2 (P1/P2/P4 + P2048) —
 * see gen_true_ecc() below for how these three groups are consumed. */
#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0 )
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1 )
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2 )
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3 )
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4 )
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5 )
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6 )
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7 )

/* ECC byte 0: P8..P64 parity pairs */
#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0 )
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1 )
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2 )
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3 )
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4 )
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5 )
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6 )
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7 )

/* ECC byte 1: P128..P1024 parity pairs */
#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0 )
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1 )
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2 )
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3 )
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4 )
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5 )
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6 )
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7 )

/* "_s" variants: alternate packing — presumably for the smaller
 * (256/512-byte) ECC block layout; confirm against full source. */
#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0 )
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1 )
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2 )
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3 )
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4 )
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5 )
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6 )
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7 )

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0 )
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1 )
/* OOB layout used by JFFS2, defined elsewhere in the MTD core */
extern struct nand_oobinfo jffs2_oobinfo;

/*
 * MTD structure for OMAP board
 */
static struct mtd_info *omap_mtd;		/* the single MTD device */
static struct clk *omap_nand_clk;		/* "armper_ck" clock (see omap_nand_init) */
static int omap_nand_dma_ch;			/* system DMA channel, or -1 if unavailable */
static struct completion omap_nand_dma_comp;	/* completed from nand_dma_cb() */
static unsigned long omap_nand_base = io_p2v(NAND_BASE);	/* virtual base of controller regs */
/* Read a 32-bit controller register at byte offset @idx. */
static inline u32 nand_read_reg(int idx)
{
	return __raw_readl(omap_nand_base + idx);
}
/* Write 32-bit @val to the controller register at byte offset @idx. */
static inline void nand_write_reg(int idx, u32 val)
{
	__raw_writel(val, omap_nand_base + idx);
}
/* Read an 8-bit controller register at byte offset @idx. */
static inline u8 nand_read_reg8(int idx)
{
	return __raw_readb(omap_nand_base + idx);
}
/* Write 8-bit @val to the controller register at byte offset @idx. */
static inline void nand_write_reg8(int idx, u8 val)
{
	__raw_writeb(val, omap_nand_base + idx);
}
/*
 * mtd select_chip hook: enable/disable the NAND chip selects in NND_CTRL.
 * NOTE(review): the local declaration (`u32 l;`) and the switch on @chip
 * (deselect vs. CS0 select arms) are elided from this view — only the
 * register accesses of both arms are visible below; confirm structure
 * against the full source.
 */
static void omap_nand_select_chip(struct mtd_info *mtd, int chip)
	/* deselect: mask off every chip-select enable pair */
	l = nand_read_reg(NND_CTRL);
	l |= (1 << 8) | (1 << 10) | (1 << 12) | (1 << 14);
	nand_write_reg(NND_CTRL, l);

	/* Also CS1, CS2, CS4 would be available */
	l = nand_read_reg(NND_CTRL);
	nand_write_reg(NND_CTRL, l);
/*
 * DMA completion callback; @data is the completion registered in
 * omap_request_dma() (omap_nand_dma_comp). Wakes the waiter in
 * omap_nand_dma_transfer().
 */
static void nand_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *) data);
}
/*
 * Move @u32_count 32-bit words between memory at @addr and the controller
 * FIFO using system DMA; @is_write selects direction.  The transfer is
 * programmed in 16-byte FIFO blocks and bounded by a 1 s completion
 * timeout, after which a warning with the worst observed latency is
 * printed.
 *
 * NOTE(review): this view is missing interleaved lines — the `int dma_ch;`
 * declaration, the if/else on @is_write around the two DMA setup groups,
 * the trailing arguments of the omap_set_dma_* calls, and the conditions
 * guarding the timeout warning and cache maintenance.  Also note the
 * caller at omap_nand_write_buf() compares this function's result with 0,
 * which conflicts with the `void` return visible here — verify the real
 * return type in the full source.
 */
static void omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
				   unsigned int u32_count, int is_write)
	const int block_size = 16;
	unsigned int block_count, len;
	unsigned long fifo_reg, timeout, jiffies_before, jiffies_spent;
	static unsigned long max_jiffies = 0;	/* worst DMA latency seen so far */

	dma_ch = omap_nand_dma_ch;
	block_count = u32_count * 4 / block_size;
	nand_write_reg(NND_STATUS, 0x0f);	/* clear pending status bits */
	nand_write_reg(NND_FIFOCTRL, (block_size << 24) | block_count);
	/* DMA needs the physical FIFO address, not the io_p2v()'d one */
	fifo_reg = NAND_BASE + NND_FIFO;

	/* write path: memory (EMIFF, post-increment) -> FIFO (TIPB, constant) */
	omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_TIPB,
				 OMAP_DMA_AMODE_CONSTANT, fifo_reg,
	omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_EMIFF,
				OMAP_DMA_AMODE_POST_INC,
//	omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	/* Set POSTWRITE bit */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 16));

	/* read path: FIFO (TIPB, constant) -> memory (EMIFF, post-increment) */
	omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_TIPB,
				OMAP_DMA_AMODE_CONSTANT, fifo_reg,
	omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_EMIFF,
				 OMAP_DMA_AMODE_POST_INC,
//	omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_8);
	/* Set PREFETCH bit */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 17));

	/* one frame per FIFO block, block_size/4 = 4 words per frame */
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, block_size / 4,
				     block_count, OMAP_DMA_SYNC_FRAME,
	init_completion(&omap_nand_dma_comp);

	len = u32_count << 2;	/* byte count for cache maintenance */
	dma_cache_maint(addr, len, DMA_TO_DEVICE);
	omap_start_dma(dma_ch);
	jiffies_before = jiffies;
	timeout = wait_for_completion_timeout(&omap_nand_dma_comp,
					      msecs_to_jiffies(1000));
	jiffies_spent = (unsigned long)((long)jiffies - (long)jiffies_before);
	if (jiffies_spent > max_jiffies)
		max_jiffies = jiffies_spent;

	printk(KERN_WARNING "omap-hw-nand: DMA timeout after %u ms, max. seen latency %u ms\n",
	       jiffies_to_msecs(jiffies_spent),
	       jiffies_to_msecs(max_jiffies));

	dma_cache_maint(addr, len, DMA_FROM_DEVICE);
	/* clear POSTWRITE and PREFETCH again */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) & ~((1 << 16) | (1 << 17)));
/*
 * PIO fallback for reads: drain @len 32-bit words from the controller
 * FIFO into @out, one 16-byte block at a time, with the PREFETCH bit
 * (NND_CTRL bit 17) enabled for the duration.
 * NOTE(review): the outer per-block loop, the declarations of `i`/`c`,
 * and the initialisation of `c` (presumably block_size / 4) are elided
 * from this view — confirm against the full source.
 */
static void fifo_read(u32 *out, unsigned int len)
	const int block_size = 16;
	unsigned long status_reg, fifo_reg;

	status_reg = omap_nand_base + NND_STATUS;
	fifo_reg = omap_nand_base + NND_FIFO;
	len = len * 4 / block_size;	/* convert word count to block count */
	nand_write_reg(NND_FIFOCTRL, (block_size << 24) | len);
	nand_write_reg(NND_STATUS, 0x0f);	/* clear status */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 17));	/* PREFETCH on */

	/* busy-wait for "FIFO block ready" (status bit 2), then ack it */
	while ((__raw_readl(status_reg) & (1 << 2)) == 0);
	__raw_writel(0x0f, status_reg);
	for (i = 0; i < c; i++) {
		u32 l = __raw_readl(fifo_reg);

	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) & ~(1 << 17));	/* PREFETCH off */
	nand_write_reg(NND_STATUS, 0x0f);
/*
 * mtd read_buf hook.  Word-aligned buffers with word-multiple lengths are
 * read 32 bits at a time; large (>256 byte, 16-byte-multiple) transfers
 * go through DMA when a channel was reserved, falling back to FIFO copy.
 * Unaligned buffers degrade to byte-wide reads of NND_ACCESS.
 * NOTE(review): the copy loops, the `return`/`else` structure and the
 * DMA-error check are elided from this view.
 */
static void omap_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
	unsigned long access_reg;

	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		int u32_count = len >> 2;
		u32 *dest = (u32 *) buf;

		/* If the transfer is big enough and the length divisible by
		 * 16, we try to use DMA transfer, or FIFO copy in case of
		 * DMA failure (e.g. all channels busy) */
		if (u32_count > 64 && (u32_count & 3) == 0) {
			if (omap_nand_dma_ch >= 0) {
				omap_nand_dma_transfer(mtd, buf, u32_count, 0);

			/* In case of an error, fallback to FIFO copy */
			fifo_read((u32 *) buf, u32_count);

		access_reg = omap_nand_base + NND_ACCESS;
		/* Small buffers we just read directly */
			*dest++ = __raw_readl(access_reg);

	/* If we're not word-aligned, we use byte copy */
	access_reg = omap_nand_base + NND_ACCESS;
		*buf++ = __raw_readb(access_reg);
/*
 * mtd write_buf hook — mirror of omap_nand_read_buf(): word-wide writes
 * to NND_ACCESS for aligned buffers (DMA for large ones), byte-wide
 * writes otherwise.
 * NOTE(review): loops and the else branch are elided from this view.
 * The `== 0` test on omap_nand_dma_transfer() conflicts with the void
 * return type visible at its definition above — verify in full source.
 */
static void omap_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		const u32 *src = (const u32 *) buf;

		/* If the transfer is big enough and length divisible by 16,
		 * we try to use DMA transfer. */
		if (len > 256 / 4 && (len & 3) == 0) {
			if (omap_nand_dma_transfer(mtd, (void *) buf, len, 1) == 0)

			/* In case of an error, fallback to CPU copy */

			nand_write_reg(NND_ACCESS, *src++);

		nand_write_reg8(NND_ACCESS, *buf++);
/*
 * mtd verify_buf hook: re-read the just-programmed data through
 * NND_ACCESS and compare against @buf; word-wide when aligned, byte-wide
 * otherwise.  Returns nonzero (-EFAULT, presumably) on mismatch.
 * NOTE(review): loops, else branch and return statements are elided
 * from this view.
 */
static int omap_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		const u32 *dest = (const u32 *) buf;

			if (*dest++ != nand_read_reg(NND_ACCESS))

		if (*buf++ != nand_read_reg8(NND_ACCESS))
/* mtd read_byte hook: one byte from the controller data access register. */
static u_char omap_nand_read_byte(struct mtd_info *mtd)
{
	return nand_read_reg8(NND_ACCESS);
}
/*
 * mtd dev_ready hook: sample the NND_READY register.
 * NOTE(review): the declaration of `l` and the return expression
 * (presumably `l & 0x01`) are elided from this view.
 */
static int omap_nand_dev_ready(struct mtd_info *mtd)
	l = nand_read_reg(NND_READY);
/*
 * Issue @cmd to the flash through the controller.  With a valid address
 * the command goes through NND_COMMAND together with NND_ADDR_SRC; without
 * one, through NND_COMMAND_SEC with a zeroed address.  Busy-waits until
 * the device reports ready.
 * NOTE(review): the if/else on @addr_valid and the return statement are
 * elided from this view.
 */
static int nand_write_command(u8 cmd, u32 addr, int addr_valid)
	/* addr_valid path: program the address, then the command */
	nand_write_reg(NND_ADDR_SRC, addr);
	nand_write_reg8(NND_COMMAND, cmd);
	/* no-address path: secondary command register */
	nand_write_reg(NND_ADDR_SRC, 0);
	nand_write_reg8(NND_COMMAND_SEC, cmd);
	while (!omap_nand_dev_ready(NULL));
/*
 * Send command to NAND device (cmdfunc hook for small-page chips).
 * For SEQIN, a read command is issued first to position the device
 * pointer (READ0/READ1/READOOB depending on the target column).
 * NOTE(review): the switch statement, break statements, and default
 * case of the command dispatch are elided from this view.
 */
static void omap_nand_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
	struct nand_chip *this = mtd->priv;

	/*
	 * Write out the command to the device.
	 */
	if (command == NAND_CMD_SEQIN) {
		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;

		readcmd = NAND_CMD_READ1;

		nand_write_command(readcmd, 0, 0);

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_ERASE2:
		nand_write_command(command, 0, 0);

	case NAND_CMD_ERASE1:
		/* block address: page number with A8 gap re-inserted */
		nand_write_command(command, ((page_addr & 0xFFFFFF00) << 1) | (page_addr & 0XFF), 1);

		nand_write_command(command, (page_addr << this->page_shift) | column, 1);
/*
 * cmdfunc hook for large-page (2 KiB) chips.  READOOB is remapped to a
 * READ0 at writesize + column, and READ0 is followed by READSTART as the
 * large-page protocol requires.
 * NOTE(review): the switch statement and break statements of the command
 * dispatch are elided from this view.
 */
static void omap_nand_command_lp(struct mtd_info *mtd, unsigned command, int column, int page_addr)
	struct nand_chip *this = mtd->priv;

	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_ERASE2:
		nand_write_command(command, 0, 0);

	case NAND_CMD_ERASE1:
		/* block address: page address shifted to the erase-unit boundary */
		nand_write_command(command, page_addr << this->page_shift >> 11, 1);

		nand_write_command(command, (page_addr << 16) | column, 1);

	if (command == NAND_CMD_READ0)
		nand_write_command(NAND_CMD_READSTART, 0, 0);
/*
 * Generate non-inverted ECC bytes.
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * nobody is trying to write data on the seemingly unused page.
 *
 * Reading an erased page will produce an ECC mismatch between
 * generated and read ECC bytes that has to be dealt with separately.
 *
 * NOTE(review): the declarations of `l`/`reg`, the block-count selection,
 * and the loop reading one ECC result register per 512/2048-byte block
 * are elided from this view.
 */
static int omap_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
	struct nand_chip *this = mtd->priv;

	/* Ex NAND_ECC_HW12_2048 */
	if ((this->ecc.mode == NAND_ECC_HW) && (this->ecc.size == 2048))

	l = nand_read_reg(reg);
	*ecc_code++ = l;          // P128e, ..., P1e
	*ecc_code++ = l >> 16;    // P128o, ..., P1o
	// P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e
	*ecc_code++ = ((l >> 8) & 0x0f) | ((l >> 20) & 0xf0);
/*
 * This function will generate true ECC value, which can be used
 * when correcting data read from NAND flash memory core.
 *
 * Unpacks the three hardware-layout ECC bytes back into the register
 * bit order (see the Pxe/Pxo macros above), then repacks and inverts
 * them into the conventional byte layout:
 *   byte 0 = ~(P64..P8), byte 1 = ~(P1024..P128), byte 2 = ~(P4..P1,P2048).
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) | ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) | P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) | P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) | P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
/*
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 *
 * Standard Hamming-style single-bit ECC correction: both ECCs are
 * normalised with gen_true_ecc(), expanded bit by bit, and XOR'd.
 * If exactly half the 24 syndrome bits (12) differ, a single-bit error
 * is located from the odd parity bits and flipped in the page buffer.
 *
 * NOTE(review): heavily elided in this view — the third parameter
 * (`u8 *page_data`, used below), several local declarations (ecc_bit[24],
 * ecc_sum, find_byte, find_bit, isEccFF), the switch on ecc_sum, and the
 * isEccFF erased-page short-circuit are all on lines not shown here.
 *
 * NOTE(review): `*(u32 *)ecc_data1` reads 4 bytes through a cast u8
 * pointer — potential unaligned access / strict-aliasing concern; the
 * buffer is at least 3 bytes, the 4th byte is masked off.
 */
static int omap_nand_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
				 u8 *ecc_data2,	/* read from register */
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];

	/* all-0xFF stored ECC means the page is (probably) erased */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	/* undo the inversion done by gen_true_ecc() */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));

	/* expand each ECC byte into individual bits (LSB first) */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;

	/* build the 24-bit syndrome from the bit-wise differences */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	/* ecc_sum == 0: Not reached because this function is not called if
	   ECC values are equal */

	/* Uncorrectable error */
	DEBUG (MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");

	/* ecc_sum == 12: Correctable single-bit error; locate it from the
	   odd syndrome bits */
	find_byte = (ecc_bit[23] << 8) +
	find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

	DEBUG (MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at offset: %d, bit: %d\n", find_byte, find_bit);

	page_data[find_byte] ^= (1 << find_bit);

	/* erased page with all-zero computed ECC is fine */
	if (ecc_data2[0] == 0 && ecc_data2[1] == 0 && ecc_data2[2] == 0)

	DEBUG (MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
/*
 * mtd ecc.correct hook: compare the stored and computed ECC for each
 * 512-byte sub-block (4 sub-blocks in HW12_2048 mode, with 3 ECC bytes
 * each) and attempt single-bit correction on mismatch.
 * NOTE(review): the `this = mtd->priv` assignment, the block_count
 * selection, the per-block pointer advances and the return handling are
 * elided from this view.
 */
static int omap_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
	struct nand_chip *this;
	int block_count = 0, i, r;

	/* Ex NAND_ECC_HW12_2048 */
	if ((this->ecc.mode == NAND_ECC_HW) && (this->ecc.size == 2048))

	for (i = 0; i < block_count; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			r = omap_nand_compare_ecc(read_ecc, calc_ecc, dat);
/*
 * mtd ecc.hwctl hook: writing 0x01 to NND_RESET restarts the hardware
 * ECC machinery for the next transfer; @mode is unused.
 */
static void omap_nand_enable_hwecc(struct mtd_info *mtd, int mode)
{
	nand_write_reg(NND_RESET, 0x01);
}
#ifdef CONFIG_MTD_CMDLINE_PARTS

extern int mtdpart_setup(char *);

/*
 * Build the partition table from the board's OMAP_TAG_FLASH_PART_STR
 * ATAG (if present) and/or the kernel command line, then register the
 * parsed partitions with the MTD core.
 * NOTE(review): declarations (part_str_len, c), null checks, kfree of
 * part_str, and the error/return paths are elided from this view, as is
 * the #else/#endif around the stub variant below.
 */
static int __init add_dynamic_parts(struct mtd_info *mtd)
	static const char *part_parsers[] = { "cmdlinepart", NULL };
	struct mtd_partition *parts;
	const struct omap_flash_part_str_config *cfg;
	char *part_str = NULL;

	cfg = omap_get_var_config(OMAP_TAG_FLASH_PART_STR, &part_str_len);
	/* copy the board partition string and feed it to the cmdline parser */
	part_str = kmalloc(part_str_len + 1, GFP_KERNEL);
	if (part_str == NULL)

	memcpy(part_str, cfg->part_table, part_str_len);
	part_str[part_str_len] = '\0';
	mtdpart_setup(part_str);

	c = parse_mtd_partitions(omap_mtd, part_parsers, &parts, 0);
	if (part_str != NULL) {

	add_mtd_partitions(mtd, parts, c);

/* stub when cmdline partition parsing is not configured */
static inline int add_dynamic_parts(struct mtd_info *mtd)
/*
 * Convert a delay of @ns nanoseconds into a whole number of functional
 * clock cycles, rounding up (ceil(ns * 1000 / cycle_ps)).
 *
 * @ns:       requested delay in nanoseconds
 * @cycle_ps: duration of one clock cycle in picoseconds
 *
 * Returns at least 1 cycle if @cycle_ps is not positive (defensive: the
 * caller derives cycle_ps from a runtime clock rate, and a zero value
 * would otherwise divide by zero).
 */
static inline int calc_psc(int ns, int cycle_ps)
{
	if (cycle_ps <= 0)
		return 1;
	return (ns * 1000 + (cycle_ps - 1)) / cycle_ps;
}
/*
 * Program the three NAND timing prescaler registers from nanosecond
 * values: each is converted to functional-clock cycles via calc_psc()
 * (cycle time computed in picoseconds from the clock rate), clamped to
 * the hardware range, and written minus one (register holds cycles-1).
 * NOTE(review): the declarations (psc[3], i) and the clamp assignments
 * inside the range-check loop are elided from this view.
 */
static void set_psc_regs(int psc_ns, int psc1_ns, int psc2_ns)
	unsigned long rate, ps;

	rate = clk_get_rate(omap_nand_clk);
	ps = 1000000000 / (rate / 1000);	/* clock cycle time in picoseconds */
	psc[0] = calc_psc(psc_ns, ps);
	psc[1] = calc_psc(psc1_ns, ps);
	psc[2] = calc_psc(psc2_ns, ps);
	/* clamp each prescaler to the valid hardware range (max 256) */
	for (i = 0; i < 3; i++) {

	else if (psc[i] > 256)

	nand_write_reg(NND_PSC_CLK, psc[0] - 1);
	nand_write_reg(NND_PSC1_CLK, psc[1] - 1);
	nand_write_reg(NND_PSC2_CLK, psc[2] - 1);
	printk(KERN_INFO "omap-hw-nand: using PSC values %d, %d, %d\n", psc[0], psc[1], psc[2]);
/*
 * Main initialization routine: enable the functional clock, reset the
 * controller, allocate the mtd_info + nand_chip pair, reserve a DMA
 * channel (optional — PIO is the fallback), register the chip callbacks,
 * scan for the device, switch to large-page handling when a 2 KiB-page
 * chip is found, scan the bad-block table, and register partitions.
 *
 * NOTE(review): error-path lines (null checks, `err` handling, gotos and
 * labels, returns) and the if/else / #ifdef structure around the DMA
 * request are elided from this view — the statements below are only the
 * visible subset.
 */
static int __init omap_nand_init(void)
	struct nand_chip *this;

	omap_nand_clk = clk_get(NULL, "armper_ck");
	BUG_ON(omap_nand_clk == NULL);
	clk_enable(omap_nand_clk);

	l = nand_read_reg(NND_REVISION);
	printk(KERN_INFO "omap-hw-nand: OMAP NAND Controller rev. %d.%d\n", l>>4, l & 0xf);

	/* Reset the NAND Controller */
	nand_write_reg(NND_SYSCFG, 0x02);
	while ((nand_read_reg(NND_SYSSTATUS) & 0x01) == 0);

	/* No Prefetch, no postwrite, write prot & enable pairs disabled,
	   address counter set to send 4 byte addresses to flash,
	   A8 is set not to be sent to flash (erase address needs formatting),
	   choose little endian, enable 512 byte ECC logic */
	nand_write_reg(NND_CTRL, 0xFF01);

	/* Allocate memory for MTD device structure and private data */
	omap_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
	printk(KERN_WARNING "omap-hw-nand: Unable to allocate OMAP NAND MTD device structure.\n");

	/* DMA is optional: on failure fall back to PIO with dma_ch == -1 */
	err = omap_request_dma(OMAP_DMA_NAND, "NAND", nand_dma_cb,
			       &omap_nand_dma_comp, &omap_nand_dma_ch);
	printk(KERN_WARNING "omap-hw-nand: Unable to reserve DMA channel\n");
	omap_nand_dma_ch = -1;
	omap_nand_dma_ch = -1;

	/* Get pointer to private data */
	this = (struct nand_chip *) (&omap_mtd[1]);

	/* Initialize structures */
	memset((char *) omap_mtd, 0, sizeof(struct mtd_info));
	memset((char *) this, 0, sizeof(struct nand_chip));

	/* Link the private data with the MTD structure */
	omap_mtd->priv = this;
	omap_mtd->name = "omap-nand";

	this->options = NAND_SKIP_BBTSCAN;	/* BBT scan done manually below */

	/* Used from chip select and nand_command() */
	this->read_byte = omap_nand_read_byte;
	this->select_chip = omap_nand_select_chip;
	this->dev_ready = omap_nand_dev_ready;
	this->chip_delay = 0;	/* dev_ready() handles all waiting */
	this->ecc.mode = NAND_ECC_HW;
	this->ecc.size = 512;	/* default: 512-byte ECC blocks */
	this->cmdfunc = omap_nand_command;
	this->write_buf = omap_nand_write_buf;
	this->read_buf = omap_nand_read_buf;
	this->verify_buf = omap_nand_verify_buf;
	this->ecc.calculate = omap_nand_calculate_ecc;
	this->ecc.correct = omap_nand_correct_data;
	this->ecc.hwctl = omap_nand_enable_hwecc;

	nand_write_reg(NND_SYSCFG, 0x1); /* Enable auto idle */
	nand_write_reg(NND_PSC_CLK, 10);	/* conservative timing until scan */
	/* Scan to find existence of the device */
	if (nand_scan(omap_mtd, 1)) {

	set_psc_regs(25, 15, 35);	/* real timings once the chip is known */

	if (this->page_shift == 11) {
		/* 2 KiB pages: switch to large-page command set and 2048-byte ECC */
		this->cmdfunc = omap_nand_command_lp;
		l = nand_read_reg(NND_CTRL);
		l |= 1 << 4; /* Set the A8 bit in CTRL reg */
		nand_write_reg(NND_CTRL, l);
		this->ecc.mode = NAND_ECC_HW;
		this->ecc.size = 2048;
		this->ecc.bytes = 12;
		nand_write_reg(NND_ECC_SELECT, 6);

	/* We have to do bbt scanning ourselves */
	if (this->scan_bbt (omap_mtd)) {

	err = add_dynamic_parts(omap_mtd);
	printk(KERN_ERR "omap-hw-nand: no partitions defined\n");
	nand_release(omap_mtd);

	/* error unwind */
	if (omap_nand_dma_ch >= 0)
		omap_free_dma(omap_nand_dma_ch);
	clk_put(omap_nand_clk);

module_init(omap_nand_init);
/*
 * Module exit: release the MTD device and drop the clock.
 * NOTE(review): one or two trailing lines (presumably kfree(omap_mtd)
 * and/or DMA channel release) appear to be elided from this view —
 * confirm against the full source.
 */
static void __exit omap_nand_cleanup (void)
{
	clk_disable(omap_nand_clk);
	clk_put(omap_nand_clk);
	nand_release(omap_mtd);
}

module_exit(omap_nand_cleanup);