/*
 * Simple MTD partitioning layer
 *
 * (C) 2000 Nicolas Pitre <nico@cam.org>
 *
 * This code is GPL
 *
 * 	02-21-2002	Thomas Gleixner <gleixner@autronix.de>
 *			added support for read_oob, write_oob
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;		/* must be the first member, see PART() */
	struct mtd_info *master;
	uint32_t offset;
	int index;
	struct list_head list;
	int registered;
};
/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
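/* The cast in PART() is only valid because the mtd_info is the first member
 * of struct mtd_part, so a pointer to the embedded mtd_info also points at
 * the enclosing mtd_part. */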
/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	int res;

	/* Clamp the request to the partition boundaries. */
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	res = part->master->read(part->master, from + part->offset,
				 len, retlen, buf);
	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	return part->master->point(part->master, from + part->offset,
				   len, retlen, virt, phys);
}
static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	part->master->unpoint(part->master, from + part->offset, len);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;
	res = part->master->read_oob(part->master, from + part->offset, ops);

	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}
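/*
 * Note that the *_prot_reg/*_prot_info handlers pass their offsets through
 * to the master unmodified: the OTP (one-time programmable) registers live
 * outside the normal flash address space, so the partition offset does not
 * apply to them.
 */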
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_user_prot_reg(part->master, from,
					len, retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_fact_prot_reg(part->master, from,
					len, retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->write(part->master, to + part->offset,
				   len, retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->panic_write(part->master, to + part->offset,
				   len, retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->write_user_prot_reg(part->master, from,
					len, retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return part->master->writev(part->master, vecs, count,
				    to + part->offset, retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (instr->addr >= mtd->size)
		return -EINVAL;
	instr->addr += part->offset;
	ret = part->master->erase(part->master, instr);
	if (ret) {
		/* Translate the failure address back to partition-relative. */
		if (instr->fail_addr != 0xffffffff)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}
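/*
 * Drivers call mtd_erase_callback() when an erase operation completes.  If
 * the erase was issued through a partition, the addresses in the erase_info
 * are master-relative at that point, so they are translated back to
 * partition-relative values before the user's callback is invoked.
 */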
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != 0xffffffff)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->unlock(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	return part->master->block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	res = part->master->block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}
/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */
int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;

	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			list_del(&slave->list);
			if (slave->registered)
				del_mtd_device(&slave->mtd);
			kfree(slave);
		}

	return 0;
}
static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		u_int32_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;

	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && master->suspend && master->resume) {
		slave->mtd.suspend = part_suspend;
		slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
	slave->index = partno;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if ((cur_offset % master->erasesize) != 0) {
			/* Round up to next erasesize */
			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%08x -> 0x%08x\n", partno,
			       cur_offset, slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
		slave->offset + slave->mtd.size, slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
			part->name, master->name, slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this partition. */
		for (i = 0; i < master->numeraseregions && regions[i].offset <= slave->offset; i++)
			;

		/* Pick the biggest erasesize of the regions the partition spans. */
		for (i--; i < master->numeraseregions && regions[i].offset < slave->offset + slave->mtd.size; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    (slave->offset % slave->mtd.erasesize)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    (slave->mtd.size % slave->mtd.erasesize)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	if (master->block_isbad) {
		uint32_t offs = 0;

		/* Count the bad blocks that fall inside this partition. */
		while (offs < slave->mtd.size) {
			if (master->block_isbad(master,
						offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	if (part->mtdp) {
		/* store the object pointer (caller may or may not register it) */
		*part->mtdp = &slave->mtd;
		slave->registered = 0;
	} else {
		/* register our partition */
		add_mtd_device(&slave->mtd);
		slave->registered = 1;
	}
	return slave;
}
/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 * (Q: should we register the master MTD object as well?)
 */
int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	u_int32_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
EXPORT_SYMBOL(add_mtd_partitions);
EXPORT_SYMBOL(del_mtd_partitions);
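/*
 * Usage sketch (illustrative only, not part of this file): a board driver
 * typically defines a static partition table and registers it once the
 * master chip has been probed.  The names "board_parts" and "master" below
 * are hypothetical.
 *
 *	static struct mtd_partition board_parts[] = {
 *		{
 *			.name       = "bootloader",
 *			.offset     = 0,
 *			.size       = 0x40000,
 *			.mask_flags = MTD_WRITEABLE,	-- clears MTD_WRITEABLE: read-only
 *		}, {
 *			.name   = "rootfs",
 *			.offset = MTDPART_OFS_APPEND,	-- directly after the previous one
 *			.size   = MTDPART_SIZ_FULL,	-- all remaining space
 *		},
 *	};
 *
 *	add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 */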
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}
int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts, unsigned long origin)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		/* If the parser is not yet registered, try loading its module. */
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser) {
			printk(KERN_NOTICE "%s partition parsing not available\n",
			       *types);
			continue;
		}
		ret = (*parser->parse_fn)(master, pparts, origin);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
EXPORT_SYMBOL_GPL(register_mtd_parser);
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
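/*
 * Usage sketch for the parser interface (illustrative only): a map or NAND
 * driver can ask the registered parsers, e.g. the command-line parser, for
 * a partition table before falling back to a built-in default.  "nr_parts",
 * "parts" and "board_parts" are hypothetical names.
 *
 *	static const char *part_probes[] = { "cmdlinepart", NULL };
 *
 *	nr_parts = parse_mtd_partitions(master, part_probes, &parts, 0);
 *	if (nr_parts <= 0) {
 *		parts = board_parts;		-- fall back to the static table
 *		nr_parts = ARRAY_SIZE(board_parts);
 *	}
 *	add_mtd_partitions(master, parts, nr_parts);
 */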