target/linux/generic-2.6/patches-2.6.26/065-rootfs_split.patch
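
Add optional rootfs handling to the MTD partition code: MTD_ROOTFS_ROOT_DEV makes
the partition named "rootfs" the default root device, and MTD_ROOTFS_SPLIT carves a
writable "rootfs_data" partition out of the erase-block-aligned space left after a
squashfs image inside "rootfs". A new MTDREFRESH ioctl (backed by
refresh_mtd_partitions()) lets user space re-read the partition layout, e.g. after
block2mtd re-opens its backing block device.

Illustrative user-space sketch only — not part of the patch. It assumes the patched
<mtd/mtd-abi.h> (which defines MTDREFRESH) is on the include path, and /dev/mtd3 is
a hypothetical stand-in for whichever MTD character device holds the rootfs:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-abi.h>

	int main(void)
	{
		/* open the MTD character device to refresh (hypothetical node) */
		int fd = open("/dev/mtd3", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* ask the kernel to run refresh_mtd_partitions() on this device */
		if (ioctl(fd, MTDREFRESH) < 0)
			perror("MTDREFRESH");
		close(fd);
		return 0;
	}
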
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -47,6 +47,16 @@
 	  devices. Partitioning on NFTL 'devices' is a different - that's the
 	  'normal' form of partitioning used on a block device.
 
+config MTD_ROOTFS_ROOT_DEV
+	bool "Automatically set 'rootfs' partition to be root filesystem"
+	depends on MTD_PARTITIONS
+	default y
+
+config MTD_ROOTFS_SPLIT
+	bool "Automatically split 'rootfs' partition for squashfs"
+	depends on MTD_PARTITIONS
+	default y
+
 config MTD_REDBOOT_PARTS
 	tristate "RedBoot partition table parsing"
 	depends on MTD_PARTITIONS
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,6 +20,8 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/compatmac.h>
+#include <linux/squashfs_fs.h>
+#include <linux/root_dev.h>
 
 /* Our partition linked list */
 static LIST_HEAD(mtd_partitions);
@@ -39,7 +41,7 @@
  * the pointer to that structure with this macro.
  */
 #define PART(x) ((struct mtd_part *)(x))
-
+#define IS_PART(mtd) (mtd->read == part_read)
 
 /*
  * MTD methods which simply translate the effective address and pass through
@@ -322,6 +324,316 @@
 	return 0;
 }
 
+static u_int32_t cur_offset = 0;
+static int add_one_partition(struct mtd_info *master, const struct mtd_partition *part,
+		int i, struct mtd_part **slp)
+{
+	struct mtd_part *slave;
+
+	/* allocate the partition structure */
+	slave = kzalloc (sizeof(*slave), GFP_KERNEL);
+	if (!slave) {
+		printk ("memory allocation error while creating partitions for \"%s\"\n",
+			master->name);
+		del_mtd_partitions(master);
+		return -ENOMEM;
+	}
+	list_add(&slave->list, &mtd_partitions);
+
+	/* set up the MTD object for this partition */
+	slave->mtd.type = master->type;
+	slave->mtd.flags = master->flags & ~part->mask_flags;
+	slave->mtd.size = part->size;
+	slave->mtd.writesize = master->writesize;
+	slave->mtd.oobsize = master->oobsize;
+	slave->mtd.oobavail = master->oobavail;
+	slave->mtd.subpage_sft = master->subpage_sft;
+
+	slave->mtd.name = part->name;
+	slave->mtd.owner = master->owner;
+
+	slave->mtd.read = part_read;
+	slave->mtd.write = part_write;
+
+	if (master->panic_write)
+		slave->mtd.panic_write = part_panic_write;
+
+	slave->mtd.refresh_device = part->refresh_partition;
+
+	if(master->point && master->unpoint){
+		slave->mtd.point = part_point;
+		slave->mtd.unpoint = part_unpoint;
+	}
+
+	if (master->read_oob)
+		slave->mtd.read_oob = part_read_oob;
+	if (master->write_oob)
+		slave->mtd.write_oob = part_write_oob;
+	if(master->read_user_prot_reg)
+		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+	if(master->read_fact_prot_reg)
+		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+	if(master->write_user_prot_reg)
+		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+	if(master->lock_user_prot_reg)
+		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+	if(master->get_user_prot_info)
+		slave->mtd.get_user_prot_info = part_get_user_prot_info;
+	if(master->get_fact_prot_info)
+		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+	if (master->sync)
+		slave->mtd.sync = part_sync;
+	if (!i && master->suspend && master->resume) {
+		slave->mtd.suspend = part_suspend;
+		slave->mtd.resume = part_resume;
+	}
+	if (master->writev)
+		slave->mtd.writev = part_writev;
+	if (master->lock)
+		slave->mtd.lock = part_lock;
+	if (master->unlock)
+		slave->mtd.unlock = part_unlock;
+	if (master->block_isbad)
+		slave->mtd.block_isbad = part_block_isbad;
+	if (master->block_markbad)
+		slave->mtd.block_markbad = part_block_markbad;
+	slave->mtd.erase = part_erase;
+	slave->master = master;
+	slave->offset = part->offset;
+	slave->index = i;
+
+	if (slave->offset == MTDPART_OFS_APPEND)
+		slave->offset = cur_offset;
+	if (slave->offset == MTDPART_OFS_NXTBLK) {
+		slave->offset = cur_offset;
+		if ((cur_offset % master->erasesize) != 0) {
+			/* Round up to next erasesize */
+			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+			printk(KERN_NOTICE "Moving partition %d: "
+				"0x%08x -> 0x%08x\n", i,
+				cur_offset, slave->offset);
+		}
+	}
+	if (slave->mtd.size == MTDPART_SIZ_FULL)
+		slave->mtd.size = master->size - slave->offset;
+	cur_offset = slave->offset + slave->mtd.size;
+
+	printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+		slave->offset + slave->mtd.size, slave->mtd.name);
+
+	/* let's do some sanity checks */
+	if (slave->offset >= master->size) {
+		/* let's register it anyway to preserve ordering */
+		slave->offset = 0;
+		slave->mtd.size = 0;
+		printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
+			part->name);
+	}
+	if (slave->offset + slave->mtd.size > master->size) {
+		slave->mtd.size = master->size - slave->offset;
+		printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+			part->name, master->name, slave->mtd.size);
+	}
+	if (master->numeraseregions>1) {
+		/* Deal with variable erase size stuff */
+		int i;
+		struct mtd_erase_region_info *regions = master->eraseregions;
+
+		/* Find the first erase regions which is part of this partition. */
+		for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
+			;
+
+		for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
+			if (slave->mtd.erasesize < regions[i].erasesize) {
+				slave->mtd.erasesize = regions[i].erasesize;
+			}
+		}
+	} else {
+		/* Single erase size */
+		slave->mtd.erasesize = master->erasesize;
+	}
+
+	if ((slave->mtd.flags & MTD_WRITEABLE) &&
+	    (slave->offset % slave->mtd.erasesize)) {
+		/* Doesn't start on a boundary of major erase size */
+		/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
+		slave->mtd.flags &= ~MTD_WRITEABLE;
+		printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+			part->name);
+	}
+	if ((slave->mtd.flags & MTD_WRITEABLE) &&
+	    (slave->mtd.size % slave->mtd.erasesize)) {
+		slave->mtd.flags &= ~MTD_WRITEABLE;
+		printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+			part->name);
+	}
+
+	slave->mtd.ecclayout = master->ecclayout;
+	if (master->block_isbad) {
+		uint32_t offs = 0;
+
+		while(offs < slave->mtd.size) {
+			if (master->block_isbad(master,
+					offs + slave->offset))
+				slave->mtd.ecc_stats.badblocks++;
+			offs += slave->mtd.erasesize;
+		}
+	}
+
+	if(part->mtdp)
+	{	/* store the object pointer (caller may or may not register it */
+		*part->mtdp = &slave->mtd;
+		slave->registered = 0;
+	}
+	else
+	{
+		/* register our partition */
+		add_mtd_device(&slave->mtd);
+		slave->registered = 1;
+	}
+
+	if (slp)
+		*slp = slave;
+
+	return 0;
+}
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+#define ROOTFS_SPLIT_NAME "rootfs_data"
+#define ROOTFS_REMOVED_NAME "<removed>"
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+	char buf[512];
+	struct squashfs_super_block *sb = (struct squashfs_super_block *) buf;
+	int len, ret;
+
+	ret = master->read(master, offset, sizeof(*sb), &len, buf);
+	if (ret || (len != sizeof(*sb))) {
+		printk(KERN_ALERT "split_squashfs: error occured while reading "
+			"from \"%s\"\n", master->name);
+		return -EINVAL;
+	}
+
+	if (*((u32 *) buf) != SQUASHFS_MAGIC) {
+		printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
+			master->name);
+		*split_offset = 0;
+		return 0;
+	}
+
+	if (sb->bytes_used <= 0) {
+		printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
+			master->name);
+		*split_offset = 0;
+		return 0;
+	}
+
+	len = (u32) sb->bytes_used;
+	len += (offset & 0x000fffff);
+	len += (master->erasesize - 1);
+	len &= ~(master->erasesize - 1);
+	len -= (offset & 0x000fffff);
+	*split_offset = offset + len;
+
+	return 0;
+}
+
+static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, struct mtd_partition *part,
+		int index)
+{
+	struct mtd_partition *dpart;
+	struct mtd_part *slave = NULL;
+	int split_offset = 0;
+	int ret;
+
+	ret = split_squashfs(master, part->offset, &split_offset);
+	if (ret)
+		return ret;
+
+	if (split_offset <= 0)
+		return 0;
+
+	dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
+	if (dpart == NULL) {
+		printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
+			ROOTFS_SPLIT_NAME);
+		return -ENOMEM;
+	}
+
+	memcpy(dpart, part, sizeof(*part));
+	dpart->name = (unsigned char *)&dpart[1];
+	strcpy(dpart->name, ROOTFS_SPLIT_NAME);
+
+	dpart->size -= split_offset - dpart->offset;
+	dpart->offset = split_offset;
+
+	if (dpart == NULL)
+		return 1;
+
+	printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%X, len=%X \n",
+		ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
+
+	ret = add_one_partition(master, dpart, index, &slave);
+	if (ret)
+		kfree(dpart);
+	else if (slave)
+		rpart->split = &slave->mtd;
+
+	return ret;
+}
+
+static int refresh_rootfs_split(struct mtd_info *mtd)
+{
+	struct mtd_partition tpart;
+	struct mtd_part *part;
+	int index = 0;
+	int offset, size;
+	int ret;
+
+	part = PART(mtd);
+
+	/* check for the new squashfs offset first */
+	ret = split_squashfs(part->master, part->offset, &offset);
+	if (ret)
+		return ret;
+
+	if ((offset > 0) && !mtd->split) {
+		printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
+		/* if we don't have a rootfs split partition, create a new one */
+		tpart.name = mtd->name;
+		tpart.size = mtd->size;
+		tpart.offset = part->offset;
+
+		/* find the index of the last partition */
+		if (!list_empty(&mtd_partitions))
+			index = list_first_entry(&mtd_partitions, struct mtd_part, list)->index + 1;
+
+		return split_rootfs_data(part->master, &part->mtd, &tpart, index);
+	} else if ((offset > 0) && mtd->split) {
+		/* update the offsets of the existing partition */
+		size = mtd->size + part->offset - offset;
+
+		part = PART(mtd->split);
+		part->offset = offset;
+		part->mtd.size = size;
+		printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
+			__func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
+			part->offset, part->mtd.size);
+		strcpy(part->mtd.name, ROOTFS_SPLIT_NAME);
+	} else if ((offset <= 0) && mtd->split) {
+		printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
+
+		/* mark existing partition as removed */
+		part = PART(mtd->split);
+		strcpy(part->mtd.name, ROOTFS_REMOVED_NAME);
+		part->offset = 0;
+		part->mtd.size = 0;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_MTD_ROOTFS_SPLIT */
+
 /*
  * This function, given a master MTD object and a partition table, creates
  * and registers slave MTD objects which are bound to the master according to
@@ -334,171 +646,31 @@
 		int nbparts)
 {
 	struct mtd_part *slave;
-	u_int32_t cur_offset = 0;
-	int i;
+	struct mtd_partition *part;
+	int i, j, ret = 0;
 
 	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
 
-	for (i = 0; i < nbparts; i++) {
-
-		/* allocate the partition structure */
-		slave = kzalloc (sizeof(*slave), GFP_KERNEL);
-		if (!slave) {
-			printk ("memory allocation error while creating partitions for \"%s\"\n",
-				master->name);
-			del_mtd_partitions(master);
-			return -ENOMEM;
-		}
-		list_add(&slave->list, &mtd_partitions);
-
-		/* set up the MTD object for this partition */
-		slave->mtd.type = master->type;
-		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
-		slave->mtd.size = parts[i].size;
-		slave->mtd.writesize = master->writesize;
-		slave->mtd.oobsize = master->oobsize;
-		slave->mtd.oobavail = master->oobavail;
-		slave->mtd.subpage_sft = master->subpage_sft;
-
-		slave->mtd.name = parts[i].name;
-		slave->mtd.owner = master->owner;
-
-		slave->mtd.read = part_read;
-		slave->mtd.write = part_write;
-
-		if (master->panic_write)
-			slave->mtd.panic_write = part_panic_write;
-
-		if(master->point && master->unpoint){
-			slave->mtd.point = part_point;
-			slave->mtd.unpoint = part_unpoint;
-		}
-
-		if (master->read_oob)
-			slave->mtd.read_oob = part_read_oob;
-		if (master->write_oob)
-			slave->mtd.write_oob = part_write_oob;
-		if(master->read_user_prot_reg)
-			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
-		if(master->read_fact_prot_reg)
-			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
-		if(master->write_user_prot_reg)
-			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
-		if(master->lock_user_prot_reg)
-			slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
-		if(master->get_user_prot_info)
-			slave->mtd.get_user_prot_info = part_get_user_prot_info;
-		if(master->get_fact_prot_info)
-			slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
-		if (master->sync)
-			slave->mtd.sync = part_sync;
-		if (!i && master->suspend && master->resume) {
-			slave->mtd.suspend = part_suspend;
-			slave->mtd.resume = part_resume;
-		}
-		if (master->writev)
-			slave->mtd.writev = part_writev;
-		if (master->lock)
-			slave->mtd.lock = part_lock;
-		if (master->unlock)
-			slave->mtd.unlock = part_unlock;
-		if (master->block_isbad)
-			slave->mtd.block_isbad = part_block_isbad;
-		if (master->block_markbad)
-			slave->mtd.block_markbad = part_block_markbad;
-		slave->mtd.erase = part_erase;
-		slave->master = master;
-		slave->offset = parts[i].offset;
-		slave->index = i;
-
-		if (slave->offset == MTDPART_OFS_APPEND)
-			slave->offset = cur_offset;
-		if (slave->offset == MTDPART_OFS_NXTBLK) {
-			slave->offset = cur_offset;
-			if ((cur_offset % master->erasesize) != 0) {
-				/* Round up to next erasesize */
-				slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
-				printk(KERN_NOTICE "Moving partition %d: "
-					"0x%08x -> 0x%08x\n", i,
-					cur_offset, slave->offset);
-			}
-		}
-		if (slave->mtd.size == MTDPART_SIZ_FULL)
-			slave->mtd.size = master->size - slave->offset;
-		cur_offset = slave->offset + slave->mtd.size;
-
-		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
-			slave->offset + slave->mtd.size, slave->mtd.name);
-
-		/* let's do some sanity checks */
-		if (slave->offset >= master->size) {
-			/* let's register it anyway to preserve ordering */
-			slave->offset = 0;
-			slave->mtd.size = 0;
-			printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
-				parts[i].name);
-		}
-		if (slave->offset + slave->mtd.size > master->size) {
-			slave->mtd.size = master->size - slave->offset;
-			printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
-				parts[i].name, master->name, slave->mtd.size);
-		}
-		if (master->numeraseregions>1) {
-			/* Deal with variable erase size stuff */
-			int i;
-			struct mtd_erase_region_info *regions = master->eraseregions;
-
-			/* Find the first erase regions which is part of this partition. */
-			for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
-				;
-
-			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
-				if (slave->mtd.erasesize < regions[i].erasesize) {
-					slave->mtd.erasesize = regions[i].erasesize;
-				}
+	for (i = 0, j = 0; i < nbparts; i++) {
+		part = (struct mtd_partition *) &parts[i];
+		ret = add_one_partition(master, part, j, &slave);
+		if (ret)
+			return ret;
+		j++;
+
+		if (strcmp(part->name, "rootfs") == 0 && slave->registered) {
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+			if (ROOT_DEV == 0) {
+				printk(KERN_NOTICE "mtd: partition \"rootfs\" "
+					"set to be root filesystem\n");
+				ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
 			}
-		} else {
-			/* Single erase size */
-			slave->mtd.erasesize = master->erasesize;
-		}
-
-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
-		    (slave->offset % slave->mtd.erasesize)) {
-			/* Doesn't start on a boundary of major erase size */
-			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
-			slave->mtd.flags &= ~MTD_WRITEABLE;
-			printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
-				parts[i].name);
-		}
-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
-		    (slave->mtd.size % slave->mtd.erasesize)) {
-			slave->mtd.flags &= ~MTD_WRITEABLE;
-			printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
-				parts[i].name);
-		}
-
-		slave->mtd.ecclayout = master->ecclayout;
-		if (master->block_isbad) {
-			uint32_t offs = 0;
-
-			while(offs < slave->mtd.size) {
-				if (master->block_isbad(master,
-						offs + slave->offset))
-					slave->mtd.ecc_stats.badblocks++;
-				offs += slave->mtd.erasesize;
-			}
-		}
-
-		if(parts[i].mtdp)
-		{	/* store the object pointer (caller may or may not register it */
-			*parts[i].mtdp = &slave->mtd;
-			slave->registered = 0;
-		}
-		else
-		{
-			/* register our partition */
-			add_mtd_device(&slave->mtd);
-			slave->registered = 1;
+#endif
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+			ret = split_rootfs_data(master, &slave->mtd, part, j);
+			if (ret == 0)
+				j++;
+#endif
 		}
 	}
 
@@ -574,6 +746,32 @@
 	return ret;
 }
 
+int refresh_mtd_partitions(struct mtd_info *mtd)
+{
+	int ret = 0;
+
+	if (IS_PART(mtd)) {
+		struct mtd_part *part;
+		struct mtd_info *master;
+
+		part = PART(mtd);
+		master = part->master;
+		if (master->refresh_device)
+			ret = master->refresh_device(master);
+	}
+
+	if (!ret && mtd->refresh_device)
+		ret = mtd->refresh_device(mtd);
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+	if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
+		refresh_rootfs_split(mtd);
+#endif
+
+	return 0;
+}
+
 EXPORT_SYMBOL_GPL(parse_mtd_partitions);
+EXPORT_SYMBOL_GPL(refresh_mtd_partitions);
 EXPORT_SYMBOL_GPL(register_mtd_parser);
 EXPORT_SYMBOL_GPL(deregister_mtd_parser);
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -34,6 +34,8 @@
 	struct block_device *blkdev;
 	struct mtd_info mtd;
 	struct mutex write_mutex;
+	rwlock_t bdev_mutex;
+	char devname[0];
 };
 
 
@@ -86,6 +88,12 @@
 	size_t len = instr->len;
 	int err;
 
+	read_lock(&dev->bdev_mutex);
+	if (!dev->blkdev) {
+		err = -EINVAL;
+		goto done;
+	}
+
 	instr->state = MTD_ERASING;
 	mutex_lock(&dev->write_mutex);
 	err = _block2mtd_erase(dev, from, len);
@@ -98,6 +106,10 @@
 
 	instr->state = MTD_ERASE_DONE;
 	mtd_erase_callback(instr);
+
+done:
+	read_unlock(&dev->bdev_mutex);
+
 	return err;
 }
 
@@ -109,10 +121,14 @@
 	struct page *page;
 	int index = from >> PAGE_SHIFT;
 	int offset = from & (PAGE_SIZE-1);
-	int cpylen;
+	int cpylen, err = 0;
+
+	read_lock(&dev->bdev_mutex);
+	if (!dev->blkdev || (from > mtd->size)) {
+		err = -EINVAL;
+		goto done;
+	}
 
-	if (from > mtd->size)
-		return -EINVAL;
 	if (from + len > mtd->size)
 		len = mtd->size - from;
 
@@ -127,10 +143,14 @@
 		len = len - cpylen;
 
 		page = page_read(dev->blkdev->bd_inode->i_mapping, index);
-		if (!page)
-			return -ENOMEM;
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		if (!page) {
+			err = -ENOMEM;
+			goto done;
+		}
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			goto done;
+		}
 
 		memcpy(buf, page_address(page) + offset, cpylen);
 		page_cache_release(page);
@@ -141,7 +161,10 @@
 		offset = 0;
 		index++;
 	}
-	return 0;
+
+done:
+	read_unlock(&dev->bdev_mutex);
+	return err;
 }
 
 
@@ -193,12 +216,22 @@
 		size_t *retlen, const u_char *buf)
 {
 	struct block2mtd_dev *dev = mtd->priv;
-	int err;
+	int err = 0;
+
+	read_lock(&dev->bdev_mutex);
+	if (!dev->blkdev) {
+		err = -EINVAL;
+		goto done;
+	}
 
 	if (!len)
-		return 0;
-	if (to >= mtd->size)
-		return -ENOSPC;
+		goto done;
+
+	if (to >= mtd->size) {
+		err = -ENOSPC;
+		goto done;
+	}
+
 	if (to + len > mtd->size)
 		len = mtd->size - to;
 
@@ -207,6 +240,9 @@
 	mutex_unlock(&dev->write_mutex);
 	if (err > 0)
 		err = 0;
+
+done:
+	read_unlock(&dev->bdev_mutex);
 	return err;
 }
 
@@ -215,51 +251,29 @@
 static void block2mtd_sync(struct mtd_info *mtd)
 {
 	struct block2mtd_dev *dev = mtd->priv;
-	sync_blockdev(dev->blkdev);
-	return;
-}
-
-
-static void block2mtd_free_device(struct block2mtd_dev *dev)
-{
-	if (!dev)
-		return;
-
-	kfree(dev->mtd.name);
 
-	if (dev->blkdev) {
-		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
-					0, -1);
-		close_bdev_excl(dev->blkdev);
-	}
+	read_lock(&dev->bdev_mutex);
+	if (dev->blkdev)
+		sync_blockdev(dev->blkdev);
+	read_unlock(&dev->bdev_mutex);
 
-	kfree(dev);
+	return;
 }
 
 
-/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+static int _open_bdev(struct block2mtd_dev *dev)
 {
 	struct block_device *bdev;
-	struct block2mtd_dev *dev;
-	struct mtd_partition *part;
-
-	if (!devname)
-		return NULL;
-
-	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
-	if (!dev)
-		return NULL;
 
 	/* Get a handle on the device */
-	bdev = open_bdev_excl(devname, O_RDWR, NULL);
+	bdev = open_bdev_excl(dev->devname, O_RDWR, NULL);
 #ifndef MODULE
 	if (IS_ERR(bdev)) {
 
 		/* We might not have rootfs mounted at this point. Try
 		   to resolve the device name by other means. */
 
-		dev_t devt = name_to_dev_t(devname);
+		dev_t devt = name_to_dev_t(dev->devname);
 		if (devt) {
 			bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
 		}
@@ -267,17 +281,96 @@
 #endif
 
 	if (IS_ERR(bdev)) {
-		ERROR("error: cannot open device %s", devname);
-		goto devinit_err;
+		ERROR("error: cannot open device %s", dev->devname);
+		return 1;
 	}
 	dev->blkdev = bdev;
 
 	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
 		ERROR("attempting to use an MTD device as a block device");
-		goto devinit_err;
+		return 1;
 	}
 
+	return 0;
+}
+
+static void _close_bdev(struct block2mtd_dev *dev)
+{
+	struct block_device *bdev;
+
+	if (!dev->blkdev)
+		return;
+
+	bdev = dev->blkdev;
+	invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
+	close_bdev_excl(dev->blkdev);
+	dev->blkdev = NULL;
+}
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+	if (!dev)
+		return;
+
+	kfree(dev->mtd.name);
+	_close_bdev(dev);
+	kfree(dev);
+}
+
+
+static int block2mtd_refresh(struct mtd_info *mtd)
+{
+	struct block2mtd_dev *dev = mtd->priv;
+	struct block_device *bdev;
+	dev_t devt;
+	int err = 0;
+
+	/* no other mtd function can run at this point */
+	write_lock(&dev->bdev_mutex);
+
+	/* get the device number for the whole disk */
+	devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
+
+	/* close the old block device */
+	_close_bdev(dev);
+
+	/* open the whole disk, issue a partition rescan, then */
+	bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
+	if (!bdev || !bdev->bd_disk)
+		err = -EINVAL;
+	else {
+		err = rescan_partitions(bdev->bd_disk, bdev);
+	}
+	if (bdev)
+		close_bdev_excl(bdev);
+
+	/* try to open the partition block device again */
+	_open_bdev(dev);
+	write_unlock(&dev->bdev_mutex);
+
+	return err;
+}
+
+/* FIXME: ensure that mtd->size % erase_size == 0 */
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+{
+	struct block2mtd_dev *dev;
+	struct mtd_partition *part;
+
+	if (!devname)
+		return NULL;
+
+	dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	strcpy(dev->devname, devname);
+
+	if (_open_bdev(dev))
+		goto devinit_err;
+
 	mutex_init(&dev->write_mutex);
+	rwlock_init(&dev->bdev_mutex);
 
 	/* Setup the MTD structure */
 	/* make the name contain the block device in */
@@ -304,6 +397,7 @@
 	dev->mtd.read = block2mtd_read;
 	dev->mtd.priv = dev;
 	dev->mtd.owner = THIS_MODULE;
+	dev->mtd.refresh_device = block2mtd_refresh;
 
 	part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
 	part->name = dev->mtd.name;
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -17,6 +17,7 @@
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/compatmac.h>
+#include <linux/mtd/partitions.h>
 
 #include <asm/uaccess.h>
 
@@ -756,6 +757,13 @@
 		file->f_pos = 0;
 		break;
 	}
+#ifdef CONFIG_MTD_PARTITIONS
+	case MTDREFRESH:
+	{
+		ret = refresh_mtd_partitions(mtd);
+		break;
+	}
+#endif
 
 	default:
 		ret = -ENOTTY;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -98,6 +98,7 @@
 	uint8_t		*oobbuf;
 };
 
+struct mtd_info;
 struct mtd_info {
 	u_char type;
 	u_int32_t flags;
@@ -213,6 +214,9 @@
 	struct module *owner;
 	int usecount;
 
+	int (*refresh_device)(struct mtd_info *mtd);
+	struct mtd_info *split;
+
 	/* If the driver is something smart, like UBI, it may need to maintain
 	 * its own reference counting. The below functions are only for driver.
 	 * The driver may register its callbacks. These callbacks are not
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,6 +36,7 @@
  * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
  */
 
+struct mtd_partition;
 struct mtd_partition {
 	char *name;		/* identifier string */
 	u_int32_t size;		/* partition size */
@@ -43,6 +44,7 @@
 	u_int32_t mask_flags;	/* master MTD flags to mask out for this partition */
 	struct nand_ecclayout *ecclayout;	/* out of band layout for this partition (NAND only)*/
 	struct mtd_info **mtdp;	/* pointer to store the MTD object */
+	int (*refresh_partition)(struct mtd_info *);
};
 
 #define MTDPART_OFS_NXTBLK	(-2)
@@ -52,6 +54,7 @@
 
 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
 int del_mtd_partitions(struct mtd_info *);
+int refresh_mtd_partitions(struct mtd_info *);
 
 /*
  * Functions dealing with the various ways of partitioning the space
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -95,6 +95,7 @@
 #define ECCGETLAYOUT		_IOR('M', 17, struct nand_ecclayout)
 #define ECCGETSTATS		_IOR('M', 18, struct mtd_ecc_stats)
 #define MTDFILEMODE		_IO('M', 19)
+#define MTDREFRESH		_IO('M', 23)
 
 /*
  * Obsolete legacy interface. Keep it in order not to break userspace