openwrt.git: target/linux/generic/patches-2.6.31/065-rootfs_split.patch
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -53,6 +53,16 @@ config MTD_TESTS
should normally be compiled as kernel modules. The modules perform
various checks and verifications when loaded.

+config MTD_ROOTFS_ROOT_DEV
+ bool "Automatically set 'rootfs' partition to be root filesystem"
+ depends on MTD_PARTITIONS
+ default y
+
+config MTD_ROOTFS_SPLIT
+ bool "Automatically split 'rootfs' partition for squashfs"
+ depends on MTD_PARTITIONS
+ default y
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
depends on MTD_PARTITIONS
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -18,6 +18,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>
+#include <linux/root_dev.h>
+#include <linux/magic.h>

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -35,7 +37,7 @@ struct mtd_part {
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
-
+#define IS_PART(mtd) (mtd->read == part_read)

/*
* MTD methods which simply translate the effective address and pass through
@@ -502,6 +504,152 @@ out_register:
return slave;
}

+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+#define ROOTFS_SPLIT_NAME "rootfs_data"
+#define ROOTFS_REMOVED_NAME "<removed>"
+
+struct squashfs_super_block {
+ __le32 s_magic;
+ __le32 pad0[9];
+ __le64 bytes_used;
+};
+
+
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+ struct squashfs_super_block sb;
+ int len, ret;
+
+ ret = master->read(master, offset, sizeof(sb), &len, (void *) &sb);
+ if (ret || (len != sizeof(sb))) {
+ printk(KERN_ALERT "split_squashfs: error occurred while reading "
63 + "from \"%s\"\n", master->name);
64 + return -EINVAL;
65 + }
66 +
67 + if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) {
68 + printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
69 + master->name);
70 + *split_offset = 0;
71 + return 0;
72 + }
73 +
74 + if (le64_to_cpu((sb.bytes_used)) <= 0) {
75 + printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
76 + master->name);
77 + *split_offset = 0;
78 + return 0;
79 + }
80 +
81 + len = (u32) le64_to_cpu(sb.bytes_used);
82 + len += (offset & 0x000fffff);
83 + len += (master->erasesize - 1);
84 + len &= ~(master->erasesize - 1);
85 + len -= (offset & 0x000fffff);
86 + *split_offset = offset + len;
87 +
88 + return 0;
89 +}
90 +
91 +static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, const struct mtd_partition *part)
92 +{
93 + struct mtd_partition *dpart;
94 + struct mtd_part *slave = NULL;
95 + struct mtd_part *spart;
96 + int split_offset = 0;
97 + int ret;
98 +
99 + spart = PART(rpart);
100 + ret = split_squashfs(master, spart->offset, &split_offset);
101 + if (ret)
102 + return ret;
103 +
104 + if (split_offset <= 0)
105 + return 0;
106 +
107 + dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
108 + if (dpart == NULL) {
109 + printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
110 + ROOTFS_SPLIT_NAME);
111 + return -ENOMEM;
112 + }
113 +
114 + memcpy(dpart, part, sizeof(*part));
115 + dpart->name = (unsigned char *)&dpart[1];
116 + strcpy(dpart->name, ROOTFS_SPLIT_NAME);
117 +
118 + dpart->size = rpart->size - (split_offset - spart->offset);
119 + dpart->offset = split_offset;
120 +
121 + if (dpart == NULL)
122 + return 1;
123 +
124 + printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%llX, len=%llX \n",
125 + ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
126 +
127 + slave = add_one_partition(master, dpart, 0, split_offset);
128 + if (!slave) {
129 + kfree(dpart);
130 + return -ENOMEM;
131 + }
132 + rpart->split = &slave->mtd;
133 +
134 + return 0;
135 +}
+
+static int refresh_rootfs_split(struct mtd_info *mtd)
+{
+ struct mtd_partition tpart;
+ struct mtd_part *part;
+ char *name;
+ //int index = 0;
+ int offset, size;
+ int ret;
+
+ part = PART(mtd);
+
+ /* check for the new squashfs offset first */
+ ret = split_squashfs(part->master, part->offset, &offset);
+ if (ret)
+ return ret;
+
+ if ((offset > 0) && !mtd->split) {
+ printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
+ /* if we don't have a rootfs split partition, create a new one */
+ tpart.name = (char *) mtd->name;
+ tpart.size = mtd->size;
+ tpart.offset = part->offset;
+
+ return split_rootfs_data(part->master, &part->mtd, &tpart);
+ } else if ((offset > 0) && mtd->split) {
+ /* update the offsets of the existing partition */
+ size = mtd->size + part->offset - offset;
+
+ part = PART(mtd->split);
+ part->offset = offset;
+ part->mtd.size = size;
+ printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
+ __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
+ (u32) part->offset, (u32) part->mtd.size);
+ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
+ strcpy(name, ROOTFS_SPLIT_NAME);
+ part->mtd.name = name;
+ } else if ((offset <= 0) && mtd->split) {
+ printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
+
+ /* mark existing partition as removed */
+ part = PART(mtd->split);
+ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
+ strcpy(name, ROOTFS_REMOVED_NAME);
+ part->mtd.name = name;
+ part->offset = 0;
+ part->mtd.size = 0;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MTD_ROOTFS_SPLIT */
+
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -517,7 +665,7 @@ int add_mtd_partitions(struct mtd_info *
{
struct mtd_part *slave;
uint64_t cur_offset = 0;
- int i;
+ int i, ret;

printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

@@ -525,6 +673,21 @@ int add_mtd_partitions(struct mtd_info *
slave = add_one_partition(master, parts + i, i, cur_offset);
if (!slave)
return -ENOMEM;
+
+ if (!strcmp(parts[i].name, "rootfs")) {
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+ if (ROOT_DEV == 0) {
+ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
+ "set to be root filesystem\n");
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
+ }
+#endif
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ ret = split_rootfs_data(master, &slave->mtd, &parts[i]);
+ /* if (ret == 0)
+ j++; */
+#endif
+ }
cur_offset = slave->offset + slave->mtd.size;
}

@@ -532,6 +695,32 @@ int add_mtd_partitions(struct mtd_info *
}
EXPORT_SYMBOL(add_mtd_partitions);

+int refresh_mtd_partitions(struct mtd_info *mtd)
+{
+ int ret = 0;
+
+ if (IS_PART(mtd)) {
+ struct mtd_part *part;
+ struct mtd_info *master;
+
+ part = PART(mtd);
+ master = part->master;
+ if (master->refresh_device)
+ ret = master->refresh_device(master);
+ }
+
+ if (!ret && mtd->refresh_device)
+ ret = mtd->refresh_device(mtd);
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
+ refresh_rootfs_split(mtd);
+#endif
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(refresh_mtd_partitions);
+
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -29,6 +29,8 @@ struct block2mtd_dev {
struct block_device *blkdev;
struct mtd_info mtd;
struct mutex write_mutex;
+ rwlock_t bdev_mutex;
+ char devname[0];
};


@@ -81,6 +83,12 @@ static int block2mtd_erase(struct mtd_in
size_t len = instr->len;
int err;

+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }
+
instr->state = MTD_ERASING;
mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
@@ -93,6 +101,10 @@ static int block2mtd_erase(struct mtd_in

instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
+
+done:
+ read_unlock(&dev->bdev_mutex);
+
return err;
}

@@ -104,10 +116,14 @@ static int block2mtd_read(struct mtd_inf
struct page *page;
int index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
- int cpylen;
+ int cpylen, err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev || (from > mtd->size)) {
+ err = -EINVAL;
+ goto done;
+ }

- if (from > mtd->size)
- return -EINVAL;
if (from + len > mtd->size)
len = mtd->size - from;

@@ -122,10 +138,14 @@ static int block2mtd_read(struct mtd_inf
len = len - cpylen;

page = page_read(dev->blkdev->bd_inode->i_mapping, index);
- if (!page)
- return -ENOMEM;
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (!page) {
+ err = -ENOMEM;
+ goto done;
+ }
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto done;
+ }

memcpy(buf, page_address(page) + offset, cpylen);
page_cache_release(page);
@@ -136,7 +156,10 @@ static int block2mtd_read(struct mtd_inf
offset = 0;
index++;
}
- return 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
+ return err;
}


@@ -188,12 +211,22 @@ static int block2mtd_write(struct mtd_in
size_t *retlen, const u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- int err;
+ int err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }

if (!len)
- return 0;
- if (to >= mtd->size)
- return -ENOSPC;
+ goto done;
+
+ if (to >= mtd->size) {
+ err = -ENOSPC;
+ goto done;
+ }
+
if (to + len > mtd->size)
len = mtd->size - to;

@@ -202,6 +235,9 @@ static int block2mtd_write(struct mtd_in
mutex_unlock(&dev->write_mutex);
if (err > 0)
err = 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
return err;
}

@@ -210,52 +246,29 @@ static int block2mtd_write(struct mtd_in
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->blkdev);
- return;
-}
-
-
-static void block2mtd_free_device(struct block2mtd_dev *dev)
-{
- if (!dev)
- return;
-
- kfree(dev->mtd.name);

- if (dev->blkdev) {
- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
- 0, -1);
- close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
- }
+ read_lock(&dev->bdev_mutex);
+ if (dev->blkdev)
+ sync_blockdev(dev->blkdev);
+ read_unlock(&dev->bdev_mutex);

- kfree(dev);
+ return;
}


-/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
+static int _open_bdev(struct block2mtd_dev *dev)
{
struct block_device *bdev;
- struct block2mtd_dev *dev;
- struct mtd_partition *part;
- char *name;
-
- if (!devname)
- return NULL;
-
- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
- if (!dev)
- return NULL;

/* Get a handle on the device */
- bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
+ bdev = open_bdev_exclusive(dev->devname, FMODE_READ|FMODE_WRITE, NULL);
#ifndef MODULE
if (IS_ERR(bdev)) {

/* We might not have rootfs mounted at this point. Try
to resolve the device name by other means. */

- dev_t devt = name_to_dev_t(devname);
+ dev_t devt = name_to_dev_t(dev->devname);
if (devt) {
bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
}
@@ -263,17 +276,98 @@ static struct block2mtd_dev *add_device(
#endif

if (IS_ERR(bdev)) {
- ERROR("error: cannot open device %s", devname);
- goto devinit_err;
+ ERROR("error: cannot open device %s", dev->devname);
+ return 1;
}
dev->blkdev = bdev;

if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
ERROR("attempting to use an MTD device as a block device");
- goto devinit_err;
+ return 1;
}

+ return 0;
+}
+
+static void _close_bdev(struct block2mtd_dev *dev)
+{
+ struct block_device *bdev;
+
+ if (!dev->blkdev)
+ return;
+
+ bdev = dev->blkdev;
+ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
+ close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
+ dev->blkdev = NULL;
+}
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+ _close_bdev(dev);
+ kfree(dev);
+}
+
+
+static int block2mtd_refresh(struct mtd_info *mtd)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ struct block_device *bdev;
+ dev_t devt;
+ int err = 0;
+
+ /* no other mtd function can run at this point */
+ write_lock(&dev->bdev_mutex);
+
+ /* get the device number for the whole disk */
+ devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
+
+ /* close the old block device */
+ _close_bdev(dev);
+
+ /* open the whole disk, issue a partition rescan, then close it again */
+ bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
+ if (!bdev || !bdev->bd_disk)
+ err = -EINVAL;
+#ifndef CONFIG_MTD_BLOCK2MTD_MODULE
+ else
+ err = rescan_partitions(bdev->bd_disk, bdev);
+#endif
+ if (bdev)
+ close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+
+ /* try to open the partition block device again */
+ _open_bdev(dev);
+ write_unlock(&dev->bdev_mutex);
+
+ return err;
+}
+
+/* FIXME: ensure that mtd->size % erase_size == 0 */
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+{
+ struct block2mtd_dev *dev;
+ struct mtd_partition *part;
+ char *name;
+
+ if (!devname)
+ return NULL;
+
+ dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ strcpy(dev->devname, devname);
+
+ if (_open_bdev(dev))
+ goto devinit_err;
+
mutex_init(&dev->write_mutex);
+ rwlock_init(&dev->bdev_mutex);

if (!mtdname)
mtdname = devname;
@@ -297,6 +391,7 @@ static struct block2mtd_dev *add_device(
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
+ dev->mtd.refresh_device = block2mtd_refresh;

part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
part->name = dev->mtd.name;
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -18,6 +18,7 @@

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
+#include <linux/mtd/partitions.h>

#include <asm/uaccess.h>

@@ -814,6 +815,13 @@ static int mtd_ioctl(struct inode *inode
file->f_pos = 0;
break;
}
+#ifdef CONFIG_MTD_PARTITIONS
+ case MTDREFRESH:
+ {
+ ret = refresh_mtd_partitions(mtd);
+ break;
+ }
+#endif

default:
ret = -ENOTTY;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -101,6 +101,7 @@ struct mtd_oob_ops {
uint8_t *oobbuf;
};

+struct mtd_info;
struct mtd_info {
u_char type;
uint32_t flags;
@@ -241,6 +242,9 @@ struct mtd_info {
struct device dev;
int usecount;

+ int (*refresh_device)(struct mtd_info *mtd);
+ struct mtd_info *split;
+
/* If the driver is something smart, like UBI, it may need to maintain
* its own reference counting. The below functions are only for driver.
* The driver may register its callbacks. These callbacks are not
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -34,12 +34,14 @@
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/

+struct mtd_partition;
struct mtd_partition {
char *name; /* identifier string */
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
+ int (*refresh_partition)(struct mtd_info *);
};

#define MTDPART_OFS_NXTBLK (-2)
@@ -51,6 +53,7 @@ struct mtd_info;

int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
+int refresh_mtd_partitions(struct mtd_info *);

/*
* Functions dealing with the various ways of partitioning the space
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -110,6 +110,7 @@ struct otp_info {
#define MEMERASE64 _IOW('M', 20, struct erase_info_user64)
#define MEMWRITEOOB64 _IOWR('M', 21, struct mtd_oob_buf64)
#define MEMREADOOB64 _IOWR('M', 22, struct mtd_oob_buf64)
+#define MTDREFRESH _IO('M', 50)

/*
* Obsolete legacy interface. Keep it in order not to break userspace
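
Usage note (not part of the patch): the MTDREFRESH ioctl defined above in mtd-abi.h is what userspace calls to reach refresh_mtd_partitions() through the mtdchar handler added in this patch. Below is a minimal userspace sketch of such a call; the device node path (/dev/mtd0) and the error handling are illustrative assumptions, and the headers must come from a kernel tree carrying this patch so that MTDREFRESH is defined.

/* refresh-mtd.c: ask the kernel to re-read the squashfs end and re-split "rootfs" */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>	/* pulls in mtd-abi.h, which defines MTDREFRESH */

int main(void)
{
	int fd = open("/dev/mtd0", O_RDWR);	/* assumed device node of the "rootfs" partition */
	if (fd < 0) {
		perror("open /dev/mtd0");
		return 1;
	}

	/* MTDREFRESH takes no argument; the kernel updates the partition layout in place */
	if (ioctl(fd, MTDREFRESH) < 0)
		perror("MTDREFRESH");

	close(fd);
	return 0;
}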