ia64/linux-2.6.18-xen.hg

view drivers/mtd/mtdconcat.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, so there is temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we only partially succeed in increasing the reservation
(i.e. we receive fewer pages than we asked for), then we may as well
keep those pages rather than returning them to Xen.
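
As an illustration only, a minimal sketch of the retry-on-timer
behaviour described above (the names balloon_process, balloon_worker,
current_pages, target_pages, increase_reservation and
decrease_reservation are assumed here for the sketch, not quoted from
the driver):

#include <linux/workqueue.h>

/* Assumed driver state and helpers, for illustration only. */
extern unsigned long current_pages, target_pages;
extern int increase_reservation(unsigned long nr_pages); /* may succeed partially */
extern int decrease_reservation(unsigned long nr_pages);

static void balloon_process(void *unused);
static DECLARE_WORK(balloon_worker, balloon_process, NULL);

static void balloon_process(void *unused)
{
	long credit = (long)target_pages - (long)current_pages;

	if (credit > 0)
		increase_reservation(credit);
	else if (credit < 0)
		decrease_reservation(-credit);

	/* Keep whatever pages were obtained, even on partial success,
	 * and simply try again later if the target is still not met
	 * (e.g. due to temporary memory pressure in the host), rather
	 * than recording a "hard limit" and giving up. */
	if (current_pages != target_pages)
		schedule_delayed_work(&balloon_worker, HZ);
}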

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This code is GPL
 *
 * $Id: mtdconcat.c,v 1.11 2005/11/07 11:14:20 gleixner Exp $
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>
/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 *
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}
static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		loff_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;
	memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min(total_len, (size_t)(subdev->size - to));
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
				entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = subdev->read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		devops.len = ops->len - ops->retlen;
		if (!devops.len)
			return ret;

		if (devops.datbuf)
			devops.datbuf += devops.retlen;
		if (devops.oobbuf)
			devops.oobbuf += devops.ooblen;

		from = 0;
	}
	return -EINVAL;
}
static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = subdev->write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		if (err)
			return err;

		devops.len = ops->len - ops->retlen;
		if (!devops.len)
			return 0;

		if (devops.datbuf)
			devops.datbuf += devops.retlen;
		if (devops.oobbuf)
			devops.oobbuf += devops.ooblen;
		to = 0;
	}
	return -EINVAL;
}
static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	u_int32_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
						  1))
			return -EINVAL;
	}

	instr->fail_addr = 0xffffffff;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != 0xffffffff)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}
static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->lock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}
static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->unlock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}
static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}
static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   char *name)
{				/* name for the new device */
	int i;
	size_t size;
	struct mtd_concat *concat;
	u_int32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kmalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	memset(concat, 0, size);
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.ecctype = subdev[0]->ecctype;
	concat->mtd.eccsize = subdev[0]->eccsize;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];
	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}
		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    concat->mtd.ecctype != subdev[i]->ecctype ||
		    concat->mtd.eccsize != subdev[i]->eccsize ||
		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];

	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;
	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}
	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		u_int32_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					erase_region_p->numblocks =
					    (position - begin) / curr_erasesize;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						erase_region_p->numblocks =
						    (position -
						     begin) / curr_erasesize;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		erase_region_p->numblocks = (position - begin) / curr_erasesize;
	}

	return &concat->mtd;
}
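
/*
 * Example usage (editor's sketch, not part of the original file): a board
 * driver that has already probed its chips could combine and register them
 * roughly like this.  "my_chips" is an assumed array of struct mtd_info
 * pointers; registration via add_mtd_device() is the caller's
 * responsibility, as noted above, and mtd_concat_destroy() below undoes
 * the concatenation.
 *
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(my_chips, ARRAY_SIZE(my_chips),
 *				     "board-flash");
 *	if (combined)
 *		add_mtd_device(combined);
 */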
/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");