ia64/linux-2.6.18-xen.hg

view drivers/mtd/rfd_ftl.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
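The change described above amounts to a retry loop driven by deferred work: if the reservation could not be (fully) increased, keep whatever pages were obtained and try again later instead of recording a hard limit. The sketch below is illustrative only; the names (balloon_worker, current_pages, target_pages, try_increase_reservation, try_decrease_reservation) are hypothetical and are not the actual symbols used by the Xen balloon driver.

/*
 * Illustrative sketch only -- not the actual balloon driver code.
 * Shows the retry-on-a-timer behaviour described in the changeset.
 */
static struct delayed_work balloon_worker;		/* hypothetical */
static unsigned long current_pages, target_pages;	/* hypothetical state */

static void balloon_process(struct work_struct *work)
{
	long credit = (long)target_pages - (long)current_pages;

	if (credit > 0)
		/* keep any partial progress; returns pages actually obtained */
		current_pages += try_increase_reservation(credit);
	else if (credit < 0)
		current_pages -= try_decrease_reservation(-credit);

	/* not at the target yet: retry later rather than giving up */
	if (current_pages != target_pages)
		schedule_delayed_work(&balloon_worker, HZ);
}
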
line source
/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright (C) 2005 Sean Young <sean@mess.org>
 *
 * $Id: rfd_ftl.c,v 1.8 2006/01/15 12:51:44 sean Exp $
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include <asm/types.h>

#define const_cpu_to_le16	__constant_cpu_to_le16

static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63

struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_UNUSED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

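/*
 * Parse the (already cached) header of erase unit 'block_no': record the
 * flash address of every logical sector it holds in part->sector_map,
 * count its free and used sectors, and note a fully-free unit as the
 * reserved block. Returns -ENOENT if the unit lacks the RFD magic.
 */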
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}

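/*
 * Work out the partition geometry (header/data sectors per erase unit,
 * cylinder count), allocate the sector map and per-block state, then read
 * every erase unit header and build the sector map. Fails with -ENOENT if
 * no unit carries the RFD magic.
 */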
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase block has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map", part->mbd.mtd->name);
		goto err;
	}

	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = part->mbd.mtd->read(part->mbd.mtd,
				i * part->block_size, part->header_size,
				&retlen, (u_char*)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

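/*
 * Read one logical sector. Sectors that have never been written (not
 * present in the sector map) read back as zeroes.
 */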
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
						&retlen, (u_char*)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}

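/*
 * Completion handler for an asynchronous erase. On success the RFD magic
 * is written back to the start of the freshly erased unit and the block
 * becomes BLOCK_OK; any failure leaves it BLOCK_FAILED so it will not be
 * used again.
 */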
static void erase_callback(struct erase_info *erase)
{
	struct partition *part;
	u16 magic;
	int i, rc;
	size_t retlen;

	part = (struct partition*)erase->priv;

	i = erase->addr / part->block_size;
	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
		printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
				"on '%s'\n", erase->addr, part->mbd.mtd->name);
		return;
	}

	if (erase->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
				"state %d\n", erase->addr,
				part->mbd.mtd->name, erase->state);

		part->blocks[i].state = BLOCK_FAILED;
		part->blocks[i].free_sectors = 0;
		part->blocks[i].used_sectors = 0;

		kfree(erase);

		return;
	}

	magic = const_cpu_to_le16(RFD_MAGIC);

	part->blocks[i].state = BLOCK_ERASED;
	part->blocks[i].free_sectors = part->data_sectors_per_block;
	part->blocks[i].used_sectors = 0;
	part->blocks[i].erases++;

	rc = part->mbd.mtd->write(part->mbd.mtd,
			part->blocks[i].offset, sizeof(magic), &retlen,
			(u_char*)&magic);

	if (!rc && retlen != sizeof(magic))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
				"header at 0x%lx\n",
				part->mbd.mtd->name,
				part->blocks[i].offset);
		part->blocks[i].state = BLOCK_FAILED;
	}
	else
		part->blocks[i].state = BLOCK_OK;

	kfree(erase);
}

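/*
 * Start an asynchronous erase of the given erase unit; erase_callback()
 * finishes the bookkeeping once the MTD driver reports completion.
 */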
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc = -ENOMEM;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		goto err;

	erase->mtd = part->mbd.mtd;
	erase->callback = erase_callback;
	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;
	erase->priv = (u_long)part;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = part->mbd.mtd->erase(part->mbd.mtd, erase);

	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' "
				"failed\n", erase->addr, erase->len,
				part->mbd.mtd->name);
		kfree(erase);
	}

err:
	return rc;
}

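/*
 * Copy every live sector out of erase unit 'block_no' (rewriting it via
 * rfd_ftl_writesect) so that the unit can be erased. If the sector being
 * superseded by the caller (*old_sector) lives in this unit it is simply
 * dropped rather than copied.
 */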
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = part->mbd.mtd->read(part->mbd.mtd,
		part->blocks[block_no].offset, part->header_size,
		&retlen, (u_char*)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);

		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}

		rc = part->mbd.mtd->read(part->mbd.mtd, addr,
			SECTOR_SIZE, &retlen, sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}

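/*
 * Pick the cheapest erase unit to reclaim (fewest used sectors plus its
 * erase count, which spreads wear), make it the new reserved block and
 * either move its live data out or erase it straight away.
 */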
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	if (part->mbd.mtd->sync)
		part->mbd.mtd->sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming while a free sector remains: the more
		 * sectors have been deleted by the time we reclaim, the less
		 * data has to be moved.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}

/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

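/*
 * Make some erase unit the current write target: find one with free
 * sectors (reclaiming if necessary) and cache its header so free slots
 * can be located without re-reading the flash.
 */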
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
		part->header_size, &retlen, (u_char*)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

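/*
 * Mark the map entry for a superseded sector as SECTOR_DELETED on flash
 * (and in the cached header if it belongs to the current block). A unit
 * left with no used and no free sectors is erased on the spot.
 */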
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = const_cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
		sizeof(del), &retlen, (u_char*)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		if (rc)
			goto err;
	}
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}

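/*
 * Scan the cached header map of the current block for a SECTOR_FREE slot,
 * starting at the first slot expected to still be free.
 */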
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	}
	while(i != stop);

	return -1;
}

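/*
 * Write one sector's data into a free slot of the current (or a newly
 * chosen) erase unit, then record the logical sector number in that
 * unit's map, both on flash and in the cached header.
 */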
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition*)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
			block->offset;
	rc = part->mbd.mtd->write(part->mbd.mtd,
		addr, SECTOR_SIZE, &retlen, (u_char*)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		if (rc)
			goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
			sizeof(entry), &retlen, (u_char*)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		if (rc)
			goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

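/*
 * Block-layer entry point for writes. An all-zero sector is not stored at
 * all: its mapping is simply removed, since unmapped sectors read back as
 * zeroes. Any previous copy of the sector is then marked deleted.
 */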
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition*)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}

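/*
 * Called for every MTD device that appears: claim NOR devices that carry a
 * valid RFD layout and register them as a block translation device. Media
 * with errors, or read-only flash, are exposed read-only.
 */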
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH)
		return;

	part = kcalloc(1, sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size\n");
			kfree(part);
			return;
		}
		else
			part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.blksize = SECTOR_SIZE;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void*)part))
			return;
	}

	kfree(part);
}

static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition*)dev;
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
	kfree(part);
}

struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");