ia64/linux-2.6.18-xen.hg

diff drivers/mtd/rfd_ftl.c @ 0:831230e53067

Import 2.6.18 from kernel.org tarball.
author Ian Campbell <ian.campbell@xensource.com>
date Wed Apr 11 14:15:44 2007 +0100 (2007-04-11)
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/drivers/mtd/rfd_ftl.c	Wed Apr 11 14:15:44 2007 +0100
     1.3 @@ -0,0 +1,857 @@
     1.4 +/*
     1.5 + * rfd_ftl.c -- resident flash disk (flash translation layer)
     1.6 + *
     1.7 + * Copyright (C) 2005  Sean Young <sean@mess.org>
     1.8 + *
     1.9 + * $Id: rfd_ftl.c,v 1.8 2006/01/15 12:51:44 sean Exp $
    1.10 + *
    1.11 + * This type of flash translation layer (FTL) is used by the Embedded BIOS
    1.12 + * by General Software. It is known as the Resident Flash Disk (RFD), see:
    1.13 + *
    1.14 + *	http://www.gensw.com/pages/prod/bios/rfd.htm
    1.15 + *
    1.16 + * based on ftl.c
    1.17 + */
    1.18 +
    1.19 +#include <linux/hdreg.h>
    1.20 +#include <linux/init.h>
    1.21 +#include <linux/mtd/blktrans.h>
    1.22 +#include <linux/mtd/mtd.h>
    1.23 +#include <linux/vmalloc.h>
    1.24 +#include <linux/slab.h>
    1.25 +#include <linux/jiffies.h>
    1.26 +
    1.27 +#include <asm/types.h>
    1.28 +
    1.29 +#define const_cpu_to_le16	__constant_cpu_to_le16
    1.30 +
    1.31 +static int block_size = 0;
    1.32 +module_param(block_size, int, 0);
     1.33 +MODULE_PARM_DESC(block_size, "Block size used by RFD; defaults to the erase unit size");
    1.34 +
    1.35 +#define PREFIX "rfd_ftl: "
    1.36 +
    1.37 +/* This major has been assigned by device@lanana.org */
    1.38 +#ifndef RFD_FTL_MAJOR
    1.39 +#define RFD_FTL_MAJOR		256
    1.40 +#endif
    1.41 +
    1.42 +/* Maximum number of partitions in an FTL region */
    1.43 +#define PART_BITS		4
    1.44 +
    1.45 +/* An erase unit should start with this value */
    1.46 +#define RFD_MAGIC		0x9193
    1.47 +
    1.48 +/* the second value is 0xffff or 0xffc8; function unknown */
    1.49 +
    1.50 +/* the third value is always 0xffff, ignored */
    1.51 +
     1.52 +/* next comes an array of mapping entries, one u16 per data sector */
    1.53 +#define HEADER_MAP_OFFSET	3
    1.54 +#define SECTOR_DELETED		0x0000
    1.55 +#define SECTOR_ZERO		0xfffe
    1.56 +#define SECTOR_FREE		0xffff
    1.57 +
    1.58 +#define SECTOR_SIZE		512
    1.59 +
    1.60 +#define SECTORS_PER_TRACK	63
    1.61 +
    1.62 +struct block {
    1.63 +	enum {
    1.64 +		BLOCK_OK,
    1.65 +		BLOCK_ERASING,
    1.66 +		BLOCK_ERASED,
    1.67 +		BLOCK_UNUSED,
    1.68 +		BLOCK_FAILED
    1.69 +	} state;
    1.70 +	int free_sectors;
    1.71 +	int used_sectors;
    1.72 +	int erases;
    1.73 +	u_long offset;
    1.74 +};
    1.75 +
    1.76 +struct partition {
    1.77 +	struct mtd_blktrans_dev mbd;
    1.78 +
    1.79 +	u_int block_size;		/* size of erase unit */
    1.80 +	u_int total_blocks;		/* number of erase units */
    1.81 +	u_int header_sectors_per_block;	/* header sectors in erase unit */
    1.82 +	u_int data_sectors_per_block;	/* data sectors in erase unit */
    1.83 +	u_int sector_count;		/* sectors in translated disk */
    1.84 +	u_int header_size;		/* bytes in header sector */
    1.85 +	int reserved_block;		/* block next up for reclaim */
    1.86 +	int current_block;		/* block to write to */
    1.87 +	u16 *header_cache;		/* cached header */
    1.88 +
    1.89 +	int is_reclaiming;
    1.90 +	int cylinders;
    1.91 +	int errors;
    1.92 +	u_long *sector_map;
    1.93 +	struct block *blocks;
    1.94 +};
    1.95 +
    1.96 +static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
    1.97 +
    1.98 +static int build_block_map(struct partition *part, int block_no)
    1.99 +{
   1.100 +	struct block *block = &part->blocks[block_no];
   1.101 +	int i;
   1.102 +
   1.103 +	block->offset = part->block_size * block_no;
   1.104 +
   1.105 +	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
   1.106 +		block->state = BLOCK_UNUSED;
   1.107 +		return -ENOENT;
   1.108 +	}
   1.109 +
   1.110 +	block->state = BLOCK_OK;
   1.111 +
   1.112 +	for (i=0; i<part->data_sectors_per_block; i++) {
   1.113 +		u16 entry;
   1.114 +
   1.115 +		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
   1.116 +
   1.117 +		if (entry == SECTOR_DELETED)
   1.118 +			continue;
   1.119 +
   1.120 +		if (entry == SECTOR_FREE) {
   1.121 +			block->free_sectors++;
   1.122 +			continue;
   1.123 +		}
   1.124 +
   1.125 +		if (entry == SECTOR_ZERO)
   1.126 +			entry = 0;
   1.127 +
   1.128 +		if (entry >= part->sector_count) {
   1.129 +			printk(KERN_WARNING PREFIX
   1.130 +				"'%s': unit #%d: entry %d corrupt, "
   1.131 +				"sector %d out of range\n",
   1.132 +				part->mbd.mtd->name, block_no, i, entry);
   1.133 +			continue;
   1.134 +		}
   1.135 +
   1.136 +		if (part->sector_map[entry] != -1) {
   1.137 +			printk(KERN_WARNING PREFIX
   1.138 +				"'%s': more than one entry for sector %d\n",
   1.139 +				part->mbd.mtd->name, entry);
   1.140 +			part->errors = 1;
   1.141 +			continue;
   1.142 +		}
   1.143 +
   1.144 +		part->sector_map[entry] = block->offset +
   1.145 +			(i + part->header_sectors_per_block) * SECTOR_SIZE;
   1.146 +
   1.147 +		block->used_sectors++;
   1.148 +	}
   1.149 +
   1.150 +	if (block->free_sectors == part->data_sectors_per_block)
   1.151 +		part->reserved_block = block_no;
   1.152 +
   1.153 +	return 0;
   1.154 +}
   1.155 +
   1.156 +static int scan_header(struct partition *part)
   1.157 +{
   1.158 +	int sectors_per_block;
   1.159 +	int i, rc = -ENOMEM;
   1.160 +	int blocks_found;
   1.161 +	size_t retlen;
   1.162 +
   1.163 +	sectors_per_block = part->block_size / SECTOR_SIZE;
   1.164 +	part->total_blocks = part->mbd.mtd->size / part->block_size;
   1.165 +
   1.166 +	if (part->total_blocks < 2)
   1.167 +		return -ENOENT;
   1.168 +
    1.169 +	/* each erase block has a three-word (u16) header, followed by the map */
   1.170 +	part->header_sectors_per_block =
   1.171 +			((HEADER_MAP_OFFSET + sectors_per_block) *
   1.172 +			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
   1.173 +
   1.174 +	part->data_sectors_per_block = sectors_per_block -
   1.175 +			part->header_sectors_per_block;
   1.176 +
   1.177 +	part->header_size = (HEADER_MAP_OFFSET +
   1.178 +			part->data_sectors_per_block) * sizeof(u16);
   1.179 +
   1.180 +	part->cylinders = (part->data_sectors_per_block *
   1.181 +			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
   1.182 +
   1.183 +	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
   1.184 +
   1.185 +	part->current_block = -1;
   1.186 +	part->reserved_block = -1;
   1.187 +	part->is_reclaiming = 0;
   1.188 +
   1.189 +	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
   1.190 +	if (!part->header_cache)
   1.191 +		goto err;
   1.192 +
   1.193 +	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
   1.194 +			GFP_KERNEL);
   1.195 +	if (!part->blocks)
   1.196 +		goto err;
   1.197 +
   1.198 +	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
   1.199 +	if (!part->sector_map) {
   1.200 +		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
    1.201 +			"sector map\n", part->mbd.mtd->name);
   1.202 +		goto err;
   1.203 +	}
   1.204 +
   1.205 +	for (i=0; i<part->sector_count; i++)
   1.206 +		part->sector_map[i] = -1;
   1.207 +
   1.208 +	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
   1.209 +		rc = part->mbd.mtd->read(part->mbd.mtd,
   1.210 +				i * part->block_size, part->header_size,
   1.211 +				&retlen, (u_char*)part->header_cache);
   1.212 +
   1.213 +		if (!rc && retlen != part->header_size)
   1.214 +			rc = -EIO;
   1.215 +
   1.216 +		if (rc)
   1.217 +			goto err;
   1.218 +
   1.219 +		if (!build_block_map(part, i))
   1.220 +			blocks_found++;
   1.221 +	}
   1.222 +
   1.223 +	if (blocks_found == 0) {
   1.224 +		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
   1.225 +				part->mbd.mtd->name);
   1.226 +		rc = -ENOENT;
   1.227 +		goto err;
   1.228 +	}
   1.229 +
   1.230 +	if (part->reserved_block == -1) {
   1.231 +		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
   1.232 +				part->mbd.mtd->name);
   1.233 +
   1.234 +		part->errors = 1;
   1.235 +	}
   1.236 +
   1.237 +	return 0;
   1.238 +
   1.239 +err:
   1.240 +	vfree(part->sector_map);
   1.241 +	kfree(part->header_cache);
   1.242 +	kfree(part->blocks);
   1.243 +
   1.244 +	return rc;
   1.245 +}
   1.246 +
   1.247 +static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
   1.248 +{
   1.249 +	struct partition *part = (struct partition*)dev;
   1.250 +	u_long addr;
   1.251 +	size_t retlen;
   1.252 +	int rc;
   1.253 +
   1.254 +	if (sector >= part->sector_count)
   1.255 +		return -EIO;
   1.256 +
   1.257 +	addr = part->sector_map[sector];
   1.258 +	if (addr != -1) {
   1.259 +		rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
   1.260 +						&retlen, (u_char*)buf);
   1.261 +		if (!rc && retlen != SECTOR_SIZE)
   1.262 +			rc = -EIO;
   1.263 +
   1.264 +		if (rc) {
   1.265 +			printk(KERN_WARNING PREFIX "error reading '%s' at "
   1.266 +				"0x%lx\n", part->mbd.mtd->name, addr);
   1.267 +			return rc;
   1.268 +		}
   1.269 +	} else
   1.270 +		memset(buf, 0, SECTOR_SIZE);
   1.271 +
   1.272 +	return 0;
   1.273 +}
   1.274 +
   1.275 +static void erase_callback(struct erase_info *erase)
   1.276 +{
   1.277 +	struct partition *part;
   1.278 +	u16 magic;
   1.279 +	int i, rc;
   1.280 +	size_t retlen;
   1.281 +
   1.282 +	part = (struct partition*)erase->priv;
   1.283 +
   1.284 +	i = erase->addr / part->block_size;
   1.285 +	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
   1.286 +		printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
   1.287 +				"on '%s'\n", erase->addr, part->mbd.mtd->name);
   1.288 +		return;
   1.289 +	}
   1.290 +
   1.291 +	if (erase->state != MTD_ERASE_DONE) {
   1.292 +		printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
   1.293 +				"state %d\n", erase->addr,
   1.294 +				part->mbd.mtd->name, erase->state);
   1.295 +
   1.296 +		part->blocks[i].state = BLOCK_FAILED;
   1.297 +		part->blocks[i].free_sectors = 0;
   1.298 +		part->blocks[i].used_sectors = 0;
   1.299 +
   1.300 +		kfree(erase);
   1.301 +
   1.302 +		return;
   1.303 +	}
   1.304 +
   1.305 +	magic = const_cpu_to_le16(RFD_MAGIC);
   1.306 +
   1.307 +	part->blocks[i].state = BLOCK_ERASED;
   1.308 +	part->blocks[i].free_sectors = part->data_sectors_per_block;
   1.309 +	part->blocks[i].used_sectors = 0;
   1.310 +	part->blocks[i].erases++;
   1.311 +
   1.312 +	rc = part->mbd.mtd->write(part->mbd.mtd,
   1.313 +		part->blocks[i].offset, sizeof(magic), &retlen,
   1.314 +		(u_char*)&magic);
   1.315 +
   1.316 +	if (!rc && retlen != sizeof(magic))
   1.317 +		rc = -EIO;
   1.318 +
   1.319 +	if (rc) {
   1.320 +		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
   1.321 +				"header at 0x%lx\n",
   1.322 +				part->mbd.mtd->name,
   1.323 +				part->blocks[i].offset);
   1.324 +		part->blocks[i].state = BLOCK_FAILED;
   1.325 +	}
   1.326 +	else
   1.327 +		part->blocks[i].state = BLOCK_OK;
   1.328 +
   1.329 +	kfree(erase);
   1.330 +}
   1.331 +
   1.332 +static int erase_block(struct partition *part, int block)
   1.333 +{
   1.334 +	struct erase_info *erase;
   1.335 +	int rc = -ENOMEM;
   1.336 +
   1.337 +	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
   1.338 +	if (!erase)
   1.339 +		goto err;
   1.340 +
   1.341 +	erase->mtd = part->mbd.mtd;
   1.342 +	erase->callback = erase_callback;
   1.343 +	erase->addr = part->blocks[block].offset;
   1.344 +	erase->len = part->block_size;
   1.345 +	erase->priv = (u_long)part;
   1.346 +
   1.347 +	part->blocks[block].state = BLOCK_ERASING;
   1.348 +	part->blocks[block].free_sectors = 0;
   1.349 +
   1.350 +	rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
   1.351 +
   1.352 +	if (rc) {
   1.353 +		printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' "
   1.354 +				"failed\n", erase->addr, erase->len,
   1.355 +				part->mbd.mtd->name);
   1.356 +		kfree(erase);
   1.357 +	}
   1.358 +
   1.359 +err:
   1.360 +	return rc;
   1.361 +}
   1.362 +
   1.363 +static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
   1.364 +{
   1.365 +	void *sector_data;
   1.366 +	u16 *map;
   1.367 +	size_t retlen;
   1.368 +	int i, rc = -ENOMEM;
   1.369 +
   1.370 +	part->is_reclaiming = 1;
   1.371 +
   1.372 +	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
   1.373 +	if (!sector_data)
   1.374 +		goto err3;
   1.375 +
   1.376 +	map = kmalloc(part->header_size, GFP_KERNEL);
   1.377 +	if (!map)
   1.378 +		goto err2;
   1.379 +
   1.380 +	rc = part->mbd.mtd->read(part->mbd.mtd,
   1.381 +		part->blocks[block_no].offset, part->header_size,
   1.382 +		&retlen, (u_char*)map);
   1.383 +
   1.384 +	if (!rc && retlen != part->header_size)
   1.385 +		rc = -EIO;
   1.386 +
   1.387 +	if (rc) {
   1.388 +		printk(KERN_ERR PREFIX "error reading '%s' at "
   1.389 +			"0x%lx\n", part->mbd.mtd->name,
   1.390 +			part->blocks[block_no].offset);
   1.391 +
   1.392 +		goto err;
   1.393 +	}
   1.394 +
   1.395 +	for (i=0; i<part->data_sectors_per_block; i++) {
   1.396 +		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
   1.397 +		u_long addr;
   1.398 +
   1.399 +
   1.400 +		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
   1.401 +			continue;
   1.402 +
   1.403 +		if (entry == SECTOR_ZERO)
   1.404 +			entry = 0;
   1.405 +
   1.406 +		/* already warned about and ignored in build_block_map() */
   1.407 +		if (entry >= part->sector_count)
   1.408 +			continue;
   1.409 +
   1.410 +		addr = part->blocks[block_no].offset +
   1.411 +			(i + part->header_sectors_per_block) * SECTOR_SIZE;
   1.412 +
   1.413 +		if (*old_sector == addr) {
   1.414 +			*old_sector = -1;
   1.415 +			if (!part->blocks[block_no].used_sectors--) {
   1.416 +				rc = erase_block(part, block_no);
   1.417 +				break;
   1.418 +			}
   1.419 +			continue;
   1.420 +		}
   1.421 +		rc = part->mbd.mtd->read(part->mbd.mtd, addr,
   1.422 +			SECTOR_SIZE, &retlen, sector_data);
   1.423 +
   1.424 +		if (!rc && retlen != SECTOR_SIZE)
   1.425 +			rc = -EIO;
   1.426 +
   1.427 +		if (rc) {
   1.428 +			printk(KERN_ERR PREFIX "'%s': Unable to "
   1.429 +				"read sector for relocation\n",
   1.430 +				part->mbd.mtd->name);
   1.431 +
   1.432 +			goto err;
   1.433 +		}
   1.434 +
   1.435 +		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
   1.436 +				entry, sector_data);
   1.437 +
   1.438 +		if (rc)
   1.439 +			goto err;
   1.440 +	}
   1.441 +
   1.442 +err:
   1.443 +	kfree(map);
   1.444 +err2:
   1.445 +	kfree(sector_data);
   1.446 +err3:
   1.447 +	part->is_reclaiming = 0;
   1.448 +
   1.449 +	return rc;
   1.450 +}
   1.451 +
   1.452 +static int reclaim_block(struct partition *part, u_long *old_sector)
   1.453 +{
   1.454 +	int block, best_block, score, old_sector_block;
   1.455 +	int rc;
   1.456 +
   1.457 +	/* we have a race if sync doesn't exist */
   1.458 +	if (part->mbd.mtd->sync)
   1.459 +		part->mbd.mtd->sync(part->mbd.mtd);
   1.460 +
    1.461 +	score = 0x7fffffff; /* INT_MAX */
   1.462 +	best_block = -1;
   1.463 +	if (*old_sector != -1)
   1.464 +		old_sector_block = *old_sector / part->block_size;
   1.465 +	else
   1.466 +		old_sector_block = -1;
   1.467 +
   1.468 +	for (block=0; block<part->total_blocks; block++) {
   1.469 +		int this_score;
   1.470 +
   1.471 +		if (block == part->reserved_block)
   1.472 +			continue;
   1.473 +
    1.474 +		/*
    1.475 +		 * Postpone reclaiming while a free sector remains:
    1.476 +		 * the more sectors have been deleted by then, the
    1.477 +		 * fewer live sectors we will have to move.
    1.478 +		 */
   1.479 +		if (part->blocks[block].free_sectors)
   1.480 +			return 0;
   1.481 +
   1.482 +		this_score = part->blocks[block].used_sectors;
   1.483 +
   1.484 +		if (block == old_sector_block)
   1.485 +			this_score--;
   1.486 +		else {
   1.487 +			/* no point in moving a full block */
   1.488 +			if (part->blocks[block].used_sectors ==
   1.489 +					part->data_sectors_per_block)
   1.490 +				continue;
   1.491 +		}
   1.492 +
   1.493 +		this_score += part->blocks[block].erases;
   1.494 +
   1.495 +		if (this_score < score) {
   1.496 +			best_block = block;
   1.497 +			score = this_score;
   1.498 +		}
   1.499 +	}
   1.500 +
   1.501 +	if (best_block == -1)
   1.502 +		return -ENOSPC;
   1.503 +
   1.504 +	part->current_block = -1;
   1.505 +	part->reserved_block = best_block;
   1.506 +
   1.507 +	pr_debug("reclaim_block: reclaiming block #%d with %d used "
   1.508 +		 "%d free sectors\n", best_block,
   1.509 +		 part->blocks[best_block].used_sectors,
   1.510 +		 part->blocks[best_block].free_sectors);
   1.511 +
   1.512 +	if (part->blocks[best_block].used_sectors)
   1.513 +		rc = move_block_contents(part, best_block, old_sector);
   1.514 +	else
   1.515 +		rc = erase_block(part, best_block);
   1.516 +
   1.517 +	return rc;
   1.518 +}
   1.519 +
   1.520 +/*
    1.521 + * IMPROVE: It would be best to choose the block with the most deleted sectors,
    1.522 + * because if we fill that one up first it is most likely to have the fewest
    1.523 + * live sectors left when it is reclaimed.
   1.524 + */
   1.525 +static int find_free_block(struct partition *part)
   1.526 +{
   1.527 +	int block, stop;
   1.528 +
   1.529 +	block = part->current_block == -1 ?
   1.530 +			jiffies % part->total_blocks : part->current_block;
   1.531 +	stop = block;
   1.532 +
   1.533 +	do {
   1.534 +		if (part->blocks[block].free_sectors &&
   1.535 +				block != part->reserved_block)
   1.536 +			return block;
   1.537 +
   1.538 +		if (part->blocks[block].state == BLOCK_UNUSED)
   1.539 +			erase_block(part, block);
   1.540 +
   1.541 +		if (++block >= part->total_blocks)
   1.542 +			block = 0;
   1.543 +
   1.544 +	} while (block != stop);
   1.545 +
   1.546 +	return -1;
   1.547 +}
   1.548 +
   1.549 +static int find_writable_block(struct partition *part, u_long *old_sector)
   1.550 +{
   1.551 +	int rc, block;
   1.552 +	size_t retlen;
   1.553 +
   1.554 +	block = find_free_block(part);
   1.555 +
   1.556 +	if (block == -1) {
   1.557 +		if (!part->is_reclaiming) {
   1.558 +			rc = reclaim_block(part, old_sector);
   1.559 +			if (rc)
   1.560 +				goto err;
   1.561 +
   1.562 +			block = find_free_block(part);
   1.563 +		}
   1.564 +
   1.565 +		if (block == -1) {
   1.566 +			rc = -ENOSPC;
   1.567 +			goto err;
   1.568 +		}
   1.569 +	}
   1.570 +
   1.571 +	rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
   1.572 +		part->header_size, &retlen, (u_char*)part->header_cache);
   1.573 +
   1.574 +	if (!rc && retlen != part->header_size)
   1.575 +		rc = -EIO;
   1.576 +
   1.577 +	if (rc) {
   1.578 +		printk(KERN_ERR PREFIX "'%s': unable to read header at "
   1.579 +				"0x%lx\n", part->mbd.mtd->name,
   1.580 +				part->blocks[block].offset);
   1.581 +		goto err;
   1.582 +	}
   1.583 +
   1.584 +	part->current_block = block;
   1.585 +
   1.586 +err:
   1.587 +	return rc;
   1.588 +}
   1.589 +
   1.590 +static int mark_sector_deleted(struct partition *part, u_long old_addr)
   1.591 +{
   1.592 +	int block, offset, rc;
   1.593 +	u_long addr;
   1.594 +	size_t retlen;
   1.595 +	u16 del = const_cpu_to_le16(SECTOR_DELETED);
   1.596 +
   1.597 +	block = old_addr / part->block_size;
   1.598 +	offset = (old_addr % part->block_size) / SECTOR_SIZE -
   1.599 +		part->header_sectors_per_block;
   1.600 +
   1.601 +	addr = part->blocks[block].offset +
   1.602 +			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
   1.603 +	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
   1.604 +		sizeof(del), &retlen, (u_char*)&del);
   1.605 +
   1.606 +	if (!rc && retlen != sizeof(del))
   1.607 +		rc = -EIO;
   1.608 +
    1.609 +	if (rc) {
    1.610 +		printk(KERN_ERR PREFIX "error writing '%s' at "
    1.611 +			"0x%lx\n",
    1.612 +			part->mbd.mtd->name, addr);
    1.613 +		goto err;
    1.614 +	}
   1.615 +	if (block == part->current_block)
   1.616 +		part->header_cache[offset + HEADER_MAP_OFFSET] = del;
   1.617 +
   1.618 +	part->blocks[block].used_sectors--;
   1.619 +
   1.620 +	if (!part->blocks[block].used_sectors &&
   1.621 +	    !part->blocks[block].free_sectors)
   1.622 +		rc = erase_block(part, block);
   1.623 +
   1.624 +err:
   1.625 +	return rc;
   1.626 +}
   1.627 +
   1.628 +static int find_free_sector(const struct partition *part, const struct block *block)
   1.629 +{
   1.630 +	int i, stop;
   1.631 +
   1.632 +	i = stop = part->data_sectors_per_block - block->free_sectors;
   1.633 +
   1.634 +	do {
   1.635 +		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
   1.636 +				== SECTOR_FREE)
   1.637 +			return i;
   1.638 +
   1.639 +		if (++i == part->data_sectors_per_block)
   1.640 +			i = 0;
   1.641 +	}
   1.642 +	while(i != stop);
   1.643 +
   1.644 +	return -1;
   1.645 +}
   1.646 +
   1.647 +static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
   1.648 +{
   1.649 +	struct partition *part = (struct partition*)dev;
   1.650 +	struct block *block;
   1.651 +	u_long addr;
   1.652 +	int i;
   1.653 +	int rc;
   1.654 +	size_t retlen;
   1.655 +	u16 entry;
   1.656 +
   1.657 +	if (part->current_block == -1 ||
   1.658 +		!part->blocks[part->current_block].free_sectors) {
   1.659 +
   1.660 +		rc = find_writable_block(part, old_addr);
   1.661 +		if (rc)
   1.662 +			goto err;
   1.663 +	}
   1.664 +
   1.665 +	block = &part->blocks[part->current_block];
   1.666 +
   1.667 +	i = find_free_sector(part, block);
   1.668 +
   1.669 +	if (i < 0) {
   1.670 +		rc = -ENOSPC;
   1.671 +		goto err;
   1.672 +	}
   1.673 +
   1.674 +	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
   1.675 +		block->offset;
   1.676 +	rc = part->mbd.mtd->write(part->mbd.mtd,
   1.677 +		addr, SECTOR_SIZE, &retlen, (u_char*)buf);
   1.678 +
   1.679 +	if (!rc && retlen != SECTOR_SIZE)
   1.680 +		rc = -EIO;
   1.681 +
    1.682 +	if (rc) {
    1.683 +		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
    1.684 +				part->mbd.mtd->name,
    1.685 +				addr);
    1.686 +		goto err;
    1.687 +	}
   1.688 +
   1.689 +	part->sector_map[sector] = addr;
   1.690 +
   1.691 +	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
   1.692 +
   1.693 +	part->header_cache[i + HEADER_MAP_OFFSET] = entry;
   1.694 +
   1.695 +	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
   1.696 +	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
   1.697 +			sizeof(entry), &retlen, (u_char*)&entry);
   1.698 +
   1.699 +	if (!rc && retlen != sizeof(entry))
   1.700 +		rc = -EIO;
   1.701 +
    1.702 +	if (rc) {
    1.703 +		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
    1.704 +				part->mbd.mtd->name,
    1.705 +				addr);
    1.706 +		goto err;
    1.707 +	}
   1.708 +	block->used_sectors++;
   1.709 +	block->free_sectors--;
   1.710 +
   1.711 +err:
   1.712 +	return rc;
   1.713 +}
   1.714 +
   1.715 +static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
   1.716 +{
   1.717 +	struct partition *part = (struct partition*)dev;
   1.718 +	u_long old_addr;
   1.719 +	int i;
   1.720 +	int rc = 0;
   1.721 +
   1.722 +	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
   1.723 +
   1.724 +	if (part->reserved_block == -1) {
   1.725 +		rc = -EACCES;
   1.726 +		goto err;
   1.727 +	}
   1.728 +
   1.729 +	if (sector >= part->sector_count) {
   1.730 +		rc = -EIO;
   1.731 +		goto err;
   1.732 +	}
   1.733 +
   1.734 +	old_addr = part->sector_map[sector];
   1.735 +
   1.736 +	for (i=0; i<SECTOR_SIZE; i++) {
   1.737 +		if (!buf[i])
   1.738 +			continue;
   1.739 +
   1.740 +		rc = do_writesect(dev, sector, buf, &old_addr);
   1.741 +		if (rc)
   1.742 +			goto err;
   1.743 +		break;
   1.744 +	}
   1.745 +
   1.746 +	if (i == SECTOR_SIZE)
   1.747 +		part->sector_map[sector] = -1;
   1.748 +
   1.749 +	if (old_addr != -1)
   1.750 +		rc = mark_sector_deleted(part, old_addr);
   1.751 +
   1.752 +err:
   1.753 +	return rc;
   1.754 +}
   1.755 +
   1.756 +static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
   1.757 +{
   1.758 +	struct partition *part = (struct partition*)dev;
   1.759 +
   1.760 +	geo->heads = 1;
   1.761 +	geo->sectors = SECTORS_PER_TRACK;
   1.762 +	geo->cylinders = part->cylinders;
   1.763 +
   1.764 +	return 0;
   1.765 +}
   1.766 +
   1.767 +static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
   1.768 +{
   1.769 +	struct partition *part;
   1.770 +
   1.771 +	if (mtd->type != MTD_NORFLASH)
   1.772 +		return;
   1.773 +
   1.774 +	part = kcalloc(1, sizeof(struct partition), GFP_KERNEL);
   1.775 +	if (!part)
   1.776 +		return;
   1.777 +
   1.778 +	part->mbd.mtd = mtd;
   1.779 +
   1.780 +	if (block_size)
   1.781 +		part->block_size = block_size;
   1.782 +	else {
    1.783 +		if (!mtd->erasesize) {
    1.784 +			printk(KERN_WARNING PREFIX "please provide block_size\n");
    1.785 +			kfree(part);
    1.786 +			return;
    1.787 +		} else
    1.788 +			part->block_size = mtd->erasesize;
   1.789 +	}
   1.790 +
   1.791 +	if (scan_header(part) == 0) {
   1.792 +		part->mbd.size = part->sector_count;
   1.793 +		part->mbd.blksize = SECTOR_SIZE;
   1.794 +		part->mbd.tr = tr;
   1.795 +		part->mbd.devnum = -1;
   1.796 +		if (!(mtd->flags & MTD_WRITEABLE))
   1.797 +			part->mbd.readonly = 1;
   1.798 +		else if (part->errors) {
   1.799 +			printk(KERN_WARNING PREFIX "'%s': errors found, "
   1.800 +					"setting read-only\n", mtd->name);
   1.801 +			part->mbd.readonly = 1;
   1.802 +		}
   1.803 +
   1.804 +		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
   1.805 +				mtd->name, mtd->type, mtd->flags);
   1.806 +
   1.807 +		if (!add_mtd_blktrans_dev((void*)part))
   1.808 +			return;
   1.809 +	}
   1.810 +
   1.811 +	kfree(part);
   1.812 +}
   1.813 +
   1.814 +static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
   1.815 +{
   1.816 +	struct partition *part = (struct partition*)dev;
   1.817 +	int i;
   1.818 +
   1.819 +	for (i=0; i<part->total_blocks; i++) {
   1.820 +		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
   1.821 +			part->mbd.mtd->name, i, part->blocks[i].erases);
   1.822 +	}
   1.823 +
   1.824 +	del_mtd_blktrans_dev(dev);
   1.825 +	vfree(part->sector_map);
   1.826 +	kfree(part->header_cache);
   1.827 +	kfree(part->blocks);
   1.828 +	kfree(part);
   1.829 +}
   1.830 +
    1.831 +static struct mtd_blktrans_ops rfd_ftl_tr = {
   1.832 +	.name		= "rfd",
   1.833 +	.major		= RFD_FTL_MAJOR,
   1.834 +	.part_bits	= PART_BITS,
   1.835 +	.readsect	= rfd_ftl_readsect,
   1.836 +	.writesect	= rfd_ftl_writesect,
   1.837 +	.getgeo		= rfd_ftl_getgeo,
   1.838 +	.add_mtd	= rfd_ftl_add_mtd,
   1.839 +	.remove_dev	= rfd_ftl_remove_dev,
   1.840 +	.owner		= THIS_MODULE,
   1.841 +};
   1.842 +
   1.843 +static int __init init_rfd_ftl(void)
   1.844 +{
   1.845 +	return register_mtd_blktrans(&rfd_ftl_tr);
   1.846 +}
   1.847 +
   1.848 +static void __exit cleanup_rfd_ftl(void)
   1.849 +{
   1.850 +	deregister_mtd_blktrans(&rfd_ftl_tr);
   1.851 +}
   1.852 +
   1.853 +module_init(init_rfd_ftl);
   1.854 +module_exit(cleanup_rfd_ftl);
   1.855 +
   1.856 +MODULE_LICENSE("GPL");
   1.857 +MODULE_AUTHOR("Sean Young <sean@mess.org>");
   1.858 +MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
   1.859 +		"used by General Software's Embedded BIOS");
   1.860 +
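
For reference, a minimal userspace sketch (not part of the imported file) that decodes the header of a single RFD erase unit from a raw dump of the flash partition, following the on-flash layout used by scan_header() and build_block_map() above: a magic word, two reserved words, then one little-endian u16 map entry per data sector. The dump filename rfd_dump.bin and the 64 KiB erase-unit size are assumptions for illustration only; the constants are copied from rfd_ftl.c.

#include <stdio.h>
#include <stdint.h>

#define RFD_MAGIC         0x9193
#define HEADER_MAP_OFFSET 3
#define SECTOR_DELETED    0x0000
#define SECTOR_ZERO       0xfffe
#define SECTOR_FREE       0xffff
#define SECTOR_SIZE       512

#define BLOCK_SIZE        65536	/* assumed erase unit size */
#define SECTORS_PER_BLOCK (BLOCK_SIZE / SECTOR_SIZE)
#define HEADER_SECTORS \
	(((HEADER_MAP_OFFSET + SECTORS_PER_BLOCK) * 2 + SECTOR_SIZE - 1) / SECTOR_SIZE)
#define DATA_SECTORS      (SECTORS_PER_BLOCK - HEADER_SECTORS)
#define HEADER_WORDS      (HEADER_MAP_OFFSET + DATA_SECTORS)

/* map entries are stored little-endian on flash */
static uint16_t le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t hdr[HEADER_WORDS * 2];
	FILE *f = fopen("rfd_dump.bin", "rb");	/* assumed raw dump of the partition */
	int i;

	if (!f || fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr)) {
		perror("rfd_dump.bin");
		return 1;
	}
	fclose(f);

	if (le16(hdr) != RFD_MAGIC) {
		printf("erase unit is unformatted (no RFD magic)\n");
		return 0;
	}

	/* one 16-bit map entry per data sector, after the three header words */
	for (i = 0; i < DATA_SECTORS; i++) {
		uint16_t entry = le16(hdr + 2 * (HEADER_MAP_OFFSET + i));

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;
		if (entry == SECTOR_ZERO)
			entry = 0;	/* 0xfffe encodes logical sector 0 */
		printf("data sector %d holds logical sector %u\n", i, (unsigned)entry);
	}
	return 0;
}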