ia64/linux-2.6.18-xen.hg

view drivers/mtd/chips/cfi_cmdset_0002.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to
ask a domain to balloon to more than its allocation, nor would you
expect it to deliberately over-commit memory by setting balloon
targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100
parents 831230e53067
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	if (atmel_pri.BottomBoot)
		extp->TopBottom = 2;
	else
		extp->TopBottom = 3;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
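
/* cfi_fixup() (in the generic CFI support code) walks a table like the
 * ones above and calls every entry whose manufacturer and device IDs
 * match the probed chip; CFI_MFR_ANY and CFI_ID_ANY act as wildcards,
 * and a NULL fixup function terminates the table. */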

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming top.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
		    ((cfi->device_type == CFI_DEVICETYPE_X8) &&
		     (cfi->cfiq->InterfaceDesc == 2)) ||
		    /* x32 in x16 mode */
		    ((cfi->device_type == CFI_DEVICETYPE_X16) &&
		     (cfi->cfiq->InterfaceDesc == 4)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
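
/*
 * Usage sketch: cfi_cmdset_0002() is not normally called directly by a
 * map driver.  The map driver goes through the generic CFI probe, which
 * dispatches to this command-set handler when the chip reports primary
 * vendor command set 0x0002.  Roughly (my_map is a hypothetical,
 * already-initialised struct map_info):
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);	// or add_mtd_partitions(...)
 */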

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
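
/* The double read above exploits the AMD status-polling scheme: while a
 * program or erase is in progress, DQ6 toggles on every read, so two
 * successive reads returning the same value mean the operation has
 * completed and the chip is back in (or effectively in) array mode. */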

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(mode == FL_READY || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're
				 * trying to use the erase-in-progress
				 * sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple of
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;
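
	/* Enter the SecSi (security/OTP) sector: the standard two-cycle
	 * unlock followed by the 0x88 Enter SecSi Sector Region command. */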
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);
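
	/* Leave the SecSi sector again: unlock cycles followed by the
	 * 0x90/0x00 exit sequence, returning the chip to array mode. */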
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
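	/* Single-word program: two unlock cycles (0xAA/0x55), the 0xA0
	 * Program command, then the datum written to its target address. */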
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
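
	/* Buffered-write protocol: unlock cycles, then 0x25 (Write to
	 * Buffer) at the sector address, the word count minus one, the
	 * data words themselves, and finally the 0x29 Program Buffer to
	 * Flash confirm command issued below. */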
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
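
	/* Chip erase is a six-cycle sequence: unlock (0xAA/0x55), 0x80
	 * erase setup, unlock again, then 0x10 for whole-chip erase. */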
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
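
	/* Sector erase: same six-cycle sequence as chip erase, except the
	 * final cycle writes 0x30 to an address within the target sector. */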
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");