/*
 * Common Flash Interface support:
 *   SST Standard Vendor Command Set (ID 0x0701)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 *
 * 2_by_8 routines added by Simon Munton
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
static int cfi_sststd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_sststd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_sststd_erase_onesize(struct mtd_info *, struct erase_info *);
static int cfi_sststd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_sststd_sync (struct mtd_info *);
static int cfi_sststd_suspend (struct mtd_info *);
static void cfi_sststd_resume (struct mtd_info *);

static void cfi_sststd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0701(struct map_info *, int);
static struct mtd_info *cfi_sststd_setup (struct map_info *);
static struct mtd_chip_driver cfi_sststd_chipdrv = {
	probe:   NULL, /* Not usable directly */
	destroy: cfi_sststd_destroy,
	name:    "cfi_cmdset_0701",
};
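
/*
 * Entry point called by the CFI/JEDEC probe code.  In CFI mode this
 * queries the SST extended query table and reads the manufacturer and
 * device IDs, then records the unlock addresses and per-chip timeouts
 * before handing over to cfi_sststd_setup() to build the mtd_info.
 */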
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	__u8 major, minor;
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode == 1) {
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;

		cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);

		major = cfi_read_query(map, base + (adr+3)*ofs_factor);
		minor = cfi_read_query(map, base + (adr+4)*ofs_factor);

		printk(" SST Query Table v%c.%c at 0x%4.4X\n",
		       major, minor, adr);
		cfi_send_gen_cmd(0xf0, 0x5555, base, map, cfi, cfi->device_type, NULL);

		cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x90, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi->mfr = cfi_read_query(map, base);
		cfi->id = cfi_read_query(map, base + ofs_factor);

		cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);

		switch (cfi->device_type) {
		case CFI_DEVICETYPE_X16:
			cfi->addr_unlock1 = 0x5555;
			cfi->addr_unlock2 = 0x2AAA;
			break;
		default:
			printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0701 device type %d\n", cfi->device_type);
			return NULL;
		}
	}

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_sststd_chipdrv;

	cfi_send_gen_cmd(0xf0, 0x5555, base, map, cfi, cfi->device_type, NULL);
	return cfi_sststd_setup(map);
}
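
/*
 * Allocate and fill in the mtd_info structure: total size, erase
 * geometry (single or multiple erase regions) and the read/write/erase/
 * suspend/resume methods for the supported bus widths.
 */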
static struct mtd_info *cfi_sststd_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	printk("number of %s chips: %d\n", (cfi->cfi_mode)?"JEDEC":"CFI",cfi->numchips);

	if (!mtd) {
		printk("Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	/* Also select the correct geometry setup too */
	mtd->size = devsize * cfi->numchips;

	if (cfi->cfiq->NumEraseRegions == 1) {
		/* No need to muck about with multiple erase sizes */
		mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
	} else {
		unsigned long offset = 0;
		int i, j;

		mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
		mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
		if (!mtd->eraseregions) {
			printk("Failed to allocate memory for MTD erase region info\n");
			kfree(cfi->cmdset_priv);
			return NULL;
		}

		for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
			unsigned long ernum, ersize;
			ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
			ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

			if (mtd->erasesize < ersize) {
				mtd->erasesize = ersize;
			}
			for (j=0; j<cfi->numchips; j++) {
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			}
			offset += (ersize * ernum);
		}

		for (i=0; i<mtd->numeraseregions; i++) {
			printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
			       i, mtd->eraseregions[i].offset,
			       mtd->eraseregions[i].erasesize,
			       mtd->eraseregions[i].numblocks);
		}
	}

	switch (CFIDEV_BUSWIDTH)
	{
	case 1:
	case 2:
	case 4:
		if (mtd->numeraseregions > 1)
			mtd->erase = cfi_sststd_erase_varsize;
		else
			mtd->erase = cfi_sststd_erase_onesize;
		mtd->read = cfi_sststd_read;
		mtd->write = cfi_sststd_write;
		break;

	default:
		printk("Unsupported buswidth\n");
		kfree(mtd);
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->sync = cfi_sststd_sync;
	mtd->suspend = cfi_sststd_suspend;
	mtd->resume = cfi_sststd_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	map->fldrv = &cfi_sststd_chipdrv;
	mtd->name = map->name;
	return mtd;
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
		printk("Waiting for chip to read, status = %d\n", chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	map->copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}
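
/*
 * MTD read method: split the request at chip boundaries and let
 * do_read_onechip() handle each piece.
 */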
static int cfi_sststd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
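
/*
 * Program one bus-width word.  Send the SST unlock/program sequence,
 * wait the typical word write time, then poll the location until two
 * consecutive reads return the same value and verify the result against
 * the datum that was written.
 */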
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
{
	unsigned long timeo = jiffies + HZ;
	unsigned int Last[4];
	unsigned long Count = 0;
	struct cfi_private *cfi = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
		printk("Waiting for chip to write, status = %d\n", chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		printk("Wake up to write:\n");
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);

	cfi_write(map, datum, adr);

	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->word_write_time);
	cfi_spin_lock(chip->mutex);

	Last[0] = cfi_read(map, adr);
	// printk("Last[0] is %x\n", Last[0]);
	Last[1] = cfi_read(map, adr);
	// printk("Last[1] is %x\n", Last[1]);
	Last[2] = cfi_read(map, adr);
	// printk("Last[2] is %x\n", Last[2]);

	for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] && Count < 10000; Count++) {
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(1);
		cfi_spin_lock(chip->mutex);

		Last[Count % 4] = cfi_read(map, adr);
		// printk("Last[%d%%4] is %x\n", Count, Last[Count%4]);
	}

	if (Last[(Count - 1) % 4] != datum) {
		printk("Last[%ld] is %x, datum is %x\n",(Count - 1) % 4,Last[(Count - 1) % 4],datum);
		cfi_send_gen_cmd(0xF0, 0, chip->start, map, cfi, cfi->device_type, NULL);
		ret = -EIO;
	}

	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
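
/*
 * MTD write method: handle a possibly unaligned head, then write
 * aligned bus-width words, then a possibly unaligned tail, crossing
 * chip boundaries as needed.  Partial words are read-modified-written.
 */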
static int cfi_sststd_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (CFIDEV_BUSWIDTH-1)) {
		unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
		int i = ofs - bus_ofs;
		int n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
		while (len && i < CFIDEV_BUSWIDTH)
			tmp_buf[i++] = buf[n++], len--;

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, 0);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= CFIDEV_BUSWIDTH) {
		__u32 datum;

		if (cfi_buswidth_is_1()) {
			datum = *(__u8*)buf;
		} else if (cfi_buswidth_is_2()) {
			datum = *(__u16*)buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, cfi->fast_prog);
		if (ret)
			return ret;

		ofs += CFIDEV_BUSWIDTH;
		buf += CFIDEV_BUSWIDTH;
		(*retlen) += CFIDEV_BUSWIDTH;
		len -= CFIDEV_BUSWIDTH;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (CFIDEV_BUSWIDTH-1)) {
		int i = 0, n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
		while (len--)
			tmp_buf[i++] = buf[n++];

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, 0);
		if (ret)
			return ret;

		(*retlen) += n;
	}

	return 0;
}
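
/*
 * Erase a single block.  Send the six-cycle SST sector erase sequence,
 * then poll the block until the ready bit (0x80) is set, honouring
 * erase suspension and a 20 second timeout.
 */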
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	unsigned int status;
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned int rdy_mask;
	int z = 0;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_write(map, CMD(0x30), adr);

	timeo = jiffies + (HZ*20);

	cfi_spin_unlock(chip->mutex);
	schedule_timeout(HZ);
	cfi_spin_lock(chip->mutex);

	rdy_mask = CMD(0x80);

	/* Once the state machine's known to be working I'll do that */

	while ( ( (status = cfi_read(map,adr)) & rdy_mask ) != rdy_mask ) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);
			printk("erase suspended. Sleeping\n");

			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*2);
			cfi_spin_lock(chip->mutex);
			continue;
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			cfi_spin_unlock(chip->mutex);
			printk("waiting for erase to complete timed out.");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);

		z++;
		if ( 0 && !(z % 100 ))
			printk("chip not ready yet after erase. looping\n");

		cfi_udelay(1);

		cfi_spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);
	return 0;
}
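
/*
 * Erase handler for chips with more than one erase region: check that
 * the request is aligned to the erase size in effect at both ends, then
 * erase block by block, moving to the next region (and chip) as the
 * address advances.
 */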
static int cfi_sststd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;
	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}
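
/*
 * Erase handler for the simple case of a single uniform erase size.
 */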
static int cfi_sststd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;

	if (instr->addr & (mtd->erasesize - 1))
		return -EINVAL;

	if (instr->len & (mtd->erasesize -1))
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}
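
/*
 * Wait for every chip to become idle and park it in FL_SYNCING, then
 * restore the previous state of each chip.
 */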
static void cfi_sststd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		cfi_spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			cfi_spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		cfi_spin_unlock(chip->mutex);
	}
}
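
/*
 * Power management: move every idle chip to FL_PM_SUSPENDED.  If any
 * chip is busy, the chips already suspended are restored and an error
 * is returned so the suspend can be retried.
 */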
static int cfi_sststd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
//printk("suspend\n");

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		cfi_spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			cfi_spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			cfi_spin_unlock(chip->mutex);
		}
	}

	return ret;
}
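
/*
 * Power management resume: put suspended chips back into read array
 * mode (0xF0 reset) and mark them FL_READY.
 */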
static void cfi_sststd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
//printk("resume\n");

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			cfi_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk("Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		cfi_spin_unlock(chip->mutex);
	}
}
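
/*
 * Tear-down when the map driver goes away: free the private data
 * allocated by the probe code.
 */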
static void cfi_sststd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
}
#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
#define cfi_sststd_init init_module
#define cfi_sststd_exit cleanup_module
#endif

static char im_name[]="cfi_cmdset_0701";

mod_init_t cfi_sststd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0701);
	return 0;
}

mod_exit_t cfi_sststd_exit(void)
{
	inter_module_unregister(im_name);
}

module_init(cfi_sststd_init);
module_exit(cfi_sststd_exit);