/*
 * NVRAM variable manipulation (Linux kernel half)
 *
 * Copyright 2006, Broadcom Corporation
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id: nvram_linux.c,v 1.19 2006/04/08 07:12:42 honor Exp $
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/wrapper.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mtd/mtd.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include <typedefs.h>
#include <bcmendian.h>
#include <bcmnvram.h>
#include <bcmutils.h>
#include <sbconfig.h>
#include <sbchipc.h>
#include <sbutils.h>
#include <hndmips.h>
#include <sflash.h>
/* In BSS to minimize text size and page aligned so it can be mmap()-ed */
static char nvram_buf[NVRAM_SPACE] __attribute__((aligned(PAGE_SIZE)));
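/*
 * Layout note (describing how the rest of this file uses nvram_buf): the
 * buffer holds a struct nvram_header immediately followed by packed
 * "name=value\0" strings and a terminating empty string, i.e.
 *
 *   [ nvram_header | name=value\0 name=value\0 ... \0 ]
 *
 * early_nvram_get() and early_nvram_getall() below walk exactly this image.
 */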
#ifdef MODULE

#define early_nvram_get(name) nvram_get(name)

#else /* !MODULE */
/* Global SB handle */
extern void *bcm947xx_sbh;
extern spinlock_t bcm947xx_sbh_lock;

static int cfe_env;
extern char *cfe_env_get(char *nv_buf, const char *name);

#define sbh bcm947xx_sbh
#define sbh_lock bcm947xx_sbh_lock
#define KB * 1024
#define MB * 1024 * 1024
/* Probe for NVRAM header */
static void __init
early_nvram_init(void)
{
	struct nvram_header *header;
	chipcregs_t *cc;
	struct sflash *info = NULL;
	int i;
	uint32 base, off, lim;
	u32 *src, *dst;
	if ((cc = sb_setcore(sbh, SB_CC, 0)) != NULL) {
		base = KSEG1ADDR(SB_FLASH2);
		switch (readl(&cc->capabilities) & CC_CAP_FLASH_MASK) {
		case PFLASH:
			lim = SB_FLASH2_SZ;
			break;

		case SFLASH_ST:
		case SFLASH_AT:
			if ((info = sflash_init(sbh, cc)) == NULL)
				return;
			lim = info->size;
			break;

		case FLASH_NONE:
		default:
			return;
		}
	} else {
		/* extif assumed, Stop at 4 MB */
		base = KSEG1ADDR(SB_FLASH1);
		lim = 4 MB;
	}

	/* XXX: hack for supporting the CFE environment stuff on WGT634U */
	src = (u32 *) KSEG1ADDR(base + 8 * 1024 * 1024 - 0x2000);
	dst = (u32 *) nvram_buf;
	if ((lim == 0x02000000) && ((*src & 0xff00ff) == 0x000001)) {
		printk("early_nvram_init: WGT634U NVRAM found.\n");

		for (i = 0; i < 0x1ff0; i++) {
			if (*src == 0xFFFFFFFF)
				break;
			*dst++ = *src++;
		}
		cfe_env = 1;
		return;
	}
	off = FLASH_MIN;
	while (off <= lim) {
		/* Windowed flash access */
		header = (struct nvram_header *) KSEG1ADDR(base + off - NVRAM_SPACE);
		if (header->magic == NVRAM_MAGIC)
			goto found;
		off <<= 1;
	}

	/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
	header = (struct nvram_header *) KSEG1ADDR(base + 4 KB);
	if (header->magic == NVRAM_MAGIC)
		goto found;

	header = (struct nvram_header *) KSEG1ADDR(base + 1 KB);
	if (header->magic == NVRAM_MAGIC)
		goto found;

	printk("early_nvram_init: NVRAM not found\n");
	return;

found:
	src = (u32 *) header;
	dst = (u32 *) nvram_buf;
	for (i = 0; i < sizeof(struct nvram_header); i += 4)
		*dst++ = *src++;
	for (; i < header->len && i < NVRAM_SPACE; i += 4)
		*dst++ = ltoh32(*src++);
}
/* Early (before mm or mtd) read-only access to NVRAM */
static char * __init
early_nvram_get(const char *name)
{
	char *var, *value, *end, *eq;

	if (!nvram_buf[0])
		early_nvram_init();

	if (cfe_env)
		return cfe_env_get(nvram_buf, name);

	/* Look for name=value and return value */
	var = &nvram_buf[sizeof(struct nvram_header)];
	end = nvram_buf + sizeof(nvram_buf) - 2;
	end[0] = end[1] = '\0';
	for (; *var; var = value + strlen(value) + 1) {
		if (!(eq = strchr(var, '=')))
			break;
		value = eq + 1;
		if ((eq - var) == strlen(name) && strncmp(var, name, (eq - var)) == 0)
			return value;
	}

	return NULL;
}
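/*
 * Illustrative call (a sketch, not taken from this file; the variable name is
 * hypothetical): early boot code that runs before the character device exists
 * can only do read-only lookups straight out of nvram_buf, e.g.
 *
 *     const char *clk = early_nvram_get("clkfreq");
 *     if (clk)
 *         printk("clkfreq=%s\n", clk);
 *
 * Once nvram_major is valid, nvram_get() below switches to the hash-table
 * backed real_nvram_get() instead.
 */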
static int __init
early_nvram_getall(char *buf, int count)
{
	char *var, *end;
	int len = 0;

	if (!nvram_buf[0])
		early_nvram_init();

	bzero(buf, count);

	/* Write name=value\0 ... \0\0 */
	var = &nvram_buf[sizeof(struct nvram_header)];
	end = nvram_buf + sizeof(nvram_buf) - 2;
	end[0] = end[1] = '\0';
	for (; *var; var += strlen(var) + 1) {
		if ((count - len) <= (strlen(var) + 1))
			break;
		len += sprintf(buf + len, "%s", var) + 1;
	}

	return 0;
}

#endif /* !MODULE */
extern char * _nvram_get(const char *name);
extern int _nvram_set(const char *name, const char *value);
extern int _nvram_unset(const char *name);
extern int _nvram_getall(char *buf, int count);
extern int _nvram_commit(struct nvram_header *header);
extern int _nvram_init(void *sbh);
extern void _nvram_exit(void);

static spinlock_t nvram_lock = SPIN_LOCK_UNLOCKED;
static struct semaphore nvram_sem;
static unsigned long nvram_offset = 0;
static int nvram_major = -1;
static devfs_handle_t nvram_handle = NULL;
static struct mtd_info *nvram_mtd = NULL;
int
_nvram_read(char *buf)
{
	struct nvram_header *header = (struct nvram_header *) buf;
	size_t len;

	if (!nvram_mtd ||
	    MTD_READ(nvram_mtd, nvram_mtd->size - NVRAM_SPACE, NVRAM_SPACE, &len, buf) ||
	    len != NVRAM_SPACE ||
	    header->magic != NVRAM_MAGIC) {
		/* Maybe we can recover some data from early initialization */
		memcpy(buf, nvram_buf, NVRAM_SPACE);
	}

	return 0;
}
struct nvram_tuple *
_nvram_realloc(struct nvram_tuple *t, const char *name, const char *value)
{
	if ((nvram_offset + strlen(value) + 1) > NVRAM_SPACE)
		return NULL;

	if (!t) {
		if (!(t = kmalloc(sizeof(struct nvram_tuple) + strlen(name) + 1, GFP_ATOMIC)))
			return NULL;

		/* Copy name */
		t->name = (char *) &t[1];
		strcpy(t->name, name);

		t->value = NULL;
	}

	/* Copy value */
	if (!t->value || strcmp(t->value, value)) {
		t->value = &nvram_buf[nvram_offset];
		strcpy(t->value, value);
		nvram_offset += strlen(value) + 1;
	}

	return t;
}
void
_nvram_free(struct nvram_tuple *t)
{
	if (!t)
		nvram_offset = 0;
	else
		kfree(t);
}
int
nvram_set(const char *name, const char *value)
{
	unsigned long flags;
	int ret;
	struct nvram_header *header;

	spin_lock_irqsave(&nvram_lock, flags);
	if ((ret = _nvram_set(name, value))) {
		/* Consolidate space and try again */
		if ((header = kmalloc(NVRAM_SPACE, GFP_ATOMIC))) {
			if (_nvram_commit(header) == 0)
				ret = _nvram_set(name, value);
			kfree(header);
		}
	}
	spin_unlock_irqrestore(&nvram_lock, flags);

	return ret;
}
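/*
 * Minimal in-kernel usage sketch (illustrative; the variable name is made up):
 *
 *     if (nvram_set("lan_ipaddr", "192.168.1.1") == 0)
 *         nvram_commit();
 *
 * nvram_set() only updates the in-memory tuple table and nvram_buf; nothing
 * reaches the flash until nvram_commit() further down rewrites the MTD
 * partition.
 */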
char *
real_nvram_get(const char *name)
{
	char *value;
	unsigned long flags;

	spin_lock_irqsave(&nvram_lock, flags);
	value = _nvram_get(name);
	spin_unlock_irqrestore(&nvram_lock, flags);

	return value;
}
char *
nvram_get(const char *name)
{
	if (nvram_major >= 0)
		return real_nvram_get(name);
	else
		return early_nvram_get(name);
}
int
nvram_unset(const char *name)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_unset(name);
	spin_unlock_irqrestore(&nvram_lock, flags);

	return ret;
}
static void
erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv;
	wake_up(wait_q);
}
int
nvram_commit(void)
{
	char *buf;
	size_t erasesize, len, magic_len;
	unsigned int i;
	int ret;
	struct nvram_header *header;
	unsigned long flags;
	u_int32_t offset;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	u_int32_t magic_offset = 0; /* Offset for writing MAGIC # */

	if (!nvram_mtd) {
		printk("nvram_commit: NVRAM not found\n");
		return -ENODEV;
	}

	if (in_interrupt()) {
		printk("nvram_commit: not committing in interrupt\n");
		return -EINVAL;
	}
	/* Backup sector blocks to be erased */
	erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
	if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
		printk("nvram_commit: out of memory\n");
		return -ENOMEM;
	}

	down(&nvram_sem);
	if ((i = erasesize - NVRAM_SPACE) > 0) {
		offset = nvram_mtd->size - erasesize;
		len = 0;
		ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
		if (ret || len != i) {
			printk("nvram_commit: read error ret = %d, len = %d/%d\n", ret, len, i);
			ret = -EIO;
			goto done;
		}
		header = (struct nvram_header *)(buf + i);
		magic_offset = i + ((void *)&header->magic - (void *)header);
	} else {
		offset = nvram_mtd->size - NVRAM_SPACE;
		header = (struct nvram_header *)buf;
		magic_offset = ((void *)&header->magic - (void *)header);
	}

	/* Clear the existing magic # to mark the NVRAM as unusable;
	 * we can pull MAGIC bits low without an erase. */
	header->magic = NVRAM_CLEAR_MAGIC; /* All zeros magic */

	/* Unlock sector blocks (for Intel 28F320C3B flash), 20060309 */
	if (nvram_mtd->unlock)
		nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic),
	                &magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: clear MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	header->magic = NVRAM_MAGIC; /* restore MAGIC before we regenerate the NVRAM,
	                              * otherwise we'd have an incorrect CRC */

	/* Regenerate NVRAM */
	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_commit(header);
	spin_unlock_irqrestore(&nvram_lock, flags);
	if (ret)
		goto done;
	/* Erase sector blocks */
	init_waitqueue_head(&wait_q);
	for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len; offset += nvram_mtd->erasesize) {
		erase.mtd = nvram_mtd;
		erase.addr = offset;
		erase.len = nvram_mtd->erasesize;
		erase.callback = erase_callback;
		erase.priv = (u_long) &wait_q;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		/* Unlock sector blocks */
		if (nvram_mtd->unlock)
			nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

		if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			printk("nvram_commit: erase error\n");
			ret = -EIO;
			goto done;
		}

		/* Wait for erase to finish */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}
	/* Write partition up to end of data area */
	header->magic = NVRAM_INVALID_MAGIC; /* All ones magic */
	offset = nvram_mtd->size - erasesize;
	i = erasesize - NVRAM_SPACE + header->len;
	ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
	if (ret || len != i) {
		printk("nvram_commit: write error\n");
		ret = -EIO;
		goto done;
	}

	/* Now mark the NVRAM in flash as "valid" by setting the correct
	 * MAGIC # */
	header->magic = NVRAM_MAGIC;
	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic),
	                &magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: write MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/*
	 * Reading a few bytes back here will put the device
	 * back to the correct mode on certain flashes */
	offset = nvram_mtd->size - erasesize;
	ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

done:
	up(&nvram_sem);
	kfree(buf);
	return ret;
}
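/*
 * Commit sequence recap (summarizing the code above): the on-flash magic is
 * first overwritten with NVRAM_CLEAR_MAGIC, the image is regenerated via
 * _nvram_commit(), the affected erase blocks are erased and rewritten with
 * NVRAM_INVALID_MAGIC in place, and only then is NVRAM_MAGIC restored. A
 * power failure mid-commit therefore leaves a header that fails the magic
 * check in _nvram_read() instead of a corrupt-but-"valid" image.
 */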
int
nvram_getall(char *buf, int count)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nvram_lock, flags);
	if (nvram_major >= 0)
		ret = _nvram_getall(buf, count);
	else
		ret = early_nvram_getall(buf, count);
	spin_unlock_irqrestore(&nvram_lock, flags);

	return ret;
}
/* User mode interface below */
static ssize_t
dev_nvram_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
	char tmp[100], *name = tmp, *value;
	ssize_t ret;
	unsigned long off;

	if (count > sizeof(tmp)) {
		if (!(name = kmalloc(count, GFP_KERNEL)))
			return -ENOMEM;
	}

	if (copy_from_user(name, buf, count)) {
		ret = -EFAULT;
		goto done;
	}

	if (*name == '\0') {
		/* Get all variables */
		ret = nvram_getall(name, count);
		if (ret == 0) {
			if (copy_to_user(buf, name, count)) {
				ret = -EFAULT;
				goto done;
			}
			ret = count;
		}
	} else {
		if (!(value = nvram_get(name))) {
			ret = 0;
			goto done;
		}

		/* Provide the offset into mmap() space */
		off = (unsigned long) value - (unsigned long) nvram_buf;

		if (put_user(off, (unsigned long *) buf)) {
			ret = -EFAULT;
			goto done;
		}

		ret = sizeof(unsigned long);
	}

	flush_cache_all();

done:
	if (name != tmp)
		kfree(name);

	return ret;
}
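/*
 * User-space view of this read path (a sketch; it assumes direct use of
 * /dev/nvram rather than the usual wrapper library, and the variable name is
 * hypothetical):
 *
 *     int fd = open("/dev/nvram", O_RDWR);
 *     char *base = mmap(NULL, NVRAM_SPACE, PROT_READ, MAP_SHARED, fd, 0);
 *     char req[100] = "boardtype";
 *     unsigned long off;
 *
 *     read(fd, req, sizeof(req));      // kernel replaces req[] with an offset
 *     memcpy(&off, req, sizeof(off));
 *     printf("%s\n", base + off);      // value string inside the mmap()ed buffer
 *
 * Passing an empty name instead returns the whole "name=value\0...\0\0" dump
 * via nvram_getall().
 */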
static ssize_t
dev_nvram_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
	char tmp[100], *name = tmp, *value;
	ssize_t ret;

	if (count > sizeof(tmp)) {
		if (!(name = kmalloc(count, GFP_KERNEL)))
			return -ENOMEM;
	}

	if (copy_from_user(name, buf, count)) {
		ret = -EFAULT;
		goto done;
	}

	value = name;
	name = strsep(&value, "=");
	if (value)
		ret = nvram_set(name, value) ? : count;
	else
		ret = nvram_unset(name) ? : count;

done:
	if (name != tmp)
		kfree(name);

	return ret;
}
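/*
 * Matching write path sketch (illustrative; the variable name is made up): a
 * plain write() carries either "name=value" to set a variable or just "name"
 * to unset it, with the terminating NUL included in the count:
 *
 *     write(fd, "wl0_ssid=test", sizeof("wl0_ssid=test"));   // nvram_set()
 *     write(fd, "wl0_ssid", sizeof("wl0_ssid"));             // no '=': nvram_unset()
 *
 * Neither call persists anything by itself; see the commit ioctl below.
 */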
static int
dev_nvram_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	if (cmd != NVRAM_MAGIC)
		return -EINVAL;

	return nvram_commit();
}
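/*
 * Commit trigger from user space (sketch): the only accepted command is the
 * NVRAM magic itself, so flushing the variable set to flash is simply
 *
 *     ioctl(fd, NVRAM_MAGIC, 0);
 *
 * which ends up in nvram_commit() above.
 */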
static int
dev_nvram_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long offset = virt_to_phys(nvram_buf);

	if (remap_page_range(vma->vm_start, offset, vma->vm_end - vma->vm_start,
			     vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
static int
dev_nvram_open(struct inode *inode, struct file *file)
{
	MOD_INC_USE_COUNT;
	return 0;
}

static int
dev_nvram_release(struct inode *inode, struct file *file)
{
	MOD_DEC_USE_COUNT;
	return 0;
}
static struct file_operations dev_nvram_fops = {
	owner:		THIS_MODULE,
	open:		dev_nvram_open,
	release:	dev_nvram_release,
	read:		dev_nvram_read,
	write:		dev_nvram_write,
	ioctl:		dev_nvram_ioctl,
	mmap:		dev_nvram_mmap,
};
static void
dev_nvram_exit(void)
{
	int order = 0;
	struct page *page, *end;

	if (nvram_handle)
		devfs_unregister(nvram_handle);

	if (nvram_major >= 0)
		devfs_unregister_chrdev(nvram_major, "nvram");

	if (nvram_mtd)
		put_mtd_device(nvram_mtd);

	while ((PAGE_SIZE << order) < NVRAM_SPACE)
		order++;
	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++)
		mem_map_unreserve(page);

	_nvram_exit();
}
static int __init
dev_nvram_init(void)
{
	int order = 0, ret = 0;
	struct page *page, *end;
	unsigned int i;

	/* Allocate and reserve memory to mmap() */
	while ((PAGE_SIZE << order) < NVRAM_SPACE)
		order++;
	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++)
		mem_map_reserve(page);
	/* Find associated MTD device */
	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		nvram_mtd = get_mtd_device(NULL, i);
		if (nvram_mtd) {
			if (!strcmp(nvram_mtd->name, "nvram") &&
			    nvram_mtd->size >= NVRAM_SPACE)
				break;
			put_mtd_device(nvram_mtd);
		}
	}
	if (i >= MAX_MTD_DEVICES)
		nvram_mtd = NULL;
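	/*
	 * Assumption about the target flash map (not spelled out elsewhere in
	 * this file): the board is expected to expose an MTD partition named
	 * "nvram" of at least NVRAM_SPACE bytes. If none is found, nvram_mtd
	 * stays NULL, _nvram_read() falls back to the early nvram_buf copy and
	 * nvram_commit() refuses to run.
	 */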
	/* Initialize hash table lock */
	spin_lock_init(&nvram_lock);
	/* Initialize commit semaphore */
	init_MUTEX(&nvram_sem);
	/* Register char device */
	if ((nvram_major = devfs_register_chrdev(0, "nvram", &dev_nvram_fops)) < 0) {
		ret = nvram_major;
		goto err;
	}
	/* Initialize hash table */
	_nvram_init(sbh);
	/* Create /dev/nvram handle */
	nvram_handle = devfs_register(NULL, "nvram", DEVFS_FL_NONE, nvram_major, 0,
	                              S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, &dev_nvram_fops, NULL);
	/* Set the SDRAM NCDL value into NVRAM if not already done */
	if (getintvar(NULL, "sdram_ncdl") == 0) {
		unsigned int ncdl;
		char buf[] = "0x00000000";

		if ((ncdl = sb_memc_get_ncdl(sbh))) {
			sprintf(buf, "0x%08x", ncdl);
			nvram_set("sdram_ncdl", buf);
			nvram_commit();
		}
	}

	return 0;

err:
	dev_nvram_exit();
	return ret;
}
module_init(dev_nvram_init);
module_exit(dev_nvram_exit);