openwrt.git: target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_page.h
/*
 * linux/include/asm-m68k/cf_page.h
 *
 * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 * Based on linux/include/asm-m68k/page.h
 *
 * 10/09/08 JKM: split Coldfire pieces into separate file
 */
#ifndef __CF_PAGE__
#define __CF_PAGE__

#include <linux/const.h>
#include <asm/setup.h>
#include <asm/page_offset.h>

/* Virtual base page location */
#define PAGE_OFFSET	(PAGE_OFFSET_RAW)

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	(13)	/* 8K pages */
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#define THREAD_SIZE	PAGE_SIZE
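
/*
 * For illustration: with PAGE_SHIFT == 13, PAGE_SIZE is 0x2000 (8 KiB) and
 * PAGE_MASK works out to 0xffffe000 on a 32-bit ColdFire, so
 *
 *	page_base = addr & PAGE_MASK;
 *
 * rounds an address down to the start of its page.  THREAD_SIZE being one
 * page means each kernel stack occupies a single 8K page.
 */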

#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <asm/module.h>

#define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
#define free_user_page(page, addr)	free_page(addr)

#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(addr, vaddr, page)	\
	do { clear_page(addr);			\
	     flush_dcache_page(page);		\
	} while (0)

#define copy_user_page(to, from, vaddr, page)	\
	do { copy_page(to, from);		\
	     flush_dcache_page(page);		\
	} while (0)
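
/*
 * For illustration (hypothetical caller, names not from this header):
 * duplicating a user page, e.g. on a copy-on-write fault, while keeping
 * the data cache coherent might look like
 *
 *	copy_user_page(dst_kaddr, src_kaddr, vaddr, dst_page);
 *
 * where dst_kaddr and src_kaddr are kernel mappings of the two pages,
 * vaddr is the faulting user address and dst_page the struct page
 * being filled.
 */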

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd[16]; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((&x)->pmd[0])
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
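
/*
 * For illustration: wrapping the raw values in single-member structs makes
 * pte_t, pmd_t, pgd_t and pgprot_t distinct types, so mixing them up is
 * caught by the compiler instead of silently accepted, e.g. (raw_bits is a
 * hypothetical value):
 *
 *	pte_t pte = __pte(raw_bits);
 *	unsigned long v = pte_val(pte);		(fine)
 *	unsigned long w = pgd_val(pte);		(compile error)
 */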

/* to align the pointer to the (next) page boundary */
/* Defined in linux/mm.h */
/* #define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK) */

extern unsigned long m68k_memoffset;

#define WANT_PAGE_VIRTUAL

extern unsigned long cf_dma_base;
extern unsigned long cf_dma_end;

/*
 * Convert a virt to a phys
 */
static inline unsigned long ___pa(void *vaddr)
{
#if CONFIG_SDRAM_BASE != PAGE_OFFSET
	return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE);
#else
	if ((unsigned long)vaddr >= CONFIG_DMA_BASE &&
	    (unsigned long)vaddr < (CONFIG_DMA_BASE + CONFIG_DMA_SIZE)) {
		/* address is in carved out DMA range */
		return ((unsigned long)vaddr - CONFIG_DMA_BASE) + CONFIG_SDRAM_BASE;
	} else if ((unsigned long)vaddr >= PAGE_OFFSET &&
		   (unsigned long)vaddr < (PAGE_OFFSET + CONFIG_SDRAM_SIZE)) {
		/* normal mapping */
		return ((unsigned long)vaddr - PAGE_OFFSET) + CONFIG_SDRAM_BASE;
	}

	return (unsigned long)vaddr;
#endif
}
#define __pa(vaddr)	___pa((void *)(vaddr))

/*
 * Convert a phys to a virt
 */
static inline void *__va(unsigned long paddr)
{
#if CONFIG_SDRAM_BASE != PAGE_OFFSET
	return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
#else
	if (paddr >= cf_dma_base && paddr <= cf_dma_end) {
		/* mapped address for DMA */
		return (void *)((paddr - CONFIG_SDRAM_BASE) + CONFIG_DMA_BASE);
	} else if (paddr >= cf_dma_end &&
		   paddr < (CONFIG_SDRAM_BASE + CONFIG_SDRAM_SIZE)) {
		/* normal mapping */
		return (void *)((paddr - CONFIG_SDRAM_BASE) + PAGE_OFFSET);
	}
	return (void *)paddr;
#endif
}
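
/*
 * For illustration: under the CONFIG_SDRAM_BASE == PAGE_OFFSET layout, and
 * assuming cf_dma_base/cf_dma_end describe the physical DMA carve-out,
 * __pa() and __va() invert each other for mapped addresses:
 *
 *	unsigned long pa = __pa(vaddr);
 *	void *back = __va(pa);			(back == vaddr)
 *
 * Addresses outside both the DMA window and SDRAM are passed through
 * unchanged.
 */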

/*
 * NOTE: virtual isn't really correct, actually it should be the offset
 * into the memory node, but we have no highmem, so that works for now.
 *
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this
 * makes lots of the shifts unnecessary.
 *
 * PFNs are used to map physical pages. So PFN[0] maps to the base phys addr.
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
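
/*
 * For illustration: a pfn is a physical address shifted down by PAGE_SHIFT,
 * so the first page of SDRAM has
 *
 *	pfn == CONFIG_SDRAM_BASE >> PAGE_SHIFT
 *
 * and, assuming the SDRAM base and PAGE_OFFSET are page aligned,
 * pfn_to_virt(virt_to_pfn(kaddr)) gives back the page-aligned kernel
 * address containing kaddr.
 */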

extern int m68k_virt_to_node_shift;

#ifdef CONFIG_SINGLE_MEMORY_CHUNK
#define __virt_to_node(addr)	(&pg_data_map[0])
#else
extern struct pglist_data *pg_data_table[];

static inline __attribute_const__ int __virt_to_node_shift(void)
{
	return m68k_virt_to_node_shift;
}

#define __virt_to_node(addr)	(pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */

#define virt_to_page(addr) ({						\
	pfn_to_page(virt_to_pfn(addr));					\
})
#define page_to_virt(page) ({						\
	pfn_to_virt(page_to_pfn(page));					\
})

#define pfn_to_page(pfn) ({						\
	unsigned long __pfn = (pfn);					\
	struct pglist_data *pgdat;					\
	pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn));	\
	pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn);		\
})
#define page_to_pfn(_page) ({						\
	struct page *__p = (_page);					\
	struct pglist_data *pgdat;					\
	pgdat = &pg_data_map[page_to_nid(__p)];				\
	((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;		\
})
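
/*
 * For illustration: these open-code the multi-node (discontiguous memory)
 * lookup.  A pfn is first mapped back to a virtual address to find its
 * owning node via pg_data_table[] or pg_data_map[], then offset into that
 * node's mem_map:
 *
 *	struct page *pg = virt_to_page(kaddr);
 *	void *kva = page_to_virt(pg);		(kernel address of that page)
 */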

#define virt_addr_valid(kaddr)	(((void *)(kaddr) >= (void *)PAGE_OFFSET &&	\
				  (void *)(kaddr) < high_memory) ||		\
				 ((void *)(kaddr) >= (void *)CONFIG_DMA_BASE &&	\
				  (void *)(kaddr) < (void *)(CONFIG_DMA_BASE + CONFIG_DMA_SIZE)))

#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/getorder.h>

#ifdef CONFIG_VDSO
#define __HAVE_ARCH_GATE_AREA
#endif

#endif /* __CF_PAGE__ */