From 0f85e79f6f01f50cb703866a555085a9c65bad2f Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Thu, 29 Sep 2011 20:31:54 +0200
Subject: [PATCH 21/24] MIPS: lantiq: adds cache split
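
Splitting is driven entirely from the kernel command line. As a rough usage
sketch based on the Kconfig help added below (the parameter names are the
ones introduced by this patch; how the bootargs reach the kernel depends on
the board's U-Boot environment):

    vpe_icache_shared=0 vpe_dcache_shared=0 icache_way2=1 icache_way3=1 dcache_way2=1 dcache_way3=1

This would reserve icache/dcache ways 2 and 3 for VPE0 and leave ways 0 and 1
to VPE1. With no parameters given, both caches remain shared between the VPEs.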
---
 arch/mips/Kconfig      |   22 ++++
 arch/mips/kernel/vpe.c |   66 ++++++++++++++++++
 arch/mips/mm/c-r4k.c   |  172 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 260 insertions(+), 0 deletions(-)
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1922,6 +1922,28 @@ config IFX_VPE_EXT
 	help
 	  IFX included extensions in APRP
 
+config IFX_VPE_CACHE_SPLIT
+	bool "IFX Cache Split Ways"
+	depends on IFX_VPE_EXT
+	help
+	  IFX extension for reserving (splitting) cache ways among VPEs. You must
+	  give the kernel command line arguments vpe_icache_shared=0 or
+	  vpe_dcache_shared=0 to enable splitting of the icache or dcache
+	  respectively. Then you can specify which cache ways should be
+	  assigned to which VPE. There are 8 cache ways in total, 4 each
+	  for dcache and icache: dcache_way0, dcache_way1, dcache_way2,
+	  dcache_way3 and icache_way0, icache_way1, icache_way2, icache_way3.
+
+	  For example, if you specify vpe_icache_shared=0 and icache_way2=1,
+	  then the 3rd icache way will be assigned to VPE0 and denied in VPE1.
+
+	  For the icache, software is required to keep at least one cache way
+	  available for each VPE at all times, i.e., one cannot assign all the
+	  icache ways to one VPE.
+
+	  By default, vpe_dcache_shared and vpe_icache_shared are set to 1
+	  (i.e., both icache and dcache are shared among VPEs).
+
 config PERFCTRS
 	bool "34K Performance counters"
 	depends on MIPS_MT && PROC_FS
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -128,6 +128,13 @@ __setup("vpe1_wdog_timeout=", wdog_timeo
 EXPORT_SYMBOL(vpe1_wdog_timeout);
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
+extern int vpe_icache_shared, vpe_dcache_shared;
+extern int icache_way0, icache_way1, icache_way2, icache_way3;
+extern int dcache_way0, dcache_way1, dcache_way2, dcache_way3;
+#endif
+
 /* grab the likely amount of memory we will need. */
 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
 #define P_SIZE (2 * 1024 * 1024)
@@ -866,6 +873,65 @@ static int vpe_run(struct vpe * v)
 	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
+	if ((!vpe_icache_shared) || (!vpe_dcache_shared)) {
+
+		/* PCP bit must be 1 to split the cache */
+		if (read_c0_mvpconf0() & MVPCONF0_PCP) {
+
+			if (!vpe_icache_shared) {
+				write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_ICS);
+
+				/*
+				 * If any cache way is 1, then that way is denied
+				 * in VPE1. Otherwise assign that way to VPE1.
+				 */
+				if (icache_way0)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX0);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX0);
+				if (icache_way1)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX1);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX1);
+				if (icache_way2)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX2);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX2);
+				if (icache_way3)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX3);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX3);
+			}
+
+			if (!vpe_dcache_shared) {
+				write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_DCS);
+
+				/*
+				 * If any cache way is 1, then that way is denied
+				 * in VPE1. Otherwise assign that way to VPE1.
+				 */
+				if (dcache_way0)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX0);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX0);
+				if (dcache_way1)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX1);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX1);
+				if (dcache_way2)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX2);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX2);
+				if (dcache_way3)
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX3);
+				else
+					write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX3);
+			}
+		}
+	}
+#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
+
 	/* clear out any left overs from a previous program */
 	write_vpe_c0_status(0);
 	write_vpe_c0_cause(0);
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1350,6 +1350,106 @@ static int __init setcoherentio(char *st
 __setup("coherentio", setcoherentio);
 #endif
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
+
+#include <asm/mipsmtregs.h>
+
+/*
+ * By default, vpe_icache_shared and vpe_dcache_shared
+ * values are 1, i.e., both icache and dcache are shared
+ * among the VPEs.
+ */
+
+int vpe_icache_shared = 1;
+static int __init vpe_icache_shared_val(char *str)
+{
+	get_option(&str, &vpe_icache_shared);
+	return 1;
+}
+__setup("vpe_icache_shared=", vpe_icache_shared_val);
+EXPORT_SYMBOL(vpe_icache_shared);
+
+int vpe_dcache_shared = 1;
+static int __init vpe_dcache_shared_val(char *str)
+{
+	get_option(&str, &vpe_dcache_shared);
+	return 1;
+}
+__setup("vpe_dcache_shared=", vpe_dcache_shared_val);
+EXPORT_SYMBOL(vpe_dcache_shared);
+
+/*
+ * Software is required to make at least one icache
+ * way available for a VPE at all times, i.e., one
+ * can't assign all the icache ways to one VPE.
+ */
+
+int icache_way0 = 0;
+static int __init icache_way0_val(char *str)
+{
+	get_option(&str, &icache_way0);
+	return 1;
+}
+__setup("icache_way0=", icache_way0_val);
+
+int icache_way1 = 0;
+static int __init icache_way1_val(char *str)
+{
+	get_option(&str, &icache_way1);
+	return 1;
+}
+__setup("icache_way1=", icache_way1_val);
+
+int icache_way2 = 0;
+static int __init icache_way2_val(char *str)
+{
+	get_option(&str, &icache_way2);
+	return 1;
+}
+__setup("icache_way2=", icache_way2_val);
+
+int icache_way3 = 0;
+static int __init icache_way3_val(char *str)
+{
+	get_option(&str, &icache_way3);
+	return 1;
+}
+__setup("icache_way3=", icache_way3_val);
+
+int dcache_way0 = 0;
+static int __init dcache_way0_val(char *str)
+{
+	get_option(&str, &dcache_way0);
+	return 1;
+}
+__setup("dcache_way0=", dcache_way0_val);
+
+int dcache_way1 = 0;
+static int __init dcache_way1_val(char *str)
+{
+	get_option(&str, &dcache_way1);
+	return 1;
+}
+__setup("dcache_way1=", dcache_way1_val);
+
+int dcache_way2 = 0;
+static int __init dcache_way2_val(char *str)
+{
+	get_option(&str, &dcache_way2);
+	return 1;
+}
+__setup("dcache_way2=", dcache_way2_val);
+
+int dcache_way3 = 0;
+static int __init dcache_way3_val(char *str)
+{
+	get_option(&str, &dcache_way3);
+	return 1;
+}
+__setup("dcache_way3=", dcache_way3_val);
+
+#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
+
 void __cpuinit r4k_cache_init(void)
 {
 	extern void build_clear_page(void);
@@ -1369,6 +1469,78 @@ void __cpuinit r4k_cache_init(void)
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
+	/*
+	 * We split the cache ways appropriately among the VPEs
+	 * based on the cache way values we received as command line
+	 * arguments.
+	 */
+	if ((!vpe_icache_shared) || (!vpe_dcache_shared)) {
+
+		/* PCP bit must be 1 to split the cache */
+		if (read_c0_mvpconf0() & MVPCONF0_PCP) {
+
+			/* Set CPA bit which enables us to modify the VPEOpt register */
+			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_CPA);
+
+			if (!vpe_icache_shared) {
+				write_c0_vpeconf0(read_c0_vpeconf0() & ~VPECONF0_ICS);
+				/*
+				 * If a cache way is 1, that way is assigned to
+				 * VPE0 (and denied in VPE1). Otherwise deny it
+				 * in VPE0 and leave it to VPE1.
+				 */
+				printk(KERN_DEBUG "icache is split\n");
+				printk(KERN_DEBUG "icache_way0=%d icache_way1=%d icache_way2=%d icache_way3=%d\n",
+					icache_way0, icache_way1, icache_way2, icache_way3);
+				if (!icache_way0)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX0);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX0);
+				if (!icache_way1)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX1);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX1);
+				if (!icache_way2)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX2);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX2);
+				if (!icache_way3)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX3);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX3);
+			}
+
+			if (!vpe_dcache_shared) {
+				/*
+				 * If a cache way is 1, that way is assigned to
+				 * VPE0 (and denied in VPE1). Otherwise deny it
+				 * in VPE0 and leave it to VPE1.
+				 */
+				printk(KERN_DEBUG "dcache is split\n");
+				printk(KERN_DEBUG "dcache_way0=%d dcache_way1=%d dcache_way2=%d dcache_way3=%d\n",
+					dcache_way0, dcache_way1, dcache_way2, dcache_way3);
+				write_c0_vpeconf0(read_c0_vpeconf0() & ~VPECONF0_DCS);
+				if (!dcache_way0)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX0);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX0);
+				if (!dcache_way1)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX1);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX1);
+				if (!dcache_way2)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX2);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX2);
+				if (!dcache_way3)
+					write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX3);
+				else
+					write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX3);
+			}
+		}
+	}
+#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
--- a/arch/mips/lantiq/setup.c
+++ b/arch/mips/lantiq/setup.c
+/* assume 16M as default in case uboot fails to pass proper ramsize */
+unsigned long physical_memsize = 16L;
+
 void __init plat_mem_setup(void)
 {
-	/* assume 16M as default incase uboot fails to pass proper ramsize */
-	unsigned long memsize = 16;
 	char **envp = (char **) KSEG1ADDR(fw_arg2);
 
 	ioport_resource.start = IOPORT_RESOURCE_START;
@@ -35,13 +36,13 @@ void __init plat_mem_setup(void)
 		char *e = (char *)KSEG1ADDR(*envp);
 		if (!strncmp(e, "memsize=", 8)) {
 			e += 8;
-			if (strict_strtoul(e, 0, &memsize))
+			if (strict_strtoul(e, 0, &physical_memsize))
 				pr_warn("bad memsize specified\n");
 		}
 		envp++;
 	}
-	memsize *= 1024 * 1024;
-	add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
+	physical_memsize *= 1024 * 1024;
+	add_memory_region(0x00000000, physical_memsize, BOOT_MEM_RAM);
 }