openwrt.git: target/linux/lantiq/patches/0019-MIPS-lantiq-adds-VPE-extensions.patch
From c6c810d83f0d95f54c3a6b338d219cec7ccef4c9 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Thu, 29 Sep 2011 20:30:40 +0200
Subject: [PATCH 19/24] MIPS: lantiq: adds VPE extensions

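Add the IFX (Infineon/Lantiq) APRP extensions to the MIPS MT/VPE
loader, plus two optional /proc interfaces for MT cores:

* CONFIG_IFX_VPE_EXT - shared-TLB and wired-entry configuration for
  VPE0/VPE1, an exported vpe1_* API for loading and starting firmware
  on the second VPE (vpe1_sw_start, vpe1_sw_stop, vpe1_get_load_addr,
  vpe1_get_max_mem and boot-parameter accessors; tcmask and flags
  arguments must currently be 0), and the boot parameters
  vpe_tlb_shared=, vpe0_wired_tlb_entries=, vpe1_wired_tlb_entries=,
  vpe1_load_addr=, vpe1_mem=, vpe1_wdog_ctr_addr= and
  vpe1_wdog_timeout=.

* CONFIG_MTSCHED - TC/VPE scheduling policy control through
  /proc/mips/mtsched. Reading the file dumps the VPESchedule and
  TCSchedule state; writes use the format the driver itself reports,
  e.g.

      echo "t0 0x01" > /proc/mips/mtsched    # TCSchedule of TC0
      echo "v1 0x02" > /proc/mips/mtsched    # VPESchedule of VPE1

* CONFIG_PERFCTRS - 34K performance counters through /proc/mips/perf.
  A write takes up to four "<counter> <control-hex> <count>" triplets;
  a count of -1 leaves the counter value untouched. The control and
  count values in this example are illustrative only:

      echo "0 0x82 0" > /proc/mips/perf
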
---
 arch/mips/Kconfig                  |   22 +++
 arch/mips/include/asm/mipsmtregs.h |   54 +++++++
 arch/mips/kernel/Makefile          |    3 +-
 arch/mips/kernel/mips-mt.c         |   97 +++++++++++--
 arch/mips/kernel/mtsched_proc.c    |  279 ++++++++++++++++++++++++++++++
 arch/mips/kernel/perf_proc.c       |  191 ++++++++++++++++++++++
 arch/mips/kernel/proc.c            |   17 +++
 arch/mips/kernel/smtc.c            |    7 +
 arch/mips/kernel/vpe.c             |  250 ++++++++++++++++++++++++++-
 9 files changed, 905 insertions(+), 15 deletions(-)
 create mode 100644 arch/mips/kernel/mtsched_proc.c
 create mode 100644 arch/mips/kernel/perf_proc.c

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1915,6 +1915,28 @@ config MIPS_VPE_LOADER
	  Includes a loader for loading an elf relocatable object
	  onto another VPE and running it.
 
+config IFX_VPE_EXT
+	bool "IFX APRP Extensions"
+	depends on MIPS_VPE_LOADER
+	default y
+	help
+	  Infineon (IFX) extensions to the APRP VPE loader.
+
+config PERFCTRS
+	bool "34K Performance counters"
+	depends on MIPS_MT && PROC_FS
+	default n
+	help
+	  34K performance counters, exposed through /proc.
+
+config MTSCHED
+	bool "Support mtsched priority configuration for TCs"
+	depends on MIPS_MT && PROC_FS
+	default y
+	help
+	  Support for mtsched priority configuration for TCs through
+	  /proc/mips/mtsched
+
 config MIPS_MT_SMTC_IM_BACKSTOP
	bool "Use per-TC register bits as backstop for inhibited IM bits"
	depends on MIPS_MT_SMTC
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -28,14 +28,34 @@
 #define read_c0_vpeconf0()	__read_32bit_c0_register($1, 2)
 #define write_c0_vpeconf0(val)	__write_32bit_c0_register($1, 2, val)
 
+#define read_c0_vpeconf1()	__read_32bit_c0_register($1, 3)
+#define write_c0_vpeconf1(val)	__write_32bit_c0_register($1, 3, val)
+
+#define read_c0_vpeschedule()	__read_32bit_c0_register($1, 5)
+#define write_c0_vpeschedule(val)	__write_32bit_c0_register($1, 5, val)
+
+#define read_c0_vpeschefback()	__read_32bit_c0_register($1, 6)
+#define write_c0_vpeschefback(val)	__write_32bit_c0_register($1, 6, val)
+
+#define read_c0_vpeopt()	__read_32bit_c0_register($1, 7)
+#define write_c0_vpeopt(val)	__write_32bit_c0_register($1, 7, val)
+
 #define read_c0_tcstatus()	__read_32bit_c0_register($2, 1)
 #define write_c0_tcstatus(val)	__write_32bit_c0_register($2, 1, val)
 
 #define read_c0_tcbind()	__read_32bit_c0_register($2, 2)
+#define write_c0_tcbind(val)	__write_32bit_c0_register($2, 2, val)
 
 #define read_c0_tccontext()	__read_32bit_c0_register($2, 5)
 #define write_c0_tccontext(val)	__write_32bit_c0_register($2, 5, val)
 
+#define read_c0_tcschedule()	__read_32bit_c0_register($2, 6)
+#define write_c0_tcschedule(val)	__write_32bit_c0_register($2, 6, val)
+
+#define read_c0_tcschefback()	__read_32bit_c0_register($2, 7)
+#define write_c0_tcschefback(val)	__write_32bit_c0_register($2, 7, val)
+
+
 #else /* Assembly */
 /*
  * Macros for use in assembly language code
@@ -74,6 +94,8 @@
 #define MVPCONTROL_STLB_SHIFT	2
 #define MVPCONTROL_STLB		(_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
 
+#define MVPCONTROL_CPA_SHIFT	3
+#define MVPCONTROL_CPA		(_ULCAST_(1) << MVPCONTROL_CPA_SHIFT)
 
 /* MVPConf0 fields */
 #define MVPCONF0_PTC_SHIFT	0
@@ -84,6 +106,8 @@
 #define MVPCONF0_TCA		( _ULCAST_(1) << MVPCONF0_TCA_SHIFT)
 #define MVPCONF0_PTLBE_SHIFT	16
 #define MVPCONF0_PTLBE		(_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
+#define MVPCONF0_PCP_SHIFT	27
+#define MVPCONF0_PCP		(_ULCAST_(1) << MVPCONF0_PCP_SHIFT)
 #define MVPCONF0_TLBS_SHIFT	29
 #define MVPCONF0_TLBS		(_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
 #define MVPCONF0_M_SHIFT	31
@@ -121,9 +145,25 @@
 #define VPECONF0_VPA		(_ULCAST_(1) << VPECONF0_VPA_SHIFT)
 #define VPECONF0_MVP_SHIFT	1
 #define VPECONF0_MVP		(_ULCAST_(1) << VPECONF0_MVP_SHIFT)
+#define VPECONF0_ICS_SHIFT	16
+#define VPECONF0_ICS		(_ULCAST_(1) << VPECONF0_ICS_SHIFT)
+#define VPECONF0_DCS_SHIFT	17
+#define VPECONF0_DCS		(_ULCAST_(1) << VPECONF0_DCS_SHIFT)
 #define VPECONF0_XTC_SHIFT	21
 #define VPECONF0_XTC		(_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
 
+/* VPEOpt fields */
+#define VPEOPT_DWX_SHIFT	0
+#define VPEOPT_IWX_SHIFT	8
+#define VPEOPT_IWX0		( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT)
+#define VPEOPT_IWX1		( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT)
+#define VPEOPT_IWX2		( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT)
+#define VPEOPT_IWX3		( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT)
+#define VPEOPT_DWX0		( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT)
+#define VPEOPT_DWX1		( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT)
+#define VPEOPT_DWX2		( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT)
+#define VPEOPT_DWX3		( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT)
+
 /* TCStatus fields (per TC) */
 #define TCSTATUS_TASID		(_ULCAST_(0xff))
 #define TCSTATUS_IXMT_SHIFT	10
@@ -350,6 +390,14 @@ do { \
 #define write_vpe_c0_vpecontrol(val)	mttc0(1, 1, val)
 #define read_vpe_c0_vpeconf0()		mftc0(1, 2)
 #define write_vpe_c0_vpeconf0(val)	mttc0(1, 2, val)
+#define read_vpe_c0_vpeschedule()	mftc0(1, 5)
+#define write_vpe_c0_vpeschedule(val)	mttc0(1, 5, val)
+#define read_vpe_c0_vpeschefback()	mftc0(1, 6)
+#define write_vpe_c0_vpeschefback(val)	mttc0(1, 6, val)
+#define read_vpe_c0_vpeopt()		mftc0(1, 7)
+#define write_vpe_c0_vpeopt(val)	mttc0(1, 7, val)
+#define read_vpe_c0_wired()		mftc0(6, 0)
+#define write_vpe_c0_wired(val)		mttc0(6, 0, val)
 #define read_vpe_c0_count()		mftc0(9, 0)
 #define write_vpe_c0_count(val)		mttc0(9, 0, val)
 #define read_vpe_c0_status()		mftc0(12, 0)
@@ -381,6 +429,12 @@ do { \
 #define write_tc_c0_tchalt(val)		mttc0(2, 4, val)
 #define read_tc_c0_tccontext()		mftc0(2, 5)
 #define write_tc_c0_tccontext(val)	mttc0(2, 5, val)
+#define read_tc_c0_tcschedule()		mftc0(2, 6)
+#define write_tc_c0_tcschedule(val)	mttc0(2, 6, val)
+#define read_tc_c0_tcschefback()	mftc0(2, 7)
+#define write_tc_c0_tcschefback(val)	mttc0(2, 7, val)
+#define read_tc_c0_entryhi()		mftc0(10, 0)
+#define write_tc_c0_entryhi(val)	mttc0(10, 0, val)
 
 /* GPR */
 #define read_tc_gpr_sp()	mftgpr(29)
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -86,7 +86,8 @@ obj-$(CONFIG_MIPS32_O32)	+= binfmt_elfo3
 
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_PROC_FS)		+= proc.o
-
+obj-$(CONFIG_MTSCHED)		+= mtsched_proc.o
+obj-$(CONFIG_PERFCTRS)		+= perf_proc.o
 obj-$(CONFIG_64BIT)		+= cpu-bugs64.o
 
 obj-$(CONFIG_I8253)		+= i8253.o
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -21,26 +21,96 @@
 #include <asm/cacheflush.h>
 
 int vpelimit;
-
 static int __init maxvpes(char *str)
 {
	get_option(&str, &vpelimit);
-
	return 1;
 }
-
 __setup("maxvpes=", maxvpes);
 
 int tclimit;
-
 static int __init maxtcs(char *str)
 {
	get_option(&str, &tclimit);
+	return 1;
+}
+__setup("maxtcs=", maxtcs);
 
+#ifdef CONFIG_IFX_VPE_EXT
+int stlb;
+static int __init istlbshared(char *str)
+{
+	get_option(&str, &stlb);
	return 1;
 }
+__setup("vpe_tlb_shared=", istlbshared);
 
-__setup("maxtcs=", maxtcs);
+int vpe0_wired;
+static int __init vpe0wired(char *str)
+{
+	get_option(&str, &vpe0_wired);
+	return 1;
+}
+__setup("vpe0_wired_tlb_entries=", vpe0wired);
+
+int vpe1_wired;
+static int __init vpe1wired(char *str)
+{
+	get_option(&str, &vpe1_wired);
+	return 1;
+}
+__setup("vpe1_wired_tlb_entries=", vpe1wired);
+
+#ifdef CONFIG_MIPS_MT_SMTC
+extern int nostlb;
+#endif
+void configure_tlb(void)
+{
+	int vpeflags, tcflags, tlbsiz;
+	unsigned int config1val;
+	vpeflags = dvpe();
+	tcflags = dmt();
+	write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP));
+	write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC));
+	mips_ihb();
+	//printk("stlb = %d, vpe0_wired = %d vpe1_wired = %d\n", stlb, vpe0_wired, vpe1_wired);
+	if (stlb) {
+		if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) {
+			emt(tcflags);
+			evpe(vpeflags);
+			return;
+		}
+
+		write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
+		write_c0_wired(vpe0_wired + vpe1_wired);
+		if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
+			config1val = read_vpe_c0_config1();
+			tlbsiz = (((config1val >> 25) & 0x3f) + 1);
+			if (tlbsiz > 64)
+				tlbsiz = 64;
+			cpu_data[0].tlbsize = tlbsiz;
+			current_cpu_data.tlbsize = tlbsiz;
+		}
+
+	}
+	else {
+		write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB);
+		write_c0_wired(vpe0_wired);
+	}
+
+	ehb();
+	write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC));
+	ehb();
+	local_flush_tlb_all();
+
+	printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired());
+#ifdef CONFIG_MIPS_MT_SMTC
+	nostlb = !stlb;
+#endif
+	emt(tcflags);
+	evpe(vpeflags);
+}
+#endif
 
 /*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
@@ -78,18 +148,18 @@ void mips_mt_regdump(unsigned long mvpct
		if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
			printk("  VPE %d\n", i);
			printk("   VPEControl : %08lx\n",
-				read_vpe_c0_vpecontrol());
+			       read_vpe_c0_vpecontrol());
			printk("   VPEConf0 : %08lx\n",
-				read_vpe_c0_vpeconf0());
+			       read_vpe_c0_vpeconf0());
			printk("   VPE%d.Status : %08lx\n",
-				i, read_vpe_c0_status());
+			       i, read_vpe_c0_status());
			printk("   VPE%d.EPC : %08lx %pS\n",
-				i, read_vpe_c0_epc(),
-				(void *) read_vpe_c0_epc());
+			       i, read_vpe_c0_epc(),
+			       (void *) read_vpe_c0_epc());
			printk("   VPE%d.Cause : %08lx\n",
-				i, read_vpe_c0_cause());
+			       i, read_vpe_c0_cause());
			printk("   VPE%d.Config7 : %08lx\n",
-				i, read_vpe_c0_config7());
+			       i, read_vpe_c0_config7());
			break; /* Next VPE */
		}
	}
@@ -287,6 +357,9 @@ void mips_mt_set_cpuoptions(void)
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
		       ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
+#ifdef CONFIG_IFX_VPE_EXT
+	configure_tlb();
+#endif
 }
 
 /*
--- /dev/null
+++ b/arch/mips/kernel/mtsched_proc.c
@@ -0,0 +1,279 @@
+/*
+ * /proc hooks for MIPS MT scheduling policy management for 34K cores
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2006 Mips Technologies, Inc
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/uaccess.h>
+#include <linux/proc_fs.h>
+
+static struct proc_dir_entry *mtsched_proc;
+
+#ifndef CONFIG_MIPS_MT_SMTC
+#define NTCS 2
+#else
+#define NTCS NR_CPUS
+#endif
+#define NVPES 2
+
+int lastvpe = 1;
+int lasttc = 8;
+
+static int proc_read_mtsched(char *page, char **start, off_t off,
+			     int count, int *eof, void *data)
+{
+	int totalen = 0;
+	int len;
+
+	int i;
+	int vpe;
+	int mytc;
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned int haltstate;
+	unsigned int vpes_checked[NVPES];
+	unsigned int vpeschedule[NVPES];
+	unsigned int vpeschefback[NVPES];
+	unsigned int tcschedule[NTCS];
+	unsigned int tcschefback[NTCS];
+
+	/* Dump the state of the MIPS MT scheduling policy manager */
+	/* Initialize control state */
+	for (i = 0; i < NVPES; i++) {
+		vpes_checked[i] = 0;
+		vpeschedule[i] = 0;
+		vpeschefback[i] = 0;
+	}
+	for (i = 0; i < NTCS; i++) {
+		tcschedule[i] = 0;
+		tcschefback[i] = 0;
+	}
+
+	/* Disable interrupts and multithreaded issue */
+	local_irq_save(flags);
+	mtflags = dvpe();
+
+	/* Then go through the TCs, halt 'em, and extract the values */
+	mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
+	for (i = 0; i < NTCS; i++) {
+		if (i == mytc) {
+			/* No need to halt ourselves! */
+			tcschedule[i] = read_c0_tcschedule();
+			tcschefback[i] = read_c0_tcschefback();
+			/* If VPE bound to TC hasn't been checked, do it */
+			vpe = read_c0_tcbind() & TCBIND_CURVPE;
+			if (!vpes_checked[vpe]) {
+				vpeschedule[vpe] = read_c0_vpeschedule();
+				vpeschefback[vpe] = read_c0_vpeschefback();
+				vpes_checked[vpe] = 1;
+			}
+		} else {
+			settc(i);
+			haltstate = read_tc_c0_tchalt();
+			write_tc_c0_tchalt(TCHALT_H);
+			mips_ihb();
+			tcschedule[i] = read_tc_c0_tcschedule();
+			tcschefback[i] = read_tc_c0_tcschefback();
+			/* If VPE bound to TC hasn't been checked, do it */
+			vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
+			if (!vpes_checked[vpe]) {
+				vpeschedule[vpe] = read_vpe_c0_vpeschedule();
+				vpeschefback[vpe] = read_vpe_c0_vpeschefback();
+				vpes_checked[vpe] = 1;
+			}
+			if (!haltstate) write_tc_c0_tchalt(0);
+		}
+	}
+	/* Re-enable MT and interrupts */
+	evpe(mtflags);
+	local_irq_restore(flags);
+
+	for (vpe = 0; vpe < NVPES; vpe++) {
+		len = sprintf(page, "VPE[%d].VPEschedule = 0x%08x\n",
+			vpe, vpeschedule[vpe]);
+		totalen += len;
+		page += len;
+		len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n",
+			vpe, vpeschefback[vpe]);
+		totalen += len;
+		page += len;
+	}
+	for (i = 0; i < NTCS; i++) {
+		len = sprintf(page, "TC[%d].TCschedule = 0x%08x\n",
+			i, tcschedule[i]);
+		totalen += len;
+		page += len;
+		len = sprintf(page, "TC[%d].TCschefback = 0x%08x\n",
+			i, tcschefback[i]);
+		totalen += len;
+		page += len;
+	}
+	return totalen;
+}
+
+/*
+ * Write to scheduling policy registers based on text input
+ */
+
+#define TXTBUFSZ 100
+
+static int proc_write_mtsched(struct file *file, const char *buffer,
+			      unsigned long count, void *data)
+{
+	int len = 0;
+	char mybuf[TXTBUFSZ];
+	/* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */
+	char entity[1]; //, entity1[1];
+	int number[1];
+	unsigned long value[1];
+	int nparsed = 0, index = 0;
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned int haltstate;
+	unsigned int tcbindval;
+
+	if (count >= TXTBUFSZ) len = TXTBUFSZ-1;
+	else len = count;
+	memset(mybuf, 0, TXTBUFSZ);
+	if (copy_from_user(mybuf, buffer, len)) return -EFAULT;
+
+	nparsed = sscanf(mybuf, "%c%d %lx",
+		&entity[0], &number[0], &value[0]);
+
+	/*
+	 * Having acquired the inputs, which might have
+	 * generated exceptions and preemptions,
+	 * program the registers.
+	 */
+	/* Disable interrupts and multithreaded issue */
+	local_irq_save(flags);
+	mtflags = dvpe();
+
+	if (entity[index] == 't') {
+		/* Set TCSchedule or TCScheFBack of specified TC */
+		if (number[index] > NTCS) goto skip;
+		/* If it's our own TC, do it direct */
+		if (number[index] ==
+		    ((read_c0_tcbind() & TCBIND_CURTC)
+		    >> TCBIND_CURTC_SHIFT)) {
+			if (entity[index] == 't')
+				write_c0_tcschedule(value[index]);
+			else
+				write_c0_tcschefback(value[index]);
+		} else {
+			/* Otherwise, we do it via MTTR */
+			settc(number[index]);
+			haltstate = read_tc_c0_tchalt();
+			write_tc_c0_tchalt(TCHALT_H);
+			mips_ihb();
+			if (entity[index] == 't')
+				write_tc_c0_tcschedule(value[index]);
+			else
+				write_tc_c0_tcschefback(value[index]);
+			mips_ihb();
+			if (!haltstate) write_tc_c0_tchalt(0);
+		}
+	} else if (entity[index] == 'v') {
+		/* Set VPESchedule of specified VPE */
+		if (number[index] > NVPES) goto skip;
+		tcbindval = read_c0_tcbind();
+		/* Are we doing this to our current VPE? */
+		if ((tcbindval & TCBIND_CURVPE) == number[index]) {
+			/* Then life is simple */
+			write_c0_vpeschedule(value[index]);
+		} else {
+			/*
+			 * Bind ourselves to the other VPE long enough
+			 * to program the bind value.
+			 */
+			write_c0_tcbind((tcbindval & ~TCBIND_CURVPE)
+				| number[index]);
+			mips_ihb();
+			write_c0_vpeschedule(value[index]);
+			mips_ihb();
+			/* Restore previous binding */
+			write_c0_tcbind(tcbindval);
+			mips_ihb();
+		}
+	}
+
+	else if (entity[index] == 'r') {
+		unsigned int vpes_checked[2], vpe, i, mytc;
+		vpes_checked[0] = vpes_checked[1] = 0;
+
+		/* Then go through the TCs, halt 'em, and extract the values */
+		mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
+
+		for (i = 0; i < NTCS; i++) {
+			if (i == mytc) {
+				/* No need to halt ourselves! */
+				write_c0_vpeschefback(0);
+				write_c0_tcschefback(0);
+			} else {
+				settc(i);
+				haltstate = read_tc_c0_tchalt();
+				write_tc_c0_tchalt(TCHALT_H);
+				mips_ihb();
+				write_tc_c0_tcschefback(0);
+				/* If VPE bound to TC hasn't been checked, do it */
+				vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
+				if (!vpes_checked[vpe]) {
+					write_vpe_c0_vpeschefback(0);
+					vpes_checked[vpe] = 1;
+				}
+				if (!haltstate) write_tc_c0_tchalt(0);
+			}
+		}
+	}
+	else {
+		printk("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n");
+	}
+
+skip:
+	/* Re-enable MT and interrupts */
+	evpe(mtflags);
+	local_irq_restore(flags);
+	return (len);
+}
+
+static int __init init_mtsched_proc(void)
+{
+	extern struct proc_dir_entry *get_mips_proc_dir(void);
+	struct proc_dir_entry *mips_proc_dir;
+
+	if (!cpu_has_mipsmt) {
+		printk("mtsched: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	mips_proc_dir = get_mips_proc_dir();
+
+	mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir);
+	mtsched_proc->read_proc = proc_read_mtsched;
+	mtsched_proc->write_proc = proc_write_mtsched;
+
+	return 0;
+}
+
+/* Automagically create the entry */
+module_init(init_mtsched_proc);
--- /dev/null
+++ b/arch/mips/kernel/perf_proc.c
@@ -0,0 +1,191 @@
+/*
+ * /proc hooks for CPU performance counter support for SMTC kernel
+ * (and ultimately others)
+ * Copyright (C) 2006 Mips Technologies, Inc
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/mipsregs.h>
+#include <asm/uaccess.h>
+#include <linux/proc_fs.h>
+
+/*
+ * /proc diagnostic and statistics hooks
+ */
+
+
+/* Internal software-extended event counters */
+
+static unsigned long long extencount[4] = {0, 0, 0, 0};
+
+static struct proc_dir_entry *perf_proc;
+
+static int proc_read_perf(char *page, char **start, off_t off,
+			  int count, int *eof, void *data)
+{
+	int totalen = 0;
+	int len;
+
+	len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0());
+	totalen += len;
+	page += len;
+	len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n",
+		extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0()));
+	totalen += len;
+	page += len;
+	len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1());
+	totalen += len;
+	page += len;
+	len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n",
+		extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1()));
+	totalen += len;
+	page += len;
+	len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2());
+	totalen += len;
+	page += len;
+	len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n",
+		extencount[2] + (unsigned long long)((unsigned)read_c0_perfcntr2()));
+	totalen += len;
+	page += len;
+	len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3());
+	totalen += len;
+	page += len;
+	len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n",
+		extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3()));
+	totalen += len;
+	page += len;
+
+	return totalen;
+}
+
+/*
+ * Write to perf counter registers based on text input
+ */
+
+#define TXTBUFSZ 100
+
+static int proc_write_perf(struct file *file, const char *buffer,
+			   unsigned long count, void *data)
+{
+	int len;
+	int nparsed;
+	int index;
+	char mybuf[TXTBUFSZ];
+
+	int which[4];
+	unsigned long control[4];
+	long long ctrdata[4];
+
+	if (count >= TXTBUFSZ) len = TXTBUFSZ-1;
+	else len = count;
+	memset(mybuf, 0, TXTBUFSZ);
+	if (copy_from_user(mybuf, buffer, len)) return -EFAULT;
+
+	nparsed = sscanf(mybuf,
+		"%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld",
+		&which[0], &control[0], &ctrdata[0],
+		&which[1], &control[1], &ctrdata[1],
+		&which[2], &control[2], &ctrdata[2],
+		&which[3], &control[3], &ctrdata[3]);
+
+	for (index = 0; nparsed >= 3; index++) {
+		switch (which[index]) {
+		case 0:
+			write_c0_perfctrl0(control[index]);
+			if (ctrdata[index] != -1) {
+				extencount[0] = (unsigned long long)ctrdata[index];
+				write_c0_perfcntr0((unsigned long)0);
+			}
+			break;
+		case 1:
+			write_c0_perfctrl1(control[index]);
+			if (ctrdata[index] != -1) {
+				extencount[1] = (unsigned long long)ctrdata[index];
+				write_c0_perfcntr1((unsigned long)0);
+			}
+			break;
+		case 2:
+			write_c0_perfctrl2(control[index]);
+			if (ctrdata[index] != -1) {
+				extencount[2] = (unsigned long long)ctrdata[index];
+				write_c0_perfcntr2((unsigned long)0);
+			}
+			break;
+		case 3:
+			write_c0_perfctrl3(control[index]);
+			if (ctrdata[index] != -1) {
+				extencount[3] = (unsigned long long)ctrdata[index];
+				write_c0_perfcntr3((unsigned long)0);
+			}
+			break;
+		}
+		nparsed -= 3;
+	}
+	return (len);
+}
+
+extern int (*perf_irq)(void);
+
+/*
+ * Invoked when timer interrupt vector picks up a perf counter overflow
+ */
+
+static int perf_proc_irq(void)
+{
+	unsigned long snapshot;
+
+	/*
+	 * It would be nice to do this as a loop, but we don't have
+	 * indirect access to CP0 registers.
+	 */
+	snapshot = read_c0_perfcntr0();
+	if ((long)snapshot < 0) {
+		extencount[0] +=
+			(unsigned long long)((unsigned)read_c0_perfcntr0());
+		write_c0_perfcntr0(0);
+	}
+	snapshot = read_c0_perfcntr1();
+	if ((long)snapshot < 0) {
+		extencount[1] +=
+			(unsigned long long)((unsigned)read_c0_perfcntr1());
+		write_c0_perfcntr1(0);
+	}
+	snapshot = read_c0_perfcntr2();
+	if ((long)snapshot < 0) {
+		extencount[2] +=
+			(unsigned long long)((unsigned)read_c0_perfcntr2());
+		write_c0_perfcntr2(0);
+	}
+	snapshot = read_c0_perfcntr3();
+	if ((long)snapshot < 0) {
+		extencount[3] +=
+			(unsigned long long)((unsigned)read_c0_perfcntr3());
+		write_c0_perfcntr3(0);
+	}
+	return 0;
+}
+
+static int __init init_perf_proc(void)
+{
+	extern struct proc_dir_entry *get_mips_proc_dir(void);
+
+	struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir();
+
+	write_c0_perfcntr0(0);
+	write_c0_perfcntr1(0);
+	write_c0_perfcntr2(0);
+	write_c0_perfcntr3(0);
+	perf_proc = create_proc_entry("perf", 0644, mips_proc_dir);
+	perf_proc->read_proc = proc_read_perf;
+	perf_proc->write_proc = proc_write_perf;
+	perf_irq = perf_proc_irq;
+
+	return 0;
+}
+
+/* Automagically create the entry */
+module_init(init_perf_proc);
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/proc_fs.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -110,3 +111,19 @@ const struct seq_operations cpuinfo_op =
	.stop	= c_stop,
	.show	= show_cpuinfo,
 };
+
+/*
+ * Support for MIPS/local /proc hooks in /proc/mips/
+ */
+
+static struct proc_dir_entry *mips_proc = NULL;
+
+struct proc_dir_entry *get_mips_proc_dir(void)
+{
+	/*
+	 * This ought not to be preemptable.
+	 */
+	if (mips_proc == NULL)
+		mips_proc = proc_mkdir("mips", NULL);
+	return (mips_proc);
+}
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1334,6 +1334,13 @@ void smtc_get_new_mmu_context(struct mm_
	asid = asid_cache(cpu);
 
	do {
+#ifdef CONFIG_IFX_VPE_EXT
+		/* If the TLB is shared between AP and RP (AP is running SMTC),
+		   leave out the max ASID, i.e. ASID_MASK, for the RP
+		 */
+		if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
+			asid++;
+#endif
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -76,6 +76,58 @@ static struct kspd_notifications kspd_ev
 static int kspd_events_reqd;
 #endif
 
+#ifdef CONFIG_IFX_VPE_EXT
+static int is_sdepgm;
+extern int stlb;
+extern int vpe0_wired;
+extern int vpe1_wired;
+unsigned int vpe1_load_addr;
+
+static int __init load_address(char *str)
+{
+	get_option(&str, &vpe1_load_addr);
+	return 1;
+}
+__setup("vpe1_load_addr=", load_address);
+
+#include <asm/mipsmtregs.h>
+#define write_vpe_c0_wired(val)	mttc0(6, 0, val)
+
+#ifndef COMMAND_LINE_SIZE
+# define COMMAND_LINE_SIZE	512
+#endif
+
+char command_line[COMMAND_LINE_SIZE * 2];
+
+static unsigned int vpe1_mem;
+static int __init vpe1mem(char *str)
+{
+	vpe1_mem = memparse(str, &str);
+	return 1;
+}
+__setup("vpe1_mem=", vpe1mem);
+
+uint32_t vpe1_wdog_ctr;
+static int __init wdog_ctr(char *str)
+{
+	get_option(&str, &vpe1_wdog_ctr);
+	return 1;
+}
+
+__setup("vpe1_wdog_ctr_addr=", wdog_ctr);
+EXPORT_SYMBOL(vpe1_wdog_ctr);
+
+uint32_t vpe1_wdog_timeout;
+static int __init wdog_timeout(char *str)
+{
+	get_option(&str, &vpe1_wdog_timeout);
+	return 1;
+}
+
+__setup("vpe1_wdog_timeout=", wdog_timeout);
+EXPORT_SYMBOL(vpe1_wdog_timeout);
+
+#endif
 /* grab the likely amount of memory we will need. */
 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
 #define P_SIZE (2 * 1024 * 1024)
@@ -268,6 +320,13 @@ static void *alloc_progmem(unsigned long
	void *addr;
 
 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
+#ifdef CONFIG_IFX_VPE_EXT
+	if (vpe1_load_addr) {
+		memset((void *)vpe1_load_addr, 0, len);
+		return (void *)vpe1_load_addr;
+	}
+#endif
+
	/*
	 * This means you must tell Linux to use less memory than you
	 * physically have, for example by passing a mem= boot argument.
@@ -746,6 +805,12 @@ static int vpe_run(struct vpe * v)
	}
 
	/* Write the address we want it to start running from in the TCPC register. */
+#if defined(CONFIG_IFX_VPE_EXT) && 0
+	if (stlb)
+		write_vpe_c0_wired(vpe0_wired + vpe1_wired);
+	else
+		write_vpe_c0_wired(vpe1_wired);
+#endif
	write_tc_c0_tcrestart((unsigned long)v->__start);
	write_tc_c0_tccontext((unsigned long)0);
@@ -759,6 +824,20 @@ static int vpe_run(struct vpe * v)
 
	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
 
+#if defined(CONFIG_IFX_VPE_EXT) && 0
+	/*
+	 * $a2 & $a3 are used to pass command line parameters to VPE1. $a2
+	 * points to the start of the command line string and $a3 points to
+	 * the end of the string. This convention is identical to the Linux
+	 * kernel boot parameter passing mechanism. Please note that $a3 is
+	 * used to pass physical memory size or 0 in the SDE tool kit, so if
+	 * you pass command line parameters through $a2 & $a3, SDE programs
+	 * don't work as desired.
+	 */
+	mttgpr(6, command_line);
+	mttgpr(7, (command_line + strlen(command_line)));
+	if (is_sdepgm)
+#endif
	/*
	 * The sde-kit passes 'memsize' to __start in $a3, so set something
	 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
@@ -833,6 +912,9 @@ static int find_vpe_symbols(struct vpe *
	if ( (v->__start == 0) || (v->shared_ptr == NULL))
		return -1;
 
+#ifdef CONFIG_IFX_VPE_EXT
+	is_sdepgm = 1;
+#endif
	return 0;
 }
 
@@ -994,6 +1076,15 @@ static int vpe_elfload(struct vpe * v)
			(unsigned long)v->load_addr + v->len);
 
	if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
+#ifdef CONFIG_IFX_VPE_EXT
+		if (vpe1_load_addr) {
+			/* Conversion to KSEG1 is required ??? */
+			v->__start = KSEG1ADDR(vpe1_load_addr);
+			is_sdepgm = 0;
+			return 0;
+		}
+#endif
+
		if (v->__start == 0) {
			printk(KERN_WARNING "VPE loader: program does not contain "
				"a __start symbol\n");
@@ -1064,6 +1155,9 @@ static int vpe_open(struct inode *inode,
	struct vpe_notifications *not;
	struct vpe *v;
	int ret;
+#ifdef CONFIG_IFX_VPE_EXT
+	int progsize;
+#endif
 
	if (minor != iminor(inode)) {
		/* assume only 1 device at the moment. */
@@ -1089,7 +1183,12 @@ static int vpe_open(struct inode *inode,
		release_progmem(v->load_addr);
		cleanup_tc(get_tc(tclimit));
	}
-
+#ifdef CONFIG_IFX_VPE_EXT
+	progsize = (vpe1_mem != 0) ? vpe1_mem : P_SIZE;
+	//printk("progsize = %x\n", progsize);
+	v->pbuffer = vmalloc(progsize);
+	v->plen = progsize;
+#else
	/* this of-course trashes what was there before... */
	v->pbuffer = vmalloc(P_SIZE);
	if (!v->pbuffer) {
@@ -1097,11 +1196,14 @@ static int vpe_open(struct inode *inode,
		return -ENOMEM;
	}
	v->plen = P_SIZE;
+#endif
	v->load_addr = NULL;
	v->len = 0;
 
+#if 0
	v->uid = filp->f_cred->fsuid;
	v->gid = filp->f_cred->fsgid;
+#endif
 
 #ifdef CONFIG_MIPS_APSP_KSPD
	/* get kspd to tell us when a syscall_exit happens */
@@ -1349,6 +1451,133 @@ static void kspd_sp_exit( int sp_id)
	cleanup_tc(get_tc(sp_id));
 }
 #endif
+#ifdef CONFIG_IFX_VPE_EXT
+int32_t vpe1_sw_start(void *sw_start_addr, uint32_t tcmask, uint32_t flags)
+{
+	enum vpe_state state;
+	struct vpe *v = get_vpe(tclimit);
+	struct vpe_notifications *not;
+
+	if (tcmask || flags) {
+		printk(KERN_WARNING "Currently tcmask and flags should be 0.\
+				other values not supported\n");
+		return -1;
+	}
+
+	state = xchg(&v->state, VPE_STATE_INUSE);
+	if (state != VPE_STATE_UNUSED) {
+		vpe_stop(v);
+
+		list_for_each_entry(not, &v->notify, list) {
+			not->stop(tclimit);
+		}
+	}
+
+	v->__start = (unsigned long)sw_start_addr;
+	is_sdepgm = 0;
+
+	if (!vpe_run(v)) {
+		printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n");
+		return 0;
+	}
+	return -1;
+}
+
+EXPORT_SYMBOL(vpe1_sw_start);
+
+int32_t vpe1_sw_stop(uint32_t flags)
+{
+	struct vpe *v = get_vpe(tclimit);
+
+	if (!vpe_free(v)) {
+		printk(KERN_DEBUG "RP Stopped\n");
+		return 0;
+	}
+	else
+		return -1;
+}
+
+EXPORT_SYMBOL(vpe1_sw_stop);
+
+uint32_t vpe1_get_load_addr(uint32_t flags)
+{
+	return vpe1_load_addr;
+}
+
+EXPORT_SYMBOL(vpe1_get_load_addr);
+
+uint32_t vpe1_get_max_mem(uint32_t flags)
+{
+	if (!vpe1_mem)
+		return P_SIZE;
+	else
+		return vpe1_mem;
+}
+
+EXPORT_SYMBOL(vpe1_get_max_mem);
+
+void *vpe1_get_cmdline_argument(void)
+{
+	return saved_command_line;
+}
+
+EXPORT_SYMBOL(vpe1_get_cmdline_argument);
+
+int32_t vpe1_set_boot_param(char *field, char *value, char flags)
+{
+	char *ptr, string[64];
+	int start_off, end_off;
+	if (!field)
+		return -1;
+	strcpy(string, field);
+	if (value) {
+		strcat(string, "=");
+		strcat(string, value);
+		strcat(command_line, " ");
+		strcat(command_line, string);
+	}
+	else {
+		ptr = strstr(command_line, string);
+		if (ptr) {
+			start_off = ptr - command_line;
+			ptr += strlen(string);
+			while ((*ptr != ' ') && (*ptr != '\0'))
+				ptr++;
+			end_off = ptr - command_line;
+			command_line[start_off] = '\0';
+			strcat(command_line, command_line + end_off);
+		}
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(vpe1_set_boot_param);
+
+int32_t vpe1_get_boot_param(char *field, char **value, char flags)
+{
+	char *ptr, string[64];
+	int i = 0;
+	if (!field)
+		return -1;
+	if ((ptr = strstr(command_line, field))) {
+		ptr += strlen(field) + 1; /* including = */
+		while ((*ptr != ' ') && (*ptr != '\0'))
+			string[i++] = *ptr++;
+		string[i] = '\0';
+		*value = kmalloc((strlen(string) + 1), GFP_KERNEL);
+		if (*value != NULL)
+			strcpy(*value, string);
+	}
+	else
+		*value = NULL;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vpe1_get_boot_param);
+
+extern void configure_tlb(void);
+#endif
 
 static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
@@ -1430,6 +1659,18 @@ static int __init vpe_module_init(void)
		printk("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}
+#ifdef CONFIG_IFX_VPE_EXT
+#ifndef CONFIG_MIPS_MT_SMTC
+	configure_tlb();
+#endif
+#endif
+
+#ifndef CONFIG_MIPS_MT_SMTC
+	if (!vpelimit)
+		vpelimit = 1;
+	if (!tclimit)
+		tclimit = 1;
+#endif
 
	if (vpelimit == 0) {
		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
@@ -1474,10 +1715,12 @@ static int __init vpe_module_init(void)
	mtflags = dmt();
	vpflags = dvpe();
 
+	back_to_back_c0_hazard();
+
	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
-	/* dump_mtregs(); */
+	dump_mtregs();
 
	val = read_c0_mvpconf0();
	hw_tcs = (val & MVPCONF0_PTC) + 1;
@@ -1489,6 +1732,7 @@ static int __init vpe_module_init(void)
	 * reschedule send IPIs or similar we might hang.
	 */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	back_to_back_c0_hazard();
	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);
@@ -1514,6 +1758,7 @@ static int __init vpe_module_init(void)
	}
 
	v->ntcs = hw_tcs - tclimit;
+	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
 
	/* add the tc to the list of this vpe's tc's. */
	list_add(&t->tc, &v->tc);
@@ -1582,6 +1827,7 @@ static int __init vpe_module_init(void)
 out_reenable:
	/* release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	back_to_back_c0_hazard();
 
	evpe(vpflags);
	emt(mtflags);