1 --- a/arch/mips/Kconfig
2 +++ b/arch/mips/Kconfig
4 Includes a loader for loading an elf relocatable object
5 onto another VPE and running it.
8 + bool "IFX APRP Extensions"
9 + depends on MIPS_VPE_LOADER
12 + IFX included extensions in APRP
15 + bool "34K Performance counters"
16 + depends on MIPS_MT && PROC_FS
19 + 34K Performance counter through /proc
22 + bool "Support mtsched priority configuration for TCs"
23 + depends on MIPS_MT && PROC_FS
26 + Support for mtsched priority configuration for TCs through
29 config MIPS_MT_SMTC_IM_BACKSTOP
30 bool "Use per-TC register bits as backstop for inhibited IM bits"
31 depends on MIPS_MT_SMTC
32 --- a/arch/mips/include/asm/mipsmtregs.h
33 +++ b/arch/mips/include/asm/mipsmtregs.h
35 #define read_c0_vpeconf0() __read_32bit_c0_register($1, 2)
36 #define write_c0_vpeconf0(val) __write_32bit_c0_register($1, 2, val)
38 +#define read_c0_vpeconf1() __read_32bit_c0_register($1, 3)
39 +#define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val)
41 +#define read_c0_vpeschedule() __read_32bit_c0_register($1, 5)
42 +#define write_c0_vpeschedule(val) __write_32bit_c0_register($1, 5, val)
44 +#define read_c0_vpeschefback() __read_32bit_c0_register($1, 6)
45 +#define write_c0_vpeschefback(val) __write_32bit_c0_register($1, 6, val)
47 +#define read_c0_vpeopt() __read_32bit_c0_register($1, 7)
48 +#define write_c0_vpeopt(val) __write_32bit_c0_register($1, 7, val)
50 #define read_c0_tcstatus() __read_32bit_c0_register($2, 1)
51 #define write_c0_tcstatus(val) __write_32bit_c0_register($2, 1, val)
53 #define read_c0_tcbind() __read_32bit_c0_register($2, 2)
54 +#define write_c0_tcbind(val) __write_32bit_c0_register($2, 2, val)
56 #define read_c0_tccontext() __read_32bit_c0_register($2, 5)
57 #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
59 +#define read_c0_tcschedule() __read_32bit_c0_register($2, 6)
60 +#define write_c0_tcschedule(val) __write_32bit_c0_register($2, 6, val)
62 +#define read_c0_tcschefback() __read_32bit_c0_register($2, 7)
63 +#define write_c0_tcschefback(val) __write_32bit_c0_register($2, 7, val)
68 * Macros for use in assembly language code
70 #define MVPCONTROL_STLB_SHIFT 2
71 #define MVPCONTROL_STLB (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
73 +#define MVPCONTROL_CPA_SHIFT 3
74 +#define MVPCONTROL_CPA (_ULCAST_(1) << MVPCONTROL_CPA_SHIFT)
77 #define MVPCONF0_PTC_SHIFT 0
79 #define MVPCONF0_TCA ( _ULCAST_(1) << MVPCONF0_TCA_SHIFT)
80 #define MVPCONF0_PTLBE_SHIFT 16
81 #define MVPCONF0_PTLBE (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
82 +#define MVPCONF0_PCP_SHIFT 27
83 +#define MVPCONF0_PCP (_ULCAST_(1) << MVPCONF0_PCP_SHIFT)
84 #define MVPCONF0_TLBS_SHIFT 29
85 #define MVPCONF0_TLBS (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
86 #define MVPCONF0_M_SHIFT 31
88 #define VPECONF0_VPA (_ULCAST_(1) << VPECONF0_VPA_SHIFT)
89 #define VPECONF0_MVP_SHIFT 1
90 #define VPECONF0_MVP (_ULCAST_(1) << VPECONF0_MVP_SHIFT)
91 +#define VPECONF0_ICS_SHIFT 16
92 +#define VPECONF0_ICS (_ULCAST_(1) << VPECONF0_ICS_SHIFT)
93 +#define VPECONF0_DCS_SHIFT 17
94 +#define VPECONF0_DCS (_ULCAST_(1) << VPECONF0_DCS_SHIFT)
95 #define VPECONF0_XTC_SHIFT 21
96 #define VPECONF0_XTC (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
99 +#define VPEOPT_DWX_SHIFT 0
100 +#define VPEOPT_IWX_SHIFT 8
101 +#define VPEOPT_IWX0 ( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT)
102 +#define VPEOPT_IWX1 ( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT)
103 +#define VPEOPT_IWX2 ( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT)
104 +#define VPEOPT_IWX3 ( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT)
105 +#define VPEOPT_DWX0 ( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT)
106 +#define VPEOPT_DWX1 ( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT)
107 +#define VPEOPT_DWX2 ( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT)
108 +#define VPEOPT_DWX3 ( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT)
110 /* TCStatus fields (per TC) */
111 #define TCSTATUS_TASID (_ULCAST_(0xff))
112 #define TCSTATUS_IXMT_SHIFT 10
114 #define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val)
115 #define read_vpe_c0_vpeconf0() mftc0(1, 2)
116 #define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val)
117 +#define read_vpe_c0_vpeschedule() mftc0(1, 5)
118 +#define write_vpe_c0_vpeschedule(val) mttc0(1, 5, val)
119 +#define read_vpe_c0_vpeschefback() mftc0(1, 6)
120 +#define write_vpe_c0_vpeschefback(val) mttc0(1, 6, val)
121 +#define read_vpe_c0_vpeopt() mftc0(1, 7)
122 +#define write_vpe_c0_vpeopt(val) mttc0(1, 7, val)
123 +#define read_vpe_c0_wired() mftc0(6, 0)
124 +#define write_vpe_c0_wired(val) mttc0(6, 0, val)
125 #define read_vpe_c0_count() mftc0(9, 0)
126 #define write_vpe_c0_count(val) mttc0(9, 0, val)
127 #define read_vpe_c0_status() mftc0(12, 0)
129 #define write_tc_c0_tchalt(val) mttc0(2, 4, val)
130 #define read_tc_c0_tccontext() mftc0(2, 5)
131 #define write_tc_c0_tccontext(val) mttc0(2, 5, val)
132 +#define read_tc_c0_tcschedule() mftc0(2, 6)
133 +#define write_tc_c0_tcschedule(val) mttc0(2, 6, val)
134 +#define read_tc_c0_tcschefback() mftc0(2, 7)
135 +#define write_tc_c0_tcschefback(val) mttc0(2, 7, val)
136 +#define read_tc_c0_entryhi() mftc0(10, 0)
137 +#define write_tc_c0_entryhi(val) mttc0(10, 0, val)
140 #define read_tc_gpr_sp() mftgpr(29)
141 --- a/arch/mips/kernel/Makefile
142 +++ b/arch/mips/kernel/Makefile
145 obj-$(CONFIG_KGDB) += kgdb.o
146 obj-$(CONFIG_PROC_FS) += proc.o
148 +obj-$(CONFIG_MTSCHED) += mtsched_proc.o
149 +obj-$(CONFIG_PERFCTRS) += perf_proc.o
150 obj-$(CONFIG_64BIT) += cpu-bugs64.o
152 obj-$(CONFIG_I8253) += i8253.o
153 --- a/arch/mips/kernel/mips-mt.c
154 +++ b/arch/mips/kernel/mips-mt.c
156 #include <asm/cacheflush.h>
160 static int __init maxvpes(char *str)
162 get_option(&str, &vpelimit);
167 __setup("maxvpes=", maxvpes);
171 static int __init maxtcs(char *str)
173 get_option(&str, &tclimit);
176 +__setup("maxtcs=", maxtcs);
178 +#ifdef CONFIG_IFX_VPE_EXT
180 +static int __init istlbshared(char *str)
182 + get_option(&str, &stlb);
185 +__setup("vpe_tlb_shared=", istlbshared);
187 -__setup("maxtcs=", maxtcs);
189 +static int __init vpe0wired(char *str)
191 + get_option(&str, &vpe0_wired);
194 +__setup("vpe0_wired_tlb_entries=", vpe0wired);
197 +static int __init vpe1wired(char *str)
199 + get_option(&str, &vpe1_wired);
202 +__setup("vpe1_wired_tlb_entries=", vpe1wired);
204 +#ifdef CONFIG_MIPS_MT_SMTC
207 +void configure_tlb(void)
209 + int vpeflags, tcflags, tlbsiz;
210 + unsigned int config1val;
213 + write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP));
214 + write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC));
216 + //printk("stlb = %d, vpe0_wired = %d vpe1_wired=%d\n", stlb,vpe0_wired, vpe1_wired);
218 + if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) {
224 + write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
225 + write_c0_wired(vpe0_wired + vpe1_wired);
226 + if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
227 + config1val = read_vpe_c0_config1();
228 + tlbsiz = (((config1val >> 25) & 0x3f) + 1);
231 + cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
236 + write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB);
237 + write_c0_wired(vpe0_wired);
241 + write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC));
243 + local_flush_tlb_all();
245 + printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired());
246 +#ifdef CONFIG_MIPS_MT_SMTC
255 * Dump new MIPS MT state for the core. Does not leave TCs halted.
257 if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
258 printk(" VPE %d\n", i);
259 printk(" VPEControl : %08lx\n",
260 - read_vpe_c0_vpecontrol());
261 + read_vpe_c0_vpecontrol());
262 printk(" VPEConf0 : %08lx\n",
263 - read_vpe_c0_vpeconf0());
264 + read_vpe_c0_vpeconf0());
265 printk(" VPE%d.Status : %08lx\n",
266 - i, read_vpe_c0_status());
267 + i, read_vpe_c0_status());
268 printk(" VPE%d.EPC : %08lx %pS\n",
269 - i, read_vpe_c0_epc(),
270 - (void *) read_vpe_c0_epc());
271 + i, read_vpe_c0_epc(),
272 + (void *) read_vpe_c0_epc());
273 printk(" VPE%d.Cause : %08lx\n",
274 - i, read_vpe_c0_cause());
275 + i, read_vpe_c0_cause());
276 printk(" VPE%d.Config7 : %08lx\n",
277 - i, read_vpe_c0_config7());
278 + i, read_vpe_c0_config7());
279 break; /* Next VPE */
283 printk("Mapped %ld ITC cells starting at 0x%08x\n",
284 ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
286 +#ifdef CONFIG_IFX_VPE_EXT
292 --- a/arch/mips/kernel/proc.c
293 +++ b/arch/mips/kernel/proc.c
295 #include <linux/kernel.h>
296 #include <linux/sched.h>
297 #include <linux/seq_file.h>
298 +#include <linux/proc_fs.h>
299 #include <asm/bootinfo.h>
301 #include <asm/cpu-features.h>
304 .show = show_cpuinfo,
308 + * Support for MIPS/local /proc hooks in /proc/mips/
311 +static struct proc_dir_entry *mips_proc = NULL;
313 +struct proc_dir_entry *get_mips_proc_dir(void)
316 + * This ought not to be preemptable.
318 + if(mips_proc == NULL)
319 + mips_proc = proc_mkdir("mips", NULL);
322 --- a/arch/mips/kernel/smtc.c
323 +++ b/arch/mips/kernel/smtc.c
324 @@ -1336,6 +1336,13 @@
325 asid = asid_cache(cpu);
328 +#ifdef CONFIG_IFX_VPE_EXT
329 + /* If TLB is shared between AP and RP (AP is running SMTC),
330 + leave out max ASID i.e., ASID_MASK for RP
332 + if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
335 if (!((asid += ASID_INC) & ASID_MASK) ) {
336 if (cpu_has_vtag_icache)
338 --- a/arch/mips/kernel/vpe.c
339 +++ b/arch/mips/kernel/vpe.c
341 static int kspd_events_reqd;
344 +#ifdef CONFIG_IFX_VPE_EXT
345 +static int is_sdepgm;
347 +extern int vpe0_wired;
348 +extern int vpe1_wired;
349 +unsigned int vpe1_load_addr;
351 +static int __init load_address(char *str)
353 + get_option(&str, &vpe1_load_addr);
356 +__setup("vpe1_load_addr=", load_address);
358 +#include <asm/mipsmtregs.h>
359 +#define write_vpe_c0_wired(val) mttc0(6, 0, val)
361 +#ifndef COMMAND_LINE_SIZE
362 +# define COMMAND_LINE_SIZE 512
365 +char command_line[COMMAND_LINE_SIZE * 2];
367 +static unsigned int vpe1_mem;
368 +static int __init vpe1mem(char *str)
370 + vpe1_mem = memparse(str, &str);
373 +__setup("vpe1_mem=", vpe1mem);
375 +uint32_t vpe1_wdog_ctr;
376 +static int __init wdog_ctr(char *str)
378 + get_option(&str, &vpe1_wdog_ctr);
382 +__setup("vpe1_wdog_ctr_addr=", wdog_ctr);
383 +EXPORT_SYMBOL(vpe1_wdog_ctr);
385 +uint32_t vpe1_wdog_timeout;
386 +static int __init wdog_timeout(char *str)
388 + get_option(&str, &vpe1_wdog_timeout);
392 +__setup("vpe1_wdog_timeout=", wdog_timeout);
393 +EXPORT_SYMBOL(vpe1_wdog_timeout);
396 /* grab the likely amount of memory we will need. */
397 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
398 #define P_SIZE (2 * 1024 * 1024)
402 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
403 +#ifdef CONFIG_IFX_VPE_EXT
404 + if (vpe1_load_addr) {
405 + memset((void *)vpe1_load_addr, 0, len);
406 + return (void *)vpe1_load_addr;
411 * This means you must tell Linux to use less memory than you
412 * physically have, for example by passing a mem= boot argument.
416 /* Write the address we want it to start running from in the TCPC register. */
417 +#if defined(CONFIG_IFX_VPE_EXT) && 0
419 + write_vpe_c0_wired(vpe0_wired + vpe1_wired);
421 + write_vpe_c0_wired(vpe1_wired);
423 write_tc_c0_tcrestart((unsigned long)v->__start);
424 write_tc_c0_tccontext((unsigned long)0);
428 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
430 +#if defined(CONFIG_IFX_VPE_EXT) && 0
432 + * $a2 & $a3 are used to pass command line parameters to VPE1. $a2
433 + * points to the start of the command line string and $a3 points to
434 + * the end of the string. This convention is identical to the Linux
435 + * kernel boot parameter passing mechanism. Please note that $a3 is
436 + * used to pass physical memory size or 0 in SDE tool kit. So, if you
437 + * are passing command line parameters through $a2 & $a3, SDE programs
438 + * don't work as desired.
440 + mttgpr(6, command_line);
441 + mttgpr(7, (command_line + strlen(command_line)));
445 * The sde-kit passes 'memsize' to __start in $a3, so set something
446 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
448 if ( (v->__start == 0) || (v->shared_ptr == NULL))
451 +#ifdef CONFIG_IFX_VPE_EXT
457 @@ -994,6 +1076,15 @@
458 (unsigned long)v->load_addr + v->len);
460 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
461 +#ifdef CONFIG_IFX_VPE_EXT
462 + if (vpe1_load_addr) {
463 + /* Conversion to KSEG1 is required ??? */
464 + v->__start = KSEG1ADDR(vpe1_load_addr);
470 if (v->__start == 0) {
471 printk(KERN_WARNING "VPE loader: program does not contain "
472 "a __start symbol\n");
473 @@ -1064,6 +1155,9 @@
474 struct vpe_notifications *not;
477 +#ifdef CONFIG_IFX_VPE_EXT
481 if (minor != iminor(inode)) {
482 /* assume only 1 device at the moment. */
483 @@ -1090,14 +1184,23 @@
484 cleanup_tc(get_tc(tclimit));
487 +#ifdef CONFIG_IFX_VPE_EXT
488 + progsize = (vpe1_mem != 0) ? vpe1_mem : P_SIZE;
489 + //printk("progsize = %x\n", progsize);
490 + v->pbuffer = vmalloc(progsize);
491 + v->plen = progsize;
493 /* this of-course trashes what was there before... */
494 v->pbuffer = vmalloc(P_SIZE);
501 v->uid = filp->f_cred->fsuid;
502 v->gid = filp->f_cred->fsgid;
505 #ifdef CONFIG_MIPS_APSP_KSPD
506 /* get kspd to tell us when a syscall_exit happens */
507 @@ -1350,6 +1453,133 @@
508 cleanup_tc(get_tc(sp_id));
511 +#ifdef CONFIG_IFX_VPE_EXT
512 +int32_t vpe1_sw_start(void* sw_start_addr, uint32_t tcmask, uint32_t flags)
514 + enum vpe_state state;
515 + struct vpe *v = get_vpe(tclimit);
516 + struct vpe_notifications *not;
518 + if (tcmask || flags) {
519 + printk(KERN_WARNING "Currently tcmask and flags should be 0.\
520 + other values not supported\n");
524 + state = xchg(&v->state, VPE_STATE_INUSE);
525 + if (state != VPE_STATE_UNUSED) {
528 + list_for_each_entry(not, &v->notify, list) {
529 + not->stop(tclimit);
533 + v->__start = (unsigned long)sw_start_addr;
537 + printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n");
543 +EXPORT_SYMBOL(vpe1_sw_start);
545 +int32_t vpe1_sw_stop(uint32_t flags)
547 + struct vpe *v = get_vpe(tclimit);
549 + if (!vpe_free(v)) {
550 + printk(KERN_DEBUG "RP Stopped\n");
557 +EXPORT_SYMBOL(vpe1_sw_stop);
559 +uint32_t vpe1_get_load_addr (uint32_t flags)
561 + return vpe1_load_addr;
564 +EXPORT_SYMBOL(vpe1_get_load_addr);
566 +uint32_t vpe1_get_max_mem (uint32_t flags)
574 +EXPORT_SYMBOL(vpe1_get_max_mem);
576 +void* vpe1_get_cmdline_argument(void)
578 + return saved_command_line;
581 +EXPORT_SYMBOL(vpe1_get_cmdline_argument);
583 +int32_t vpe1_set_boot_param(char *field, char *value, char flags)
585 + char *ptr, string[64];
586 + int start_off, end_off;
589 + strcpy(string, field);
591 + strcat(string, "=");
592 + strcat(string, value);
593 + strcat(command_line, " ");
594 + strcat(command_line, string);
597 + ptr = strstr(command_line, string);
599 + start_off = ptr - command_line;
600 + ptr += strlen(string);
601 + while ((*ptr != ' ') && (*ptr != '\0'))
603 + end_off = ptr - command_line;
604 + command_line[start_off] = '\0';
605 + strcat (command_line, command_line+end_off);
611 +EXPORT_SYMBOL(vpe1_set_boot_param);
613 +int32_t vpe1_get_boot_param(char *field, char **value, char flags)
615 + char *ptr, string[64];
619 + if ((ptr = strstr(command_line, field))) {
620 + ptr += strlen(field) + 1; /* including = */
621 + while ((*ptr != ' ') && (*ptr != '\0'))
622 + string[i++] = *ptr++;
624 + *value = kmalloc((strlen(string) + 1), GFP_KERNEL);
625 + if (*value != NULL)
626 + strcpy(*value, string);
634 +EXPORT_SYMBOL(vpe1_get_boot_param);
636 +extern void configure_tlb(void);
639 static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
640 const char *buf, size_t len)
641 @@ -1431,6 +1661,18 @@
642 printk("VPE loader: not a MIPS MT capable processor\n");
645 +#ifdef CONFIG_IFX_VPE_EXT
646 +#ifndef CONFIG_MIPS_MT_SMTC
651 +#ifndef CONFIG_MIPS_MT_SMTC
659 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
660 @@ -1475,10 +1717,12 @@
664 + back_to_back_c0_hazard();
666 /* Put MVPE's into 'configuration state' */
667 set_c0_mvpcontrol(MVPCONTROL_VPC);
669 - /* dump_mtregs(); */
672 val = read_c0_mvpconf0();
673 hw_tcs = (val & MVPCONF0_PTC) + 1;
674 @@ -1490,6 +1734,7 @@
675 * reschedule send IPIs or similar we might hang.
677 clear_c0_mvpcontrol(MVPCONTROL_VPC);
678 + back_to_back_c0_hazard();
681 local_irq_restore(flags);
682 @@ -1515,6 +1760,7 @@
685 v->ntcs = hw_tcs - tclimit;
686 + write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
688 /* add the tc to the list of this vpe's tc's. */
689 list_add(&t->tc, &v->tc);
690 @@ -1583,6 +1829,7 @@
692 /* release config state */
693 clear_c0_mvpcontrol(MVPCONTROL_VPC);
694 + back_to_back_c0_hazard();
699 +++ b/arch/mips/kernel/mtsched_proc.c
702 + * /proc hooks for MIPS MT scheduling policy management for 34K cores
704 + * This program is free software; you can distribute it and/or modify it
705 + * under the terms of the GNU General Public License (Version 2) as
706 + * published by the Free Software Foundation.
708 + * This program is distributed in the hope it will be useful, but WITHOUT
709 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
710 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
711 + * for more details.
713 + * You should have received a copy of the GNU General Public License along
714 + * with this program; if not, write to the Free Software Foundation, Inc.,
715 + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
717 + * Copyright (C) 2006 Mips Technologies, Inc
720 +#include <linux/kernel.h>
722 +#include <asm/cpu.h>
723 +#include <asm/processor.h>
724 +#include <asm/system.h>
725 +#include <asm/mipsregs.h>
726 +#include <asm/mipsmtregs.h>
727 +#include <asm/uaccess.h>
728 +#include <linux/proc_fs.h>
730 +static struct proc_dir_entry *mtsched_proc;
732 +#ifndef CONFIG_MIPS_MT_SMTC
735 +#define NTCS NR_CPUS
742 +static int proc_read_mtsched(char *page, char **start, off_t off,
743 + int count, int *eof, void *data)
751 + unsigned long flags;
752 + unsigned int mtflags;
753 + unsigned int haltstate;
754 + unsigned int vpes_checked[NVPES];
755 + unsigned int vpeschedule[NVPES];
756 + unsigned int vpeschefback[NVPES];
757 + unsigned int tcschedule[NTCS];
758 + unsigned int tcschefback[NTCS];
760 + /* Dump the state of the MIPS MT scheduling policy manager */
761 +	/* Initialize control state */
762 + for(i = 0; i < NVPES; i++) {
763 + vpes_checked[i] = 0;
764 + vpeschedule[i] = 0;
765 + vpeschefback[i] = 0;
767 + for(i = 0; i < NTCS; i++) {
769 + tcschefback[i] = 0;
772 + /* Disable interrupts and multithreaded issue */
773 + local_irq_save(flags);
776 + /* Then go through the TCs, halt 'em, and extract the values */
777 + mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
778 + for(i = 0; i < NTCS; i++) {
780 + /* No need to halt ourselves! */
781 + tcschedule[i] = read_c0_tcschedule();
782 + tcschefback[i] = read_c0_tcschefback();
783 + /* If VPE bound to TC hasn't been checked, do it */
784 + vpe = read_c0_tcbind() & TCBIND_CURVPE;
785 + if(!vpes_checked[vpe]) {
786 + vpeschedule[vpe] = read_c0_vpeschedule();
787 + vpeschefback[vpe] = read_c0_vpeschefback();
788 + vpes_checked[vpe] = 1;
792 + haltstate = read_tc_c0_tchalt();
793 + write_tc_c0_tchalt(TCHALT_H);
795 + tcschedule[i] = read_tc_c0_tcschedule();
796 + tcschefback[i] = read_tc_c0_tcschefback();
797 + /* If VPE bound to TC hasn't been checked, do it */
798 + vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
799 + if(!vpes_checked[vpe]) {
800 + vpeschedule[vpe] = read_vpe_c0_vpeschedule();
801 + vpeschefback[vpe] = read_vpe_c0_vpeschefback();
802 + vpes_checked[vpe] = 1;
804 + if(!haltstate) write_tc_c0_tchalt(0);
807 + /* Re-enable MT and interrupts */
809 + local_irq_restore(flags);
811 + for(vpe=0; vpe < NVPES; vpe++) {
812 + len = sprintf(page, "VPE[%d].VPEschedule = 0x%08x\n",
813 + vpe, vpeschedule[vpe]);
816 + len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n",
817 + vpe, vpeschefback[vpe]);
821 + for(i=0; i < NTCS; i++) {
822 + len = sprintf(page, "TC[%d].TCschedule = 0x%08x\n",
826 + len = sprintf(page, "TC[%d].TCschefback = 0x%08x\n",
827 + i, tcschefback[i]);
835 + * Write to MT scheduling registers (TCSchedule/VPESchedule) based on text input
838 +#define TXTBUFSZ 1024
840 +static int proc_write_mtsched(struct file *file, const char *buffer,
841 + unsigned long count, void *data)
844 + char mybuf[TXTBUFSZ];
845 + /* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */
846 + char entity[1]; //, entity1[1];
848 + unsigned long value[1];
849 + int nparsed = 0 , index = 0;
850 + unsigned long flags;
851 + unsigned int mtflags;
852 + unsigned int haltstate;
853 + unsigned int tcbindval;
855 + if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
857 + memset(mybuf,0,TXTBUFSZ);
858 + if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
860 + nparsed = sscanf(mybuf, "%c%d %lx",
861 + &entity[0] ,&number[0], &value[0]);
864 + * Having acquired the inputs, which might have
865 + * generated exceptions and preemptions,
866 + * program the registers.
868 + /* Disable interrupts and multithreaded issue */
869 + local_irq_save(flags);
872 + if(entity[index] == 't' ) {
873 + /* Set TCSchedule or TCScheFBack of specified TC */
874 + if(number[index] > NTCS) goto skip;
875 + /* If it's our own TC, do it direct */
876 + if(number[index] ==
877 + ((read_c0_tcbind() & TCBIND_CURTC)
878 + >> TCBIND_CURTC_SHIFT)) {
879 + if(entity[index] == 't')
880 + write_c0_tcschedule(value[index]);
882 + write_c0_tcschefback(value[index]);
884 + /* Otherwise, we do it via MTTR */
885 + settc(number[index]);
886 + haltstate = read_tc_c0_tchalt();
887 + write_tc_c0_tchalt(TCHALT_H);
889 + if(entity[index] == 't')
890 + write_tc_c0_tcschedule(value[index]);
892 + write_tc_c0_tcschefback(value[index]);
894 + if(!haltstate) write_tc_c0_tchalt(0);
896 + } else if(entity[index] == 'v') {
897 + /* Set VPESchedule of specified VPE */
898 + if(number[index] > NVPES) goto skip;
899 + tcbindval = read_c0_tcbind();
900 + /* Are we doing this to our current VPE? */
901 + if((tcbindval & TCBIND_CURVPE) == number[index]) {
902 + /* Then life is simple */
903 + write_c0_vpeschedule(value[index]);
906 + * Bind ourselves to the other VPE long enough
907 + * to program the bind value.
909 + write_c0_tcbind((tcbindval & ~TCBIND_CURVPE)
912 + write_c0_vpeschedule(value[index]);
914 + /* Restore previous binding */
915 + write_c0_tcbind(tcbindval);
920 + else if(entity[index] == 'r') {
921 + unsigned int vpes_checked[2], vpe ,i , mytc;
922 + vpes_checked[0] = vpes_checked[1] = 0;
924 + /* Then go through the TCs, halt 'em, and extract the values */
925 + mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
927 + for(i = 0; i < NTCS; i++) {
929 + /* No need to halt ourselves! */
930 + write_c0_vpeschefback(0);
931 + write_c0_tcschefback(0);
934 + haltstate = read_tc_c0_tchalt();
935 + write_tc_c0_tchalt(TCHALT_H);
937 + write_tc_c0_tcschefback(0);
938 + /* If VPE bound to TC hasn't been checked, do it */
939 + vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
940 + if(!vpes_checked[vpe]) {
941 + write_vpe_c0_vpeschefback(0);
942 + vpes_checked[vpe] = 1;
944 + if(!haltstate) write_tc_c0_tchalt(0);
949 + printk ("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n");
953 + /* Re-enable MT and interrupts */
955 + local_irq_restore(flags);
959 +static int __init init_mtsched_proc(void)
961 + extern struct proc_dir_entry *get_mips_proc_dir(void);
962 + struct proc_dir_entry *mips_proc_dir;
964 + if (!cpu_has_mipsmt) {
965 + printk("mtsched: not a MIPS MT capable processor\n");
969 + mips_proc_dir = get_mips_proc_dir();
971 + mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir);
972 + mtsched_proc->read_proc = proc_read_mtsched;
973 + mtsched_proc->write_proc = proc_write_mtsched;
978 +/* Automagically create the entry */
979 +module_init(init_mtsched_proc);
981 +++ b/arch/mips/kernel/perf_proc.c
984 + * /proc hooks for CPU performance counter support for SMTC kernel
985 + * (and ultimately others)
986 + * Copyright (C) 2006 Mips Technologies, Inc
989 +#include <linux/kernel.h>
991 +#include <asm/cpu.h>
992 +#include <asm/processor.h>
993 +#include <asm/system.h>
994 +#include <asm/mipsregs.h>
995 +#include <asm/uaccess.h>
996 +#include <linux/proc_fs.h>
999 + * /proc diagnostic and statistics hooks
1003 +/* Internal software-extended event counters */
1005 +static unsigned long long extencount[4] = {0,0,0,0};
1007 +static struct proc_dir_entry *perf_proc;
1009 +static int proc_read_perf(char *page, char **start, off_t off,
1010 + int count, int *eof, void *data)
1015 + len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0());
1018 + len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n",
1019 + extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0()));
1022 + len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1());
1025 + len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n",
1026 + extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1()));
1029 + len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2());
1032 + len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n",
1033 + extencount[2] + (unsigned long long)((unsigned)read_c0_perfcntr2()));
1036 + len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3());
1039 + len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n",
1040 + extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3()));
1048 + * Write to perf counter registers based on text input
1051 +#define TXTBUFSZ 1024
1053 +static int proc_write_perf(struct file *file, const char *buffer,
1054 + unsigned long count, void *data)
1059 + char mybuf[TXTBUFSZ];
1062 + unsigned long control[4];
1063 + long long ctrdata[4];
1065 + if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
1067 + memset(mybuf,0,TXTBUFSZ);
1068 + if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
1070 + nparsed = sscanf(mybuf,
1071 + "%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld",
1072 + &which[0], &control[0], &ctrdata[0],
1073 + &which[1], &control[1], &ctrdata[1],
1074 + &which[2], &control[2], &ctrdata[2],
1075 + &which[3], &control[3], &ctrdata[3]);
1077 + for(index = 0; nparsed >= 3; index++) {
1078 + switch (which[index]) {
1080 + write_c0_perfctrl0(control[index]);
1081 + if(ctrdata[index] != -1) {
1082 + extencount[0] = (unsigned long long)ctrdata[index];
1083 + write_c0_perfcntr0((unsigned long)0);
1087 + write_c0_perfctrl1(control[index]);
1088 + if(ctrdata[index] != -1) {
1089 + extencount[1] = (unsigned long long)ctrdata[index];
1090 + write_c0_perfcntr1((unsigned long)0);
1094 + write_c0_perfctrl2(control[index]);
1095 + if(ctrdata[index] != -1) {
1096 + extencount[2] = (unsigned long long)ctrdata[index];
1097 + write_c0_perfcntr2((unsigned long)0);
1101 + write_c0_perfctrl3(control[index]);
1102 + if(ctrdata[index] != -1) {
1103 + extencount[3] = (unsigned long long)ctrdata[index];
1104 + write_c0_perfcntr3((unsigned long)0);
1113 +extern int (*perf_irq)(void);
1116 + * Invoked when timer interrupt vector picks up a perf counter overflow
1119 +static int perf_proc_irq(void)
1121 + unsigned long snapshot;
1124 + * It would be nice to do this as a loop, but we don't have
1125 + * indirect access to CP0 registers.
1127 + snapshot = read_c0_perfcntr0();
1128 + if ((long)snapshot < 0) {
1130 + (unsigned long long)((unsigned)read_c0_perfcntr0());
1131 + write_c0_perfcntr0(0);
1133 + snapshot = read_c0_perfcntr1();
1134 + if ((long)snapshot < 0) {
1136 + (unsigned long long)((unsigned)read_c0_perfcntr1());
1137 + write_c0_perfcntr1(0);
1139 + snapshot = read_c0_perfcntr2();
1140 + if ((long)snapshot < 0) {
1142 + (unsigned long long)((unsigned)read_c0_perfcntr2());
1143 + write_c0_perfcntr2(0);
1145 + snapshot = read_c0_perfcntr3();
1146 + if ((long)snapshot < 0) {
1148 + (unsigned long long)((unsigned)read_c0_perfcntr3());
1149 + write_c0_perfcntr3(0);
1154 +static int __init init_perf_proc(void)
1156 + extern struct proc_dir_entry *get_mips_proc_dir(void);
1158 + struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir();
1160 + write_c0_perfcntr0(0);
1161 + write_c0_perfcntr1(0);
1162 + write_c0_perfcntr2(0);
1163 + write_c0_perfcntr3(0);
1164 + perf_proc = create_proc_entry("perf", 0644, mips_proc_dir);
1165 + perf_proc->read_proc = proc_read_perf;
1166 + perf_proc->write_proc = proc_write_perf;
1167 + perf_irq = perf_proc_irq;
1172 +/* Automagically create the entry */
1173 +module_init(init_perf_proc);