openwrt/package/linux/kernel-source/drivers/net/hnd/sbutils.c
1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright 2004, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
12 * $Id$
13 */
14
15 #include <typedefs.h>
16 #include <osl.h>
17 #include <bcmutils.h>
18 #include <bcmdevs.h>
19 #include <sbconfig.h>
20 #include <sbchipc.h>
21 #include <sbpci.h>
22 #include <pcicfg.h>
23 #include <sbpcmcia.h>
24 #include <sbextif.h>
25 #include <sbutils.h>
26 #include <bcmsrom.h>
27
28 /* debug/trace */
29 #define SB_ERROR(args)
30
31 typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
32 typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
33 typedef bool (*sb_intrsenabled_t)(void *intr_arg);
34
35 /* misc sb info needed by some of the routines */
36 typedef struct sb_info {
37 uint chip; /* chip number */
38 uint chiprev; /* chip revision */
39 uint chippkg; /* chip package option */
40 uint boardtype; /* board type */
41 uint boardvendor; /* board vendor id */
42 uint bus; /* what bus type we are going through */
43
44 void *osh; /* osl os handle */
45 void *sdh; /* bcmsdh handle */
46
47 void *curmap; /* current regs va */
48 void *regs[SB_MAXCORES]; /* other regs va */
49
50 uint curidx; /* current core index */
51 uint dev_coreid; /* the core that provides driver functions */
52 uint pciidx; /* pci core index */
53 uint pcirev; /* pci core rev */
54
55 uint pcmciaidx; /* pcmcia core index */
56 uint pcmciarev; /* pcmcia core rev */
57 bool memseg; /* flag to toggle MEM_SEG register */
58
59 uint ccrev; /* chipc core rev */
60
61 uint gpioidx; /* gpio control core index */
62 uint gpioid; /* gpio control coretype */
63
64 uint numcores; /* # discovered cores */
65 uint coreid[SB_MAXCORES]; /* id of each core */
66
67 void *intr_arg; /* interrupt callback function arg */
68 sb_intrsoff_t intrsoff_fn; /* function to turn chip interrupts off */
69 sb_intrsrestore_t intrsrestore_fn; /* function to restore chip interrupts */
70 sb_intrsenabled_t intrsenabled_fn; /* function to check if chip interrupts are enabled */
71 } sb_info_t;
72
73 /* local prototypes */
74 static void* sb_doattach(sb_info_t *si, uint devid, void *osh, void *regs, uint bustype, void *sdh, char **vars, int *varsz);
75 static void sb_scan(sb_info_t *si);
76 static uint sb_corereg(void *sbh, uint coreidx, uint regoff, uint mask, uint val);
77 static uint _sb_coreidx(void *sbh);
78 static uint sb_findcoreidx(void *sbh, uint coreid, uint coreunit);
79 static uint sb_pcidev2chip(uint pcidev);
80 static uint sb_chip2numcores(uint chip);
81
82 #define SB_INFO(sbh) (sb_info_t*)sbh
83 #define SET_SBREG(sbh, r, mask, val) W_SBREG((sbh), (r), ((R_SBREG((sbh), (r)) & ~(mask)) | (val)))
84 #define GOODCOREADDR(x) (((x) >= SB_ENUM_BASE) && ((x) <= SB_ENUM_LIM) && ISALIGNED((x), SB_CORE_SIZE))
85 #define GOODREGS(regs) (regs && ISALIGNED(regs, SB_CORE_SIZE))
86 #define REGS2SB(va) (sbconfig_t*) ((uint)(va) + SBCONFIGOFF)
87 #define GOODIDX(idx) (((uint)idx) < SB_MAXCORES)
88 #define BADIDX (SB_MAXCORES+1)
89
90 #define R_SBREG(sbh, sbr) sb_read_sbreg((sbh), (sbr))
91 #define W_SBREG(sbh, sbr, v) sb_write_sbreg((sbh), (sbr), (v))
92 #define AND_SBREG(sbh, sbr, v) W_SBREG((sbh), (sbr), (R_SBREG((sbh), (sbr)) & (v)))
93 #define OR_SBREG(sbh, sbr, v) W_SBREG((sbh), (sbr), (R_SBREG((sbh), (sbr)) | (v)))
94
95 /*
96 * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts before/
97 * after core switching, to avoid invalid register access inside an ISR.
98 */
99 #define INTR_OFF(si, intr_val) \
100 if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
101 intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
102 #define INTR_RESTORE(si, intr_val) \
103 if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
104 (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
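
/*
 * Typical usage pattern for INTR_OFF/INTR_RESTORE (illustrative sketch only,
 * never compiled; sb_corereg() below is a real in-file user).  "si" is
 * assumed to be a valid sb_info_t pointer supplied by the caller.
 */
#if 0	/* example only, never compiled */
static void
example_intr_pattern(sb_info_t *si)
{
	uint intr_val = 0, origidx;

	INTR_OFF(si, intr_val);			/* mask driver interrupts around the core switch */
	origidx = sb_coreidx((void *)si);
	/* ... switch cores and access registers here ... */
	sb_setcoreidx((void *)si, origidx);
	INTR_RESTORE(si, intr_val);		/* restore the previous interrupt state */
}
#endif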
105
106 /* power control defines */
107 #define LPOMINFREQ 25000 /* low power oscillator min */
108 #define LPOMAXFREQ 43000 /* low power oscillator max */
109 #define XTALMINFREQ 19800000 /* 20mhz - 1% */
110 #define XTALMAXFREQ 20200000 /* 20mhz + 1% */
111 #define PCIMINFREQ 25000000 /* 25mhz */
112 #define PCIMAXFREQ 34000000 /* 33mhz + fudge */
113 #define SCC_DEF_DIV 0 /* default slow clock divider */
114
115 #define XTAL_ON_DELAY 1000 /* Xtal power on delay in us */
116
117 #define SCC_LOW2FAST_LIMIT 5000 /* time to turn on the fast clock, in ms */
118
119
120 static uint32
121 sb_read_sbreg(void *sbh, volatile uint32 *sbr)
122 {
123 sb_info_t *si;
124 uint8 tmp;
125 uint32 val, intr_val = 0;
126
127 si = SB_INFO(sbh);
128
129 /*
130 * compact flash only has an 11-bit address, while we need a 12-bit address.
131 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
132 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
133 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
134 */
135 if(si->memseg) {
136 INTR_OFF(si, intr_val);
137 tmp = 1;
138 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
139 sbr = (uint32 *) (((uint32) sbr) & ~(1 << 11)); /* mask out bit 11*/
140 }
141
142 val = R_REG(sbr);
143
144 if(si->memseg) {
145 tmp = 0;
146 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
147 INTR_RESTORE(si, intr_val);
148 }
149
150 return (val);
151 }
152
153 static void
154 sb_write_sbreg(void *sbh, volatile uint32 *sbr, uint32 v)
155 {
156 sb_info_t *si;
157 uint8 tmp;
158 volatile uint32 dummy;
159 uint32 intr_val = 0;
160
161 si = SB_INFO(sbh);
162
163 /*
164 * compact flash only has an 11-bit address, while we need a 12-bit address.
165 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
166 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
167 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
168 */
169 if(si->memseg) {
170 INTR_OFF(si, intr_val);
171 tmp = 1;
172 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
173 sbr = (uint32 *) (((uint32) sbr) & ~(1 << 11)); /* mask out bit 11*/
174 }
175
176 if (si->bus == PCMCIA_BUS) {
177 #ifdef IL_BIGENDIAN
178 dummy = R_REG(sbr);
179 W_REG((volatile uint16 *)((uint32)sbr + 2), (uint16)((v >> 16) & 0xffff));
180 dummy = R_REG(sbr);
181 W_REG((volatile uint16 *)sbr, (uint16)(v & 0xffff));
182 #else
183 dummy = R_REG(sbr);
184 W_REG((volatile uint16 *)sbr, (uint16)(v & 0xffff));
185 dummy = R_REG(sbr);
186 W_REG((volatile uint16 *)((uint32)sbr + 2), (uint16)((v >> 16) & 0xffff));
187 #endif
188 } else
189 W_REG(sbr, v);
190
191 if(si->memseg) {
192 tmp = 0;
193 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
194 INTR_RESTORE(si, intr_val);
195 }
196 }
197
198 /*
199 * Allocate a sb handle.
200 * devid - pci device id (used to determine chip#)
201 * osh - opaque OS handle
202 * regs - virtual address of initial core registers
203 * bustype - pci/pcmcia/sb/sdio/etc
204 * vars - pointer to a pointer area for "environment" variables
205 * varsz - pointer to int to return the size of the vars
206 */
207 void*
208 sb_attach(uint devid, void *osh, void *regs, uint bustype, void *sdh, char **vars, int *varsz)
209 {
210 sb_info_t *si;
211
212 /* alloc sb_info_t */
213 if ((si = MALLOC(sizeof (sb_info_t))) == NULL) {
214 SB_ERROR(("sb_attach: malloc failed!\n"));
215 return (NULL);
216 }
217
218 return (sb_doattach(si, devid, osh, regs, bustype, sdh, vars, varsz));
219 }
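
/*
 * Illustrative attach/detach sequence for a PCI client driver (a sketch only,
 * never compiled; "devid", "osh" and "regsva" stand in for whatever the
 * calling driver's probe path provides).
 */
#if 0	/* example only, never compiled */
	void *sbh;
	char *vars;
	int varsz;

	if ((sbh = sb_attach(devid, osh, regsva, PCI_BUS, NULL, &vars, &varsz)) == NULL)
		return (NULL);	/* unrecognized chip or srom init failure */
	/* ... use the other sb_*() routines via sbh ... */
	sb_detach(sbh);
#endif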
220
221 /* global kernel resource */
222 static sb_info_t ksi;
223
224 /* generic kernel variant of sb_attach() */
225 void*
226 sb_kattach()
227 {
228 uint32 *regs;
229 char *unused;
230 int varsz;
231
232 if (ksi.curmap == NULL) {
233 uint32 cid;
234
235 regs = (uint32 *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
236 cid = R_REG((uint32 *)regs);
237 if (((cid & (CID_ID_MASK | CID_PKG_MASK)) == 0x00104712) &&
238 ((cid & CID_REV_MASK) <= 0x00020000)) {
239 uint32 *scc, val;
240
241 scc = (uint32 *)((uint32)regs + OFFSETOF(chipcregs_t, slow_clk_ctl));
242 val = R_REG(scc);
243 SB_ERROR((" initial scc = 0x%x\n", val));
244 val |= SCC_SS_XTAL;
245 W_REG(scc, val);
246 }
247
248 sb_doattach(&ksi, BCM4710_DEVICE_ID, NULL, (void*)regs,
249 SB_BUS, NULL, &unused, &varsz);
250 }
251
252 return &ksi;
253 }
254
255 static void*
256 sb_doattach(sb_info_t *si, uint devid, void *osh, void *regs, uint bustype, void *sdh, char **vars, int *varsz)
257 {
258 uint origidx;
259 chipcregs_t *cc;
260 uint32 w;
261
262 ASSERT(GOODREGS(regs));
263
264 bzero((uchar*)si, sizeof (sb_info_t));
265
266 si->pciidx = si->gpioidx = BADIDX;
267
268 si->osh = osh;
269 si->curmap = regs;
270 si->sdh = sdh;
271
272 /* check to see if we are an sb core mimicking a pci core */
273 if (bustype == PCI_BUS) {
274 if (OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof (uint32)) == 0xffffffff)
275 bustype = SB_BUS;
276 else
277 bustype = PCI_BUS;
278 }
279
280 si->bus = bustype;
281
282 if (si->bus == PCMCIA_BUS)
283 /* need to set the memseg flag for a CF card before any sb register access,
284 * such as the access inside sb_scan. the card type is detected and the memseg
285 * flag is reassigned later, after srom_var_init. there should be no effect
286 * for PCMCIA cards even though the memseg flag is set
287 */
288 si->memseg = TRUE;
289
290 /* kludge to enable the clock on the 4306 which lacks a slowclock */
291 if (si->bus == PCI_BUS)
292 sb_pwrctl_xtal((void*)si, XTAL|PLL, ON);
293
294 /* initialize current core index value */
295 si->curidx = _sb_coreidx((void*)si);
296 if (si->curidx == BADIDX)
297 goto bad;
298
299 /* keep and reuse the initial register mapping */
300 origidx = si->curidx;
301 if (si->bus == SB_BUS)
302 si->regs[origidx] = regs;
303
304 /* is core-0 a chipcommon core? */
305 si->numcores = 1;
306 cc = (chipcregs_t*) sb_setcoreidx((void*)si, 0);
307 if (sb_coreid((void*)si) != SB_CC)
308 cc = NULL;
309
310 /* determine chip id and rev */
311 if (cc) {
312 /* chip common core found! */
313 si->chip = R_REG(&cc->chipid) & CID_ID_MASK;
314 si->chiprev = (R_REG(&cc->chipid) & CID_REV_MASK) >> CID_REV_SHIFT;
315 si->chippkg = (R_REG(&cc->chipid) & CID_PKG_MASK) >> CID_PKG_SHIFT;
316 } else {
317 /* The only pcmcia chip without a chipcommon core is a 4301 */
318 if (si->bus == PCMCIA_BUS)
319 devid = BCM4301_DEVICE_ID;
320
321 /* no chip common core -- must convert device id to chip id */
322 if ((si->chip = sb_pcidev2chip(devid)) == 0) {
323 SB_ERROR(("sb_attach: unrecognized device id 0x%04x\n", devid));
324 goto bad;
325 }
326 }
327
328 /* get chipcommon rev */
329 si->ccrev = cc? sb_corerev((void*)si) : 0;
330
331 /* determine numcores */
332 if ((si->ccrev == 4) || (si->ccrev >= 6))
333 si->numcores = (R_REG(&cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
334 else
335 si->numcores = sb_chip2numcores(si->chip);
336
337 /* return to original core */
338 sb_setcoreidx((void*)si, origidx);
339
340 /* sanity checks */
341 ASSERT(si->chip);
342
343 /* scan for cores */
344 sb_scan(si);
345
346 /* initialize the vars after sb_scan so that the core rev. information
347 * collected by sb_scan is available to srom_var_init.
348 */
349 if (srom_var_init(si, si->bus, si->curmap, osh, vars, varsz)) {
350 SB_ERROR(("sb_attach: srom_var_init failed\n"));
351 goto bad;
352 }
353
354 if (cc == NULL) {
355 /*
356 * The chip revision number is hardwired into all
357 * of the pci function config rev fields and is
358 * independent from the individual core revision numbers.
359 * For example, the "A0" silicon of each chip is chip rev 0.
360 * For PCMCIA we get it from the CIS instead.
361 */
362 if (si->bus == PCMCIA_BUS) {
363 ASSERT(vars);
364 si->chiprev = getintvar(*vars, "chiprev");
365 } else if (si->bus == PCI_BUS) {
366 w = OSL_PCI_READ_CONFIG(osh, PCI_CFG_REV, sizeof (uint32));
367 si->chiprev = w & 0xff;
368 } else
369 si->chiprev = 0;
370 }
371
372 if (si->bus == PCMCIA_BUS) {
373 w = getintvar(*vars, "regwindowsz");
374 si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
375 }
376
377 /* pci core is required */
378 if (!GOODIDX(si->pciidx)) {
379 SB_ERROR(("sb_attach: pci core not found\n"));
380 goto bad;
381 }
382
383 /* gpio control core is required */
384 if (!GOODIDX(si->gpioidx)) {
385 SB_ERROR(("sb_attach: gpio control core not found\n"));
386 goto bad;
387 }
388
389 /* get boardtype and boardrev */
390 switch (si->bus) {
391 case PCI_BUS:
392 /* do a pci config read to get subsystem id and subvendor id */
393 w = OSL_PCI_READ_CONFIG(osh, PCI_CFG_SVID, sizeof (uint32));
394 si->boardvendor = w & 0xffff;
395 si->boardtype = (w >> 16) & 0xffff;
396 break;
397
398 case PCMCIA_BUS:
399 case SDIO_BUS:
400 si->boardvendor = getintvar(*vars, "manfid");
401 si->boardtype = getintvar(*vars, "prodid");
402 break;
403
404 case SB_BUS:
405 si->boardvendor = VENDOR_BROADCOM;
406 si->boardtype = 0xffff;
407 break;
408 }
409
410 if (si->boardtype == 0) {
411 SB_ERROR(("sb_attach: unknown board type\n"));
412 ASSERT(si->boardtype);
413 }
414
415 /* clear any previous epidiag-induced target abort */
416 sb_taclear((void*)si);
417
418 return ((void*)si);
419
420 bad:
421 MFREE(si, sizeof (sb_info_t));
422 return (NULL);
423 }
424
425 uint
426 sb_coreid(void *sbh)
427 {
428 sb_info_t *si;
429 sbconfig_t *sb;
430
431 si = SB_INFO(sbh);
432 sb = REGS2SB(si->curmap);
433
434 return ((R_SBREG(sbh, &(sb)->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
435 }
436
437 uint
438 sb_coreidx(void *sbh)
439 {
440 sb_info_t *si;
441
442 si = SB_INFO(sbh);
443 return (si->curidx);
444 }
445
446 /* return current index of core */
447 static uint
448 _sb_coreidx(void *sbh)
449 {
450 sb_info_t *si;
451 sbconfig_t *sb;
452 uint32 sbaddr = 0;
453
454 si = SB_INFO(sbh);
455 ASSERT(si);
456
457 switch (si->bus) {
458 case SB_BUS:
459 sb = REGS2SB(si->curmap);
460 sbaddr = sb_base(R_SBREG(sbh, &sb->sbadmatch0));
461 break;
462
463 case PCI_BUS:
464 sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof (uint32));
465 break;
466
467 case PCMCIA_BUS: {
468 uint8 tmp;
469
470 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
471 sbaddr = (uint)tmp << 12;
472 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
473 sbaddr |= (uint)tmp << 16;
474 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
475 sbaddr |= (uint)tmp << 24;
476 break;
477 }
478 default:
479 ASSERT(0);
480 }
481
482 if (!GOODCOREADDR(sbaddr))
483 return BADIDX;
484
485 return ((sbaddr - SB_ENUM_BASE) / SB_CORE_SIZE);
486 }
487
488 uint
489 sb_corevendor(void *sbh)
490 {
491 sb_info_t *si;
492 sbconfig_t *sb;
493
494 si = SB_INFO(sbh);
495 sb = REGS2SB(si->curmap);
496
497 return ((R_SBREG(sbh, &(sb)->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
498 }
499
500 uint
501 sb_corerev(void *sbh)
502 {
503 sb_info_t *si;
504 sbconfig_t *sb;
505
506 si = SB_INFO(sbh);
507 sb = REGS2SB(si->curmap);
508
509 return (R_SBREG(sbh, &(sb)->sbidhigh) & SBIDH_RC_MASK);
510 }
511
512 #define SBTML_ALLOW (SBTML_PE | SBTML_FGC | SBTML_FL_MASK)
513
514 /* set/clear sbtmstatelow core-specific flags */
515 uint32
516 sb_coreflags(void *sbh, uint32 mask, uint32 val)
517 {
518 sb_info_t *si;
519 sbconfig_t *sb;
520 uint32 w;
521
522 si = SB_INFO(sbh);
523 sb = REGS2SB(si->curmap);
524
525 ASSERT((val & ~mask) == 0);
526 ASSERT((mask & ~SBTML_ALLOW) == 0);
527
528 /* mask and set */
529 if (mask || val) {
530 w = (R_SBREG(sbh, &sb->sbtmstatelow) & ~mask) | val;
531 W_SBREG(sbh, &sb->sbtmstatelow, w);
532 }
533
534 /* return the new value */
535 return (R_SBREG(sbh, &sb->sbtmstatelow) & SBTML_ALLOW);
536 }
537
538 /* set/clear sbtmstatehigh core-specific flags */
539 uint32
540 sb_coreflagshi(void *sbh, uint32 mask, uint32 val)
541 {
542 sb_info_t *si;
543 sbconfig_t *sb;
544 uint32 w;
545
546 si = SB_INFO(sbh);
547 sb = REGS2SB(si->curmap);
548
549 ASSERT((val & ~mask) == 0);
550 ASSERT((mask & ~SBTMH_FL_MASK) == 0);
551
552 /* mask and set */
553 if (mask || val) {
554 w = (R_SBREG(sbh, &sb->sbtmstatehigh) & ~mask) | val;
555 W_SBREG(sbh, &sb->sbtmstatehigh, w);
556 }
557
558 /* return the new value */
559 return (R_SBREG(sbh, &sb->sbtmstatehigh) & SBTMH_FL_MASK);
560 }
561
562 bool
563 sb_iscoreup(void *sbh)
564 {
565 sb_info_t *si;
566 sbconfig_t *sb;
567
568 si = SB_INFO(sbh);
569 sb = REGS2SB(si->curmap);
570
571 return ((R_SBREG(sbh, &(sb)->sbtmstatelow) & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK);
572 }
573
574 /*
575 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
576 * switch back to the original core, and return the new value.
577 */
578 static uint
579 sb_corereg(void *sbh, uint coreidx, uint regoff, uint mask, uint val)
580 {
581 sb_info_t *si;
582 uint origidx;
583 uint32 *r;
584 uint w;
585 uint intr_val = 0;
586
587 ASSERT(GOODIDX(coreidx));
588 ASSERT(regoff < SB_CORE_SIZE);
589 ASSERT((val & ~mask) == 0);
590
591 si = SB_INFO(sbh);
592
593 INTR_OFF(si, intr_val);
594
595 /* save current core index */
596 origidx = sb_coreidx(sbh);
597
598 /* switch core */
599 r = (uint32*) ((uint) sb_setcoreidx(sbh, coreidx) + regoff);
600
601 /* mask and set */
602 if (mask || val) {
603 if (regoff >= SBCONFIGOFF) {
604 w = (R_SBREG(sbh, r) & ~mask) | val;
605 W_SBREG(sbh, r, w);
606 } else {
607 w = (R_REG(r) & ~mask) | val;
608 W_REG(r, w);
609 }
610 }
611
612 /* readback */
613 if (regoff >= SBCONFIGOFF)
614 w = R_SBREG(sbh, r);
615 else
616 w = R_REG(r);
617
618 /* restore core index */
619 if (origidx != coreidx)
620 sb_setcoreidx(sbh, origidx);
621
622 INTR_RESTORE(si, intr_val);
623 return (w);
624 }
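
/*
 * Note: passing mask == 0 and val == 0 makes sb_corereg() a pure read -- the
 * register is left untouched and only the readback is returned (this is how
 * sb_gpioin() below uses it).  Sketch only, assuming chipcommon is the
 * gpio-controlling core:
 */
#if 0	/* example only, never compiled */
	w = sb_corereg(sbh, si->gpioidx, OFFSETOF(chipcregs_t, gpioin), 0, 0);
#endif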
625
626 /* scan the sb enumerated space to identify all cores */
627 static void
628 sb_scan(sb_info_t *si)
629 {
630 void *sbh;
631 uint origidx;
632 uint i;
633
634 sbh = (void*) si;
635
636 /* numcores should already be set */
637 ASSERT((si->numcores > 0) && (si->numcores <= SB_MAXCORES));
638
639 /* save current core index */
640 origidx = sb_coreidx(sbh);
641
642 si->pciidx = si->gpioidx = BADIDX;
643
644 for (i = 0; i < si->numcores; i++) {
645 sb_setcoreidx(sbh, i);
646 si->coreid[i] = sb_coreid(sbh);
647
648 if (si->coreid[i] == SB_PCI) {
649 si->pciidx = i;
650 si->pcirev = sb_corerev(sbh);
651
652 } else if (si->coreid[i] == SB_PCMCIA) {
653 si->pcmciaidx = i;
654 si->pcmciarev = sb_corerev(sbh);
655 }
656 }
657
658 /*
659 * Find the gpio "controlling core" type and index.
660 * Precedence:
661 * - if there's a chip common core - use that
662 * - else if there's a pci core (rev >= 2) - use that
663 * - else there had better be an extif core (4710 only)
664 */
665 if (GOODIDX(sb_findcoreidx(sbh, SB_CC, 0))) {
666 si->gpioidx = sb_findcoreidx(sbh, SB_CC, 0);
667 si->gpioid = SB_CC;
668 } else if (GOODIDX(si->pciidx) && (si->pcirev >= 2)) {
669 si->gpioidx = si->pciidx;
670 si->gpioid = SB_PCI;
671 } else if (GOODIDX(sb_findcoreidx(sbh, SB_EXTIF, 0))) {
672 si->gpioidx = sb_findcoreidx(sbh, SB_EXTIF, 0);
673 si->gpioid = SB_EXTIF;
674 }
675
676 /* return to original core index */
677 sb_setcoreidx(sbh, origidx);
678 }
679
680 /* may be called with core in reset */
681 void
682 sb_detach(void *sbh)
683 {
684 sb_info_t *si;
685 uint idx;
686
687 si = SB_INFO(sbh);
688
689 if (si == NULL)
690 return;
691
692 if (si->bus == SB_BUS)
693 for (idx = 0; idx < SB_MAXCORES; idx++)
694 if (si->regs[idx]) {
695 REG_UNMAP(si->regs[idx]);
696 si->regs[idx] = NULL;
697 }
698
699 MFREE(si, sizeof (sb_info_t));
700 }
701
702 /* use pci dev id to determine chip id for chips not having a chipcommon core */
703 static uint
704 sb_pcidev2chip(uint pcidev)
705 {
706 if ((pcidev >= BCM4710_DEVICE_ID) && (pcidev <= BCM47XX_USB_ID))
707 return (BCM4710_DEVICE_ID);
708 if ((pcidev >= BCM4610_DEVICE_ID) && (pcidev <= BCM4610_USB_ID))
709 return (BCM4610_DEVICE_ID);
710 if ((pcidev >= BCM4402_DEVICE_ID) && (pcidev <= BCM4402_V90_ID))
711 return (BCM4402_DEVICE_ID);
712 if ((pcidev >= BCM4307_V90_ID) && (pcidev <= BCM4307_D11B_ID))
713 return (BCM4307_DEVICE_ID);
714 if (pcidev == BCM4301_DEVICE_ID)
715 return (BCM4301_DEVICE_ID);
716
717 return (0);
718 }
719
720 /* convert chip number to number of i/o cores */
721 static uint
722 sb_chip2numcores(uint chip)
723 {
724 if (chip == 0x4710)
725 return (9);
726 if (chip == 0x4610)
727 return (9);
728 if (chip == 0x4402)
729 return (3);
730 if ((chip == 0x4307) || (chip == 0x4301))
731 return (5);
732 if (chip == 0x4310)
733 return (8);
734 if (chip == 0x4306) /* < 4306c0 */
735 return (6);
736 if (chip == 0x4704)
737 return (9);
738 if (chip == 0x5365)
739 return (7);
740
741 SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
742 ASSERT(0);
743 return (1);
744 }
745
746 /* return index of coreid or BADIDX if not found */
747 static uint
748 sb_findcoreidx(void *sbh, uint coreid, uint coreunit)
749 {
750 sb_info_t *si;
751 uint found;
752 uint i;
753
754 si = SB_INFO(sbh);
755 found = 0;
756
757 for (i = 0; i < si->numcores; i++)
758 if (si->coreid[i] == coreid) {
759 if (found == coreunit)
760 return (i);
761 found++;
762 }
763
764 return (BADIDX);
765 }
766
767 /*
768 * this function changes logical "focus" to the indicated core;
769 * it must be called with interrupts off.
770 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
771 */
772 void*
773 sb_setcoreidx(void *sbh, uint coreidx)
774 {
775 sb_info_t *si;
776 uint32 sbaddr;
777 uint8 tmp;
778
779 si = SB_INFO(sbh);
780
781 if (coreidx >= si->numcores)
782 return (NULL);
783
784 /*
785 * If the user has provided an interrupt mask enabled function,
786 * then assert interrupts are disabled before switching the core.
787 */
788 ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));
789
790 sbaddr = SB_ENUM_BASE + (coreidx * SB_CORE_SIZE);
791
792 switch (si->bus) {
793 case SB_BUS:
794 /* map new one */
795 if (!si->regs[coreidx]) {
796 si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
797 ASSERT(GOODREGS(si->regs[coreidx]));
798 }
799 si->curmap = si->regs[coreidx];
800 break;
801
802 case PCI_BUS:
803 /* point bar0 window */
804 OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
805 break;
806
807 case PCMCIA_BUS:
808 tmp = (sbaddr >> 12) & 0x0f;
809 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
810 tmp = (sbaddr >> 16) & 0xff;
811 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
812 tmp = (sbaddr >> 24) & 0xff;
813 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
814 break;
815 }
816
817 si->curidx = coreidx;
818
819 return (si->curmap);
820 }
821
822 /*
823 * this function changes logical "focus" to the indicated core;
824 * it must be called with interrupts off.
825 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
826 */
827 void*
828 sb_setcore(void *sbh, uint coreid, uint coreunit)
829 {
830 sb_info_t *si;
831 uint idx;
832
833 si = SB_INFO(sbh);
834
835 idx = sb_findcoreidx(sbh, coreid, coreunit);
836 if (!GOODIDX(idx))
837 return (NULL);
838
839 return (sb_setcoreidx(sbh, idx));
840 }
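
/*
 * Example (sketch only, never compiled): temporarily focus on the chipcommon
 * core, read a register, then return to the caller's core.  Interrupts are
 * assumed to be off across the switch, per the comment above.
 */
#if 0	/* example only, never compiled */
	chipcregs_t *cc;
	uint origidx;
	uint32 w;

	origidx = sb_coreidx(sbh);
	if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0)) != NULL)
		w = R_REG(&cc->chipid);
	sb_setcoreidx(sbh, origidx);
#endif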
841
842 /* return chip number */
843 uint
844 sb_chip(void *sbh)
845 {
846 sb_info_t *si;
847
848 si = SB_INFO(sbh);
849 return (si->chip);
850 }
851
852 /* return chip revision number */
853 uint
854 sb_chiprev(void *sbh)
855 {
856 sb_info_t *si;
857
858 si = SB_INFO(sbh);
859 return (si->chiprev);
860 }
861
862 /* return chip common revision number */
863 uint
864 sb_chipcrev(void *sbh)
865 {
866 sb_info_t *si;
867
868 si = SB_INFO(sbh);
869 return (si->ccrev);
870 }
871
872 /* return chip package option */
873 uint
874 sb_chippkg(void *sbh)
875 {
876 sb_info_t *si;
877
878 si = SB_INFO(sbh);
879 return (si->chippkg);
880 }
881
882 /* return PCI core rev. */
883 uint
884 sb_pcirev(void *sbh)
885 {
886 sb_info_t *si;
887
888 si = SB_INFO(sbh);
889 return (si->pcirev);
890 }
891
892 /* return PCMCIA core rev. */
893 uint
894 sb_pcmciarev(void *sbh)
895 {
896 sb_info_t *si;
897
898 si = SB_INFO(sbh);
899 return (si->pcmciarev);
900 }
901
902 /* return board vendor id */
903 uint
904 sb_boardvendor(void *sbh)
905 {
906 sb_info_t *si;
907
908 si = SB_INFO(sbh);
909 return (si->boardvendor);
910 }
911
912 /* return boardtype */
913 uint
914 sb_boardtype(void *sbh)
915 {
916 sb_info_t *si;
917 char *var;
918
919 si = SB_INFO(sbh);
920
921 if (si->bus == SB_BUS && si->boardtype == 0xffff) {
922 /* boardtype format is a hex string */
923 si->boardtype = getintvar(NULL, "boardtype");
924
925 /* backward compatibility for older boardtype string format */
926 if ((si->boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
927 if (!strcmp(var, "bcm94710dev"))
928 si->boardtype = BCM94710D_BOARD;
929 else if (!strcmp(var, "bcm94710ap"))
930 si->boardtype = BCM94710AP_BOARD;
931 else if (!strcmp(var, "bcm94310u"))
932 si->boardtype = BCM94310U_BOARD;
933 else if (!strcmp(var, "bu4711"))
934 si->boardtype = BU4711_BOARD;
935 else if (!strcmp(var, "bu4710"))
936 si->boardtype = BU4710_BOARD;
937 else if (!strcmp(var, "bcm94702mn"))
938 si->boardtype = BCM94702MN_BOARD;
939 else if (!strcmp(var, "bcm94710r1"))
940 si->boardtype = BCM94710R1_BOARD;
941 else if (!strcmp(var, "bcm94710r4"))
942 si->boardtype = BCM94710R4_BOARD;
943 else if (!strcmp(var, "bcm94702cpci"))
944 si->boardtype = BCM94702CPCI_BOARD;
945 else if (!strcmp(var, "bcm95380_rr"))
946 si->boardtype = BCM95380RR_BOARD;
947 }
948 }
949
950 return (si->boardtype);
951 }
952
953 /* return board bus style */
954 uint
955 sb_boardstyle(void *sbh)
956 {
957 sb_info_t *si;
958 uint16 w;
959
960 si = SB_INFO(sbh);
961
962 if (si->bus == PCMCIA_BUS)
963 return (BOARDSTYLE_PCMCIA);
964
965 if (si->bus == SB_BUS)
966 return (BOARDSTYLE_SOC);
967
968 /* bus is PCI */
969
970 if (OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_CIS, sizeof (uint32)) != 0)
971 return (BOARDSTYLE_CARDBUS);
972
973 if ((srom_read(si->bus, si->curmap, si->osh, (SPROM_SIZE - 1) * 2, 2, &w) == 0) &&
974 (w == 0x0313))
975 return (BOARDSTYLE_CARDBUS);
976
977 return (BOARDSTYLE_PCI);
978 }
979
980 /* return the bus type (SB_BUS, PCI_BUS, PCMCIA_BUS, SDIO_BUS) the sbh device is on */
981 uint
982 sb_bus(void *sbh)
983 {
984 sb_info_t *si;
985
986 si = SB_INFO(sbh);
987 return (si->bus);
988 }
989
990 /* return list of found cores */
991 uint
992 sb_corelist(void *sbh, uint coreid[])
993 {
994 sb_info_t *si;
995
996 si = SB_INFO(sbh);
997
998 bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof (uint)));
999 return (si->numcores);
1000 }
1001
1002 /* return current register mapping */
1003 void *
1004 sb_coreregs(void *sbh)
1005 {
1006 sb_info_t *si;
1007
1008 si = SB_INFO(sbh);
1009 ASSERT(GOODREGS(si->curmap));
1010
1011 return (si->curmap);
1012 }
1013
1014 /* traverse all cores to find and clear source of serror */
1015 static void
1016 sb_serr_clear(void *sbh)
1017 {
1018 sb_info_t *si;
1019 sbconfig_t *sb;
1020 uint origidx;
1021 uint i, intr_val = 0;
1022 void * corereg = NULL;
1023
1024 si = SB_INFO(sbh);
1025
1026 INTR_OFF(si, intr_val);
1027 origidx = sb_coreidx(sbh);
1028
1029 for (i = 0; i < si->numcores; i++) {
1030 corereg = sb_setcoreidx(sbh, i);
1031 if (NULL != corereg) {
1032 sb = REGS2SB(corereg);
1033 if ((si->chip == BCM4317_DEVICE_ID) && (si->chiprev == 0)) {
1034 W_SBREG(sbh, &sb->sbtmstatehigh, 0);
1035 } else {
1036 if ((R_SBREG(sbh, &sb->sbtmstatehigh)) & SBTMH_SERR) {
1037 AND_SBREG(sbh, &sb->sbtmstatehigh, ~SBTMH_SERR);
1038 SB_ERROR(("sb_serr_clear: SError at core 0x%x\n", sb_coreid(sbh)));
1039 }
1040 }
1041 }
1042 }
1043
1044 sb_setcoreidx(sbh, origidx);
1045 INTR_RESTORE(si, intr_val);
1046 }
1047
1048 /* check if any inband, outband or timeout errors have happened and clear them */
1049 /* !! must be called with chip clk on */
1050 bool
1051 sb_taclear(void *sbh)
1052 {
1053 sb_info_t *si;
1054 sbconfig_t *sb;
1055 uint origidx;
1056 uint intr_val = 0;
1057 bool rc = FALSE;
1058 uint32 inband = 0, serror = 0, timeout = 0;
1059 void *corereg = NULL;
1060 volatile uint32 imstate, tmstate;
1061
1062 si = SB_INFO(sbh);
1063
1064 if (si->bus == PCI_BUS) {
1065 volatile uint32 stcmd;
1066
1067 /* inband error is Target abort for PCI */
1068 stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32));
1069 inband = stcmd & PCI_CFG_CMD_STAT_TA;
1070 if (inband)
1071 OSL_PCI_WRITE_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
1072
1073 /* serror */
1074 stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32));
1075 serror = stcmd & PCI_SBIM_STATUS_SERR;
1076 if (serror) {
1077 sb_serr_clear(sbh);
1078 OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
1079 }
1080
1081 /* timeout */
1082 imstate = sb_corereg(sbh, si->pciidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
1083 if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
1084 sb_corereg(sbh, si->pciidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
1085 (imstate & ~(SBIM_IBE | SBIM_TO)));
1086 /* inband = imstate & SBIM_IBE; same as TA above */
1087 timeout = imstate & SBIM_TO;
1088 }
1089
1090 } else if (si->bus == PCMCIA_BUS) {
1091
1092 INTR_OFF(si, intr_val);
1093 origidx = sb_coreidx(sbh);
1094
1095 corereg = sb_setcore(sbh, SB_PCMCIA, 0);
1096 if (NULL != corereg) {
1097 sb = REGS2SB(corereg);
1098
1099 imstate = R_SBREG(sbh, &sb->sbimstate);
1100 /* handle surprise removal */
1101 if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
1102 AND_SBREG(sbh, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
1103 inband = imstate & SBIM_IBE;
1104 timeout = imstate & SBIM_TO;
1105 }
1106 tmstate = R_SBREG(sbh, &sb->sbtmstatehigh);
1107 if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
1108 if (!inband) {
1109 serror = 1;
1110 sb_serr_clear(sbh);
1111 }
1112 OR_SBREG(sbh, &sb->sbtmstatelow, SBTML_INT_ACK);
1113 AND_SBREG(sbh, &sb->sbtmstatelow, ~SBTML_INT_ACK);
1114 }
1115 }
1116 sb_setcoreidx(sbh, origidx);
1117 INTR_RESTORE(si, intr_val);
1118
1119 } else if (si->bus == SDIO_BUS) {
1120
1121 INTR_OFF(si, intr_val);
1122 origidx = sb_coreidx(sbh);
1123
1124 corereg = sb_setcore(sbh, SB_PCMCIA, 0);
1125 if (NULL != corereg) {
1126 sb = REGS2SB(corereg);
1127
1128 imstate = R_SBREG(sbh, &sb->sbimstate);
1129 if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
1130 AND_SBREG(sbh, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
1131 /* inband = imstate & SBIM_IBE; cmd error */
1132 timeout = imstate & SBIM_TO;
1133 }
1134 tmstate = R_SBREG(sbh, &sb->sbtmstatehigh);
1135 if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
1136 sb_serr_clear(sbh);
1137 serror = 1;
1138 OR_SBREG(sbh, &sb->sbtmstatelow, SBTML_INT_ACK);
1139 AND_SBREG(sbh, &sb->sbtmstatelow, ~SBTML_INT_ACK);
1140 }
1141 }
1142
1143 sb_setcoreidx(sbh, origidx);
1144 INTR_RESTORE(si, intr_val);
1145 }
1146
1147 if ((inband | timeout | serror) != 0) {
1148 rc = TRUE;
1149 SB_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n", inband, serror, timeout));
1150 }
1151
1152 return (rc);
1153 }
1154
1155 /* commit buffered register updates */
1156 void
1157 sb_commit(void *sbh)
1158 {
1159 sb_info_t *si;
1160 sbpciregs_t *pciregs;
1161 uint origidx;
1162 uint intr_val = 0;
1163
1164 si = SB_INFO(sbh);
1165
1166 origidx = si->curidx;
1167 ASSERT(GOODIDX(origidx));
1168
1169 INTR_OFF(si, intr_val);
1170 /* switch over to pci core */
1171 pciregs = (sbpciregs_t*) sb_setcore(sbh, SB_PCI, 0);
1172
1173 /* do the buffered register update */
1174 W_REG(&pciregs->bcastaddr, SB_COMMIT);
1175 W_REG(&pciregs->bcastdata, 0x0);
1176
1177 /* restore core index */
1178 sb_setcoreidx(sbh, origidx);
1179 INTR_RESTORE(si, intr_val);
1180 }
1181
1182 /* reset and re-enable a core */
1183 void
1184 sb_core_reset(void *sbh, uint32 bits)
1185 {
1186 sb_info_t *si;
1187 sbconfig_t *sb;
1188 volatile uint32 dummy;
1189
1190 si = SB_INFO(sbh);
1191 ASSERT(GOODREGS(si->curmap));
1192 sb = REGS2SB(si->curmap);
1193
1194 /*
1195 * Must do the disable sequence first to work for arbitrary current core state.
1196 */
1197 sb_core_disable(sbh, bits);
1198
1199 /*
1200 * Now do the initialization sequence.
1201 */
1202
1203 /* set reset while enabling the clock and forcing them on throughout the core */
1204 W_SBREG(sbh, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits));
1205 dummy = R_SBREG(sbh, &sb->sbtmstatelow);
1206
1207 if (sb_coreid(sbh) == SB_ILINE100) {
1208 bcm_mdelay(50);
1209 } else {
1210 OSL_DELAY(1);
1211 }
1212
1213 if (R_SBREG(sbh, &sb->sbtmstatehigh) & SBTMH_SERR) {
1214 W_SBREG(sbh, &sb->sbtmstatehigh, 0);
1215 }
1216 if ((dummy = R_SBREG(sbh, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
1217 AND_SBREG(sbh, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
1218 }
1219
1220 /* clear reset and allow it to propagate throughout the core */
1221 W_SBREG(sbh, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
1222 dummy = R_SBREG(sbh, &sb->sbtmstatelow);
1223 OSL_DELAY(1);
1224
1225 /* leave clock enabled */
1226 W_SBREG(sbh, &sb->sbtmstatelow, (SBTML_CLK | bits));
1227 dummy = R_SBREG(sbh, &sb->sbtmstatelow);
1228 OSL_DELAY(1);
1229 }
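
/*
 * Typical core bring-up (sketch only, never compiled): focus on the target
 * core, then reset it with no extra core-specific bits.  "coreid" here is
 * whatever core the caller drives; the "bits" argument carries any additional
 * sbtmstatelow flags that core needs asserted during reset.
 */
#if 0	/* example only, never compiled */
	if (sb_setcore(sbh, coreid, 0) != NULL)
		sb_core_reset(sbh, 0);
#endif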
1230
1231 void
1232 sb_core_tofixup(void *sbh)
1233 {
1234 sb_info_t *si;
1235 sbconfig_t *sb;
1236
1237 si = SB_INFO(sbh);
1238
1239 if (si->pcirev >= 5)
1240 return;
1241
1242 ASSERT(GOODREGS(si->curmap));
1243 sb = REGS2SB(si->curmap);
1244
1245 if (si->bus == SB_BUS) {
1246 SET_SBREG(sbh, &sb->sbimconfiglow,
1247 SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
1248 (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
1249 } else {
1250 if (sb_coreid(sbh) == SB_PCI) {
1251 SET_SBREG(sbh, &sb->sbimconfiglow,
1252 SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
1253 (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
1254 } else {
1255 SET_SBREG(sbh, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
1256 }
1257 }
1258
1259 sb_commit(sbh);
1260 }
1261
1262 void
1263 sb_core_disable(void *sbh, uint32 bits)
1264 {
1265 sb_info_t *si;
1266 volatile uint32 dummy;
1267 sbconfig_t *sb;
1268
1269 si = SB_INFO(sbh);
1270
1271 ASSERT(GOODREGS(si->curmap));
1272 sb = REGS2SB(si->curmap);
1273
1274 /* must return if core is already in reset */
1275 if (R_SBREG(sbh, &sb->sbtmstatelow) & SBTML_RESET)
1276 return;
1277
1278 /* put into reset and return if clocks are not enabled */
1279 if ((R_SBREG(sbh, &sb->sbtmstatelow) & SBTML_CLK) == 0)
1280 goto disable;
1281
1282 /* set the reject bit */
1283 W_SBREG(sbh, &sb->sbtmstatelow, (SBTML_CLK | SBTML_REJ));
1284
1285 /* spin until reject is set */
1286 while ((R_SBREG(sbh, &sb->sbtmstatelow) & SBTML_REJ) == 0)
1287 OSL_DELAY(1);
1288
1289 /* spin until sbtmstatehigh.busy is clear */
1290 while (R_SBREG(sbh, &sb->sbtmstatehigh) & SBTMH_BUSY)
1291 OSL_DELAY(1);
1292
1293 /* set reset and reject while enabling the clocks */
1294 W_SBREG(sbh, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET));
1295 dummy = R_SBREG(sbh, &sb->sbtmstatelow);
1296 OSL_DELAY(10);
1297
1298 disable:
1299 /* leave reset and reject asserted */
1300 W_SBREG(sbh, &sb->sbtmstatelow, (bits | SBTML_REJ | SBTML_RESET));
1301 OSL_DELAY(1);
1302 }
1303
1304 void
1305 sb_watchdog(void *sbh, uint ticks)
1306 {
1307 sb_info_t *si = SB_INFO(sbh);
1308
1309 /* instant NMI */
1310 switch (si->gpioid) {
1311 case SB_CC:
1312 sb_corereg(sbh, si->gpioidx, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
1313 break;
1314 case SB_EXTIF:
1315 sb_corereg(sbh, si->gpioidx, OFFSETOF(extifregs_t, watchdog), ~0, ticks);
1316 break;
1317 }
1318 }
1319
1320 /* initialize the pcmcia core */
1321 void
1322 sb_pcmcia_init(void *sbh)
1323 {
1324 sb_info_t *si;
1325 uint8 cor;
1326
1327 si = SB_INFO(sbh);
1328
1329 /* enable d11 mac interrupts */
1330 if (si->chip == BCM4301_DEVICE_ID) {
1331 /* Have to use FCR2 in 4301 */
1332 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR2 + PCMCIA_COR, &cor, 1);
1333 cor |= COR_IRQEN | COR_FUNEN;
1334 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR2 + PCMCIA_COR, &cor, 1);
1335 } else {
1336 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
1337 cor |= COR_IRQEN | COR_FUNEN;
1338 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
1339 }
1340
1341 }
1342
1343
1344 /*
1345 * Configure the pci core for pci client (NIC) action
1346 * and get appropriate dma offset value.
1347 * coremask is the bitvec of cores by index to be enabled.
1348 */
1349 void
1350 sb_pci_setup(void *sbh, uint32 *dmaoffset, uint coremask)
1351 {
1352 sb_info_t *si;
1353 sbconfig_t *sb;
1354 sbpciregs_t *pciregs;
1355 uint32 sbflag;
1356 uint32 w;
1357 uint idx;
1358
1359 si = SB_INFO(sbh);
1360
1361 if (dmaoffset)
1362 *dmaoffset = 0;
1363
1364 /* if not pci bus, we're done */
1365 if (si->bus != PCI_BUS)
1366 return;
1367
1368 ASSERT(si->pciidx);
1369
1370 /* get current core index */
1371 idx = si->curidx;
1372
1373 /* we interrupt on this backplane flag number */
1374 ASSERT(GOODREGS(si->curmap));
1375 sb = REGS2SB(si->curmap);
1376 sbflag = R_SBREG(sbh, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
1377
1378 /* switch over to pci core */
1379 pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->pciidx);
1380 sb = REGS2SB(pciregs);
1381
1382 /*
1383 * Enable sb->pci interrupts.  Assume PCI rev 2.3 support was added in
1384 * pci core rev 6 and that the interrupt enable scheme changed there.
1385 */
1386 if (si->pcirev < 6) {
1387 /* set sbintvec bit for our flag number */
1388 OR_SBREG(sbh, &sb->sbintvec, (1 << sbflag));
1389 } else {
1390 /* pci config write to set this core bit in PCIIntMask */
1391 w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
1392 w |= (coremask << PCI_SBIM_SHIFT);
1393 OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
1394 }
1395
1396 /* enable prefetch and bursts for sonics-to-pci translation 2 */
1397 OR_REG(&pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
1398
1399 if (si->pcirev < 5) {
1400 SET_SBREG(sbh, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
1401 (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
1402 sb_commit(sbh);
1403 }
1404
1405 /* switch back to previous core */
1406 sb_setcoreidx(sbh, idx);
1407
1408 /* use large sb pci dma window */
1409 if (dmaoffset)
1410 *dmaoffset = SB_PCI_DMA;
1411 }
1412
1413 uint32
1414 sb_base(uint32 admatch)
1415 {
1416 uint32 base;
1417 uint type;
1418
1419 type = admatch & SBAM_TYPE_MASK;
1420 ASSERT(type < 3);
1421
1422 base = 0;
1423
1424 if (type == 0) {
1425 base = admatch & SBAM_BASE0_MASK;
1426 } else if (type == 1) {
1427 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1428 base = admatch & SBAM_BASE1_MASK;
1429 } else if (type == 2) {
1430 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1431 base = admatch & SBAM_BASE2_MASK;
1432 }
1433
1434 return (base);
1435 }
1436
1437 uint32
1438 sb_size(uint32 admatch)
1439 {
1440 uint32 size;
1441 uint type;
1442
1443 type = admatch & SBAM_TYPE_MASK;
1444 ASSERT(type < 3);
1445
1446 size = 0;
1447
1448 if (type == 0) {
1449 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1450 } else if (type == 1) {
1451 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1452 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1453 } else if (type == 2) {
1454 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1455 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1456 }
1457
1458 return (size);
1459 }
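
/*
 * Example use of sb_base()/sb_size() (sketch only, never compiled): recover
 * the backplane address and window size of the current core from its
 * sbadmatch0 register, mirroring what _sb_coreidx() does above.
 */
#if 0	/* example only, never compiled */
	uint32 base, size;

	base = sb_base(R_SBREG(sbh, &sb->sbadmatch0));
	size = sb_size(R_SBREG(sbh, &sb->sbadmatch0));
#endif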
1460
1461 /* return the core-type instantiation # of the current core */
1462 uint
1463 sb_coreunit(void *sbh)
1464 {
1465 sb_info_t *si;
1466 uint idx;
1467 uint coreid;
1468 uint coreunit;
1469 uint i;
1470
1471 si = SB_INFO(sbh);
1472 coreunit = 0;
1473
1474 idx = si->curidx;
1475
1476 ASSERT(GOODREGS(si->curmap));
1477 coreid = sb_coreid(sbh);
1478
1479 /* count the cores of our type */
1480 for (i = 0; i < idx; i++)
1481 if (si->coreid[i] == coreid)
1482 coreunit++;
1483
1484 return (coreunit);
1485 }
1486
1487 static INLINE uint32
1488 factor6(uint32 x)
1489 {
1490 switch (x) {
1491 case CC_F6_2: return 2;
1492 case CC_F6_3: return 3;
1493 case CC_F6_4: return 4;
1494 case CC_F6_5: return 5;
1495 case CC_F6_6: return 6;
1496 case CC_F6_7: return 7;
1497 default: return 0;
1498 }
1499 }
1500
1501 /* calculate the speed the SB would run at given a set of clockcontrol values */
1502 uint32
1503 sb_clock_rate(uint32 pll_type, uint32 n, uint32 m)
1504 {
1505 uint32 n1, n2, clock, m1, m2, m3, mc;
1506
1507 n1 = n & CN_N1_MASK;
1508 n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
1509
1510 if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE4)) {
1511 n1 = factor6(n1);
1512 n2 += CC_F5_BIAS;
1513 } else if (pll_type == PLL_TYPE2) {
1514 n1 += CC_T2_BIAS;
1515 n2 += CC_T2_BIAS;
1516 ASSERT((n1 >= 2) && (n1 <= 7));
1517 ASSERT((n2 >= 5) && (n2 <= 23));
1518 } else if (pll_type == PLL_TYPE3) {
1519 return (100000000);
1520 } else
1521 ASSERT((pll_type >= PLL_TYPE1) && (pll_type <= PLL_TYPE4));
1522
1523 clock = CC_CLOCK_BASE * n1 * n2;
1524
1525 if (clock == 0)
1526 return 0;
1527
1528 m1 = m & CC_M1_MASK;
1529 m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
1530 m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
1531 mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
1532
1533 if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE4)) {
1534 m1 = factor6(m1);
1535 if (pll_type == PLL_TYPE1)
1536 m2 += CC_F5_BIAS;
1537 else
1538 m2 = factor6(m2);
1539 m3 = factor6(m3);
1540
1541 switch (mc) {
1542 case CC_MC_BYPASS: return (clock);
1543 case CC_MC_M1: return (clock / m1);
1544 case CC_MC_M1M2: return (clock / (m1 * m2));
1545 case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
1546 case CC_MC_M1M3: return (clock / (m1 * m3));
1547 default: return (0);
1548 }
1549 } else {
1550 ASSERT(pll_type == PLL_TYPE2);
1551
1552 m1 += CC_T2_BIAS;
1553 m2 += CC_T2M2_BIAS;
1554 m3 += CC_T2_BIAS;
1555 ASSERT((m1 >= 2) && (m1 <= 7));
1556 ASSERT((m2 >= 3) && (m2 <= 10));
1557 ASSERT((m3 >= 2) && (m3 <= 7));
1558
1559 if ((mc & CC_T2MC_M1BYP) == 0)
1560 clock /= m1;
1561 if ((mc & CC_T2MC_M2BYP) == 0)
1562 clock /= m2;
1563 if ((mc & CC_T2MC_M3BYP) == 0)
1564 clock /= m3;
1565
1566 return(clock);
1567 }
1568 }
1569
1570 /* returns the current speed the SB is running at */
1571 uint32
1572 sb_clock(void *sbh)
1573 {
1574 sb_info_t *si;
1575 extifregs_t *eir;
1576 chipcregs_t *cc;
1577 uint32 n, m;
1578 uint idx;
1579 uint32 pll_type, rate;
1580 uint intr_val = 0;
1581
1582 si = SB_INFO(sbh);
1583 idx = si->curidx;
1584 pll_type = PLL_TYPE1;
1585
1586 INTR_OFF(si, intr_val);
1587
1588 /* switch to extif or chipc core */
1589 if ((eir = (extifregs_t *) sb_setcore(sbh, SB_EXTIF, 0))) {
1590 n = R_REG(&eir->clockcontrol_n);
1591 m = R_REG(&eir->clockcontrol_sb);
1592 } else if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0))) {
1593 pll_type = R_REG(&cc->capabilities) & CAP_PLL_MASK;
1594 n = R_REG(&cc->clockcontrol_n);
1595 m = R_REG(&cc->clockcontrol_sb);
1596 } else {
1597 INTR_RESTORE(si, intr_val);
1598 return 0;
1599 }
1600
1601 /* calculate rate */
1602 rate = sb_clock_rate(pll_type, n, m);
1603
1604 /* switch back to previous core */
1605 sb_setcoreidx(sbh, idx);
1606
1607 INTR_RESTORE(si, intr_val);
1608
1609 return rate;
1610 }
1611
1612 /* change logical "focus" to the gpio core for optimized access */
1613 void*
1614 sb_gpiosetcore(void *sbh)
1615 {
1616 sb_info_t *si;
1617
1618 si = SB_INFO(sbh);
1619
1620 return (sb_setcoreidx(sbh, si->gpioidx));
1621 }
1622
1623 /* mask&set gpiocontrol bits */
1624 uint32
1625 sb_gpiocontrol(void *sbh, uint32 mask, uint32 val)
1626 {
1627 sb_info_t *si;
1628 uint regoff;
1629
1630 si = SB_INFO(sbh);
1631 regoff = 0;
1632
1633 switch (si->gpioid) {
1634 case SB_CC:
1635 regoff = OFFSETOF(chipcregs_t, gpiocontrol);
1636 break;
1637
1638 case SB_PCI:
1639 regoff = OFFSETOF(sbpciregs_t, gpiocontrol);
1640 break;
1641
1642 case SB_EXTIF:
1643 return (0);
1644 }
1645
1646 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
1647 }
1648
1649 /* mask&set gpio output enable bits */
1650 uint32
1651 sb_gpioouten(void *sbh, uint32 mask, uint32 val)
1652 {
1653 sb_info_t *si;
1654 uint regoff;
1655
1656 si = SB_INFO(sbh);
1657 regoff = 0;
1658
1659 switch (si->gpioid) {
1660 case SB_CC:
1661 regoff = OFFSETOF(chipcregs_t, gpioouten);
1662 break;
1663
1664 case SB_PCI:
1665 regoff = OFFSETOF(sbpciregs_t, gpioouten);
1666 break;
1667
1668 case SB_EXTIF:
1669 regoff = OFFSETOF(extifregs_t, gpio[0].outen);
1670 break;
1671 }
1672
1673 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
1674 }
1675
1676 /* mask&set gpio output bits */
1677 uint32
1678 sb_gpioout(void *sbh, uint32 mask, uint32 val)
1679 {
1680 sb_info_t *si;
1681 uint regoff;
1682
1683 si = SB_INFO(sbh);
1684 regoff = 0;
1685
1686 switch (si->gpioid) {
1687 case SB_CC:
1688 regoff = OFFSETOF(chipcregs_t, gpioout);
1689 break;
1690
1691 case SB_PCI:
1692 regoff = OFFSETOF(sbpciregs_t, gpioout);
1693 break;
1694
1695 case SB_EXTIF:
1696 regoff = OFFSETOF(extifregs_t, gpio[0].out);
1697 break;
1698 }
1699
1700 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
1701 }
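
/*
 * Illustrative GPIO write (sketch only, never compiled): drive backplane gpio
 * bit 1 high.  The bit assignment is board-specific; bit 1 is just an
 * example here.
 */
#if 0	/* example only, never compiled */
	sb_gpioouten(sbh, (1 << 1), (1 << 1));	/* make gpio 1 an output */
	sb_gpioout(sbh, (1 << 1), (1 << 1));	/* drive it high */
#endif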
1702
1703 /* return the current gpioin register value */
1704 uint32
1705 sb_gpioin(void *sbh)
1706 {
1707 sb_info_t *si;
1708 uint regoff;
1709
1710 si = SB_INFO(sbh);
1711 regoff = 0;
1712
1713 switch (si->gpioid) {
1714 case SB_CC:
1715 regoff = OFFSETOF(chipcregs_t, gpioin);
1716 break;
1717
1718 case SB_PCI:
1719 regoff = OFFSETOF(sbpciregs_t, gpioin);
1720 break;
1721
1722 case SB_EXTIF:
1723 regoff = OFFSETOF(extifregs_t, gpioin);
1724 break;
1725 }
1726
1727 return (sb_corereg(sbh, si->gpioidx, regoff, 0, 0));
1728 }
1729
1730 /* mask&set gpio interrupt polarity bits */
1731 uint32
1732 sb_gpiointpolarity(void *sbh, uint32 mask, uint32 val)
1733 {
1734 sb_info_t *si;
1735 uint regoff;
1736
1737 si = SB_INFO(sbh);
1738 regoff = 0;
1739
1740 switch (si->gpioid) {
1741 case SB_CC:
1742 regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
1743 break;
1744
1745 case SB_PCI:
1746 /* pci gpio implementation does not support interrupt polarity */
1747 ASSERT(0);
1748 break;
1749
1750 case SB_EXTIF:
1751 regoff = OFFSETOF(extifregs_t, gpiointpolarity);
1752 break;
1753 }
1754
1755 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
1756 }
1757
1758 /* mask&set gpio interrupt mask bits */
1759 uint32
1760 sb_gpiointmask(void *sbh, uint32 mask, uint32 val)
1761 {
1762 sb_info_t *si;
1763 uint regoff;
1764
1765 si = SB_INFO(sbh);
1766 regoff = 0;
1767
1768 switch (si->gpioid) {
1769 case SB_CC:
1770 regoff = OFFSETOF(chipcregs_t, gpiointmask);
1771 break;
1772
1773 case SB_PCI:
1774 /* pci gpio implementation does not support interrupt mask */
1775 ASSERT(0);
1776 break;
1777
1778 case SB_EXTIF:
1779 regoff = OFFSETOF(extifregs_t, gpiointmask);
1780 break;
1781 }
1782
1783 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
1784 }
1785
1786
1787 /*
1788 * Return the slow clock source.
1789 * Three sources of SLOW CLOCK: LPO, Xtal, PCI
1790 */
1791 static uint
1792 sb_slowclk_src(void *sbh)
1793 {
1794 sb_info_t *si;
1795 chipcregs_t *cc;
1796 uint32 v;
1797
1798 si = SB_INFO(sbh);
1799
1800 ASSERT(sb_coreid(sbh) == SB_CC);
1801
1802 if (si->ccrev < 6) {
1803 switch (si->bus) {
1804 case PCMCIA_BUS: return (SCC_SS_XTAL);
1805 case PCI_BUS:
1806 v = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32));
1807 if (v & PCI_CFG_GPIO_SCS)
1808 return (SCC_SS_PCI);
1809 else
1810 return (SCC_SS_XTAL);
1811 default: return (SCC_SS_XTAL);
1812 }
1813 } else if (si->ccrev < 10) {
1814 cc = (chipcregs_t*) sb_setcoreidx(sbh, si->curidx);
1815 v = R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
1816 return (v);
1817 } else {
1818 return (SCC_SS_XTAL);
1819 }
1820 }
1821
1822 /*
1823 * Return the slowclock min or max frequency.
1824 * Three sources of SLOW CLOCK:
1825 * 1. On Chip LPO - 32khz or 160khz
1826 * 2. On Chip Xtal OSC - 20mhz/4*(divider+1)
1827 * 3. External PCI clock - 66mhz/4*(divider+1)
1828 */
1829 static uint
1830 sb_slowclk_freq(void *sbh, bool max)
1831 {
1832 sb_info_t *si;
1833 chipcregs_t *cc;
1834 uint32 slowclk;
1835 uint div;
1836
1837 si = SB_INFO(sbh);
1838
1839 ASSERT(sb_coreid(sbh) == SB_CC);
1840
1841 cc = (chipcregs_t*) sb_setcoreidx(sbh, si->curidx);
1842
1843 /* shouldn't be here unless we've established the chip has dynamic power control */
1844 ASSERT(R_REG(&cc->capabilities) & CAP_PWR_CTL);
1845
1846 slowclk = sb_slowclk_src(sbh);
1847 if (si->ccrev < 6) {
1848 if (slowclk == SCC_SS_PCI)
1849 return (max? (PCIMAXFREQ/64) : (PCIMINFREQ/64));
1850 else
1851 return (max? (XTALMAXFREQ/32) : (XTALMINFREQ/32));
1852 } else if (si->ccrev < 10) {
1853 div = 4 * (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHF) + 1);
1854 if (slowclk == SCC_SS_LPO)
1855 return (max? LPOMAXFREQ : LPOMINFREQ);
1856 else if (slowclk == SCC_SS_XTAL)
1857 return (max? (XTALMAXFREQ/div) : (XTALMINFREQ/div));
1858 else if (slowclk == SCC_SS_PCI)
1859 return (max? (PCIMAXFREQ/div) : (PCIMINFREQ/div));
1860 else
1861 ASSERT(0);
1862 } else {
1863 /* Chipc rev 10 is InstaClock */
1864 div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHF;
1865 div = 4 * (div + 1);
1866 return (max ? XTALMAXFREQ : (XTALMINFREQ/div));
1867 }
1868 return (0);
1869 }
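
/*
 * Worked example of the divider math above (for illustration): with the xtal
 * as the slow clock source and a divider field of 0, div = 4 * (0 + 1) = 4,
 * so the nominal slow clock is 20 MHz / 4 = 5 MHz; a divider field of 1
 * gives 20 MHz / 8 = 2.5 MHz.
 */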
1870
1871 static void
1872 sb_pwrctl_setdelay(void *sbh, void *chipcregs)
1873 {
1874 chipcregs_t * cc;
1875 uint slowmaxfreq, pll_delay, slowclk;
1876 uint pll_on_delay, fref_sel_delay;
1877
1878 pll_delay = PLL_DELAY;
1879
1880 /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
1881 * since the xtal will also be powered down by dynamic power control logic.
1882 */
1883 slowclk = sb_slowclk_src(sbh);
1884 if (slowclk != SCC_SS_XTAL)
1885 pll_delay += XTAL_ON_DELAY;
1886
1887 slowmaxfreq = sb_slowclk_freq(sbh, TRUE);
1888
1889 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
1890 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
1891
1892 cc = (chipcregs_t *)chipcregs;
1893 W_REG(&cc->pll_on_delay, pll_on_delay);
1894 W_REG(&cc->fref_sel_delay, fref_sel_delay);
1895 }
1896
1897 /* set or get slow clock divider */
1898 int
1899 sb_pwrctl_slowclk(void *sbh, bool set, uint *div)
1900 {
1901 sb_info_t *si;
1902 uint origidx;
1903 chipcregs_t *cc;
1904 uint intr_val = 0;
1905 uint err = 0;
1906
1907 si = SB_INFO(sbh);
1908
1909 /* chipcommon cores prior to rev6 don't support slowclkcontrol */
1910 if (si->ccrev < 6)
1911 return 1;
1912
1913 /* chipcommon cores rev10 are a whole new ball game */
1914 if (si->ccrev >= 10)
1915 return 1;
1916
1917 if (set && ((*div % 4) || (*div < 4)))
1918 return 2;
1919
1920 INTR_OFF(si, intr_val);
1921 origidx = si->curidx;
1922 cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
1923 ASSERT(cc != NULL);
1924
1925 if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL)) {
1926 err = 3;
1927 goto done;
1928 }
1929
1930 if (set) {
1931 SET_REG(&cc->slow_clk_ctl, SCC_CD_MASK, ((*div / 4 - 1) << SCC_CD_SHF));
1932 sb_pwrctl_setdelay(sbh, (void *)cc);
1933 } else
1934 *div = 4 * (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHF) + 1);
1935
1936 done:
1937 sb_setcoreidx(sbh, origidx);
1938 INTR_RESTORE(si, intr_val);
1939 return err;
1940 }
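
/*
 * Example call (sketch only, never compiled): read back the current slow
 * clock divider.  A nonzero return means the chipcommon core is outside the
 * supported rev range or lacks power control.
 */
#if 0	/* example only, never compiled */
	uint div;

	if (sb_pwrctl_slowclk(sbh, FALSE, &div) == 0) {
		/* div now holds 4 * (SCC_CD field + 1) */
	}
#endif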
1941
1942 /* initialize power control delay registers */
1943 void
1944 sb_pwrctl_init(void *sbh)
1945 {
1946 sb_info_t *si;
1947 uint origidx;
1948 chipcregs_t *cc;
1949
1950 si = SB_INFO(sbh);
1951
1952 if (si->bus == SB_BUS)
1953 return;
1954
1955 origidx = si->curidx;
1956
1957 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
1958 return;
1959
1960 if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
1961 goto done;
1962
1963 /* 4317pc does not work with a slow clock of less than 5 MHz */
1964 if (si->bus == PCMCIA_BUS) {
1965 if ((si->ccrev >= 6) && (si->ccrev < 10))
1966 SET_REG(&cc->slow_clk_ctl, SCC_CD_MASK, (SCC_DEF_DIV << SCC_CD_SHF));
1967 }
1968
1969 sb_pwrctl_setdelay(sbh, (void *)cc);
1970
1971 done:
1972 sb_setcoreidx(sbh, origidx);
1973 }
1974
1975 /* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
1976 uint16
1977 sb_pwrctl_fast_pwrup_delay(void *sbh)
1978 {
1979 sb_info_t *si;
1980 uint origidx;
1981 chipcregs_t *cc;
1982 uint slowminfreq;
1983 uint16 fpdelay;
1984 uint intr_val = 0;
1985
1986 si = SB_INFO(sbh);
1987 fpdelay = 0;
1988 origidx = si->curidx;
1989
1990 if (si->bus == SB_BUS)
1991 goto done;
1992
1993 INTR_OFF(si, intr_val);
1994
1995 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
1996 goto done;
1997
1998 if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
1999 goto done;
2000
2001 slowminfreq = sb_slowclk_freq(sbh, FALSE);
2002 fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) + (slowminfreq - 1)) / slowminfreq;
2003
2004 done:
2005 sb_setcoreidx(sbh, origidx);
2006 INTR_RESTORE(si, intr_val);
2007 return (fpdelay);
2008 }
2009
2010 /* turn primary xtal and/or pll off/on */
2011 int
2012 sb_pwrctl_xtal(void *sbh, uint what, bool on)
2013 {
2014 sb_info_t *si;
2015 uint32 in, out, outen;
2016
2017 si = SB_INFO(sbh);
2018
2019 switch (si->bus) {
2020
2021
2022 case PCMCIA_BUS:
2023 return (0);
2024
2025
2026 case PCI_BUS:
2027
2028 in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof (uint32));
2029 out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32));
2030 outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32));
2031
2032 /*
2033 * We can't actually read the state of the PLLPD so we infer it
2034 * by the value of XTAL_PU which *is* readable via gpioin.
2035 */
2036 if (on && (in & PCI_CFG_GPIO_XTAL))
2037 return (0);
2038
2039 if (what & XTAL)
2040 outen |= PCI_CFG_GPIO_XTAL;
2041 if (what & PLL)
2042 outen |= PCI_CFG_GPIO_PLL;
2043
2044 if (on) {
2045 /* turn primary xtal on */
2046 if (what & XTAL) {
2047 out |= PCI_CFG_GPIO_XTAL;
2048 if (what & PLL)
2049 out |= PCI_CFG_GPIO_PLL;
2050 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
2051 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32), outen);
2052 OSL_DELAY(XTAL_ON_DELAY);
2053 }
2054
2055 /* turn pll on */
2056 if (what & PLL) {
2057 out &= ~PCI_CFG_GPIO_PLL;
2058 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
2059 OSL_DELAY(2000);
2060 }
2061 } else {
2062 if (what & XTAL)
2063 out &= ~PCI_CFG_GPIO_XTAL;
2064 if (what & PLL)
2065 out |= PCI_CFG_GPIO_PLL;
2066 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
2067 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32), outen);
2068 }
2069 break; /* don't fall through to the default (error) case */
2070 default:
2071 return (-1);
2072 }
2073
2074 return (0);
2075 }
2076
2077 /* set dynamic power control mode (forceslow, forcefast, dynamic) */
2078 /* returns true if ignore pll off is set and false if it is not */
2079 bool
2080 sb_pwrctl_clk(void *sbh, uint mode)
2081 {
2082 sb_info_t *si;
2083 uint origidx;
2084 chipcregs_t *cc;
2085 uint32 scc;
2086 bool forcefastclk=FALSE;
2087 uint intr_val = 0;
2088
2089 si = SB_INFO(sbh);
2090
2091 /* chipcommon cores prior to rev6 don't support slowclkcontrol */
2092 if (si->ccrev < 6)
2093 return (FALSE);
2094
2095 /* chipcommon cores rev10 are a whole new ball game */
2096 if (si->ccrev >= 10)
2097 return (FALSE);
2098
2099 INTR_OFF(si, intr_val);
2100
2101 origidx = si->curidx;
2102
2103 cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
2104 ASSERT(cc != NULL);
2105
2106 if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
2107 goto done;
2108
2109 switch (mode) {
2110 case CLK_FAST: /* force fast (pll) clock */
2111 /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
2112 sb_pwrctl_xtal(sbh, XTAL, ON);
2113
2114 SET_REG(&cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
2115 break;
2116
2117 case CLK_SLOW: /* force slow clock */
2118 if ((si->bus == SDIO_BUS) || (si->bus == PCMCIA_BUS))
2119 return (-1);
2120
2121 if (si->ccrev >= 6)
2122 OR_REG(&cc->slow_clk_ctl, SCC_FS);
2123 break;
2124
2125 case CLK_DYNAMIC: /* enable dynamic power control */
2126 scc = R_REG(&cc->slow_clk_ctl);
2127 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
2128 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
2129 scc |= SCC_XC;
2130 W_REG(&cc->slow_clk_ctl, scc);
2131
2132 /* for dynamic control, we have to release our xtal_pu "force on" */
2133 if (scc & SCC_XC)
2134 sb_pwrctl_xtal(sbh, XTAL, OFF);
2135 break;
2136 }
2137
2138 /* Is the h/w forcing the use of the fast clk */
2139 forcefastclk = (bool)((R_REG(&cc->slow_clk_ctl) & SCC_IP) == SCC_IP);
2140
2141 done:
2142 sb_setcoreidx(sbh, origidx);
2143 INTR_RESTORE(si, intr_val);
2144 return (forcefastclk);
2145 }
2146
2147 /* register driver interrupt disabling and restoring callback functions */
2148 void
2149 sb_register_intr_callback(void *sbh, void *intrsoff_fn, void *intrsrestore_fn, void *intrsenabled_fn, void *intr_arg)
2150 {
2151 sb_info_t *si;
2152
2153 si = SB_INFO(sbh);
2154 si->intr_arg = intr_arg;
2155 si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
2156 si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
2157 si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
2158 /* save the current core id. when this function is called, the current core
2159 * must be the core that provides the driver functions (il, et, wl, etc.)
2160 */
2161 si->dev_coreid = si->coreid[si->curidx];
2162 }
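
/*
 * Illustrative registration from a driver's attach path (sketch only, never
 * compiled; the my_* names are hypothetical stand-ins for the caller's own
 * handlers and context).  Note this must run while the driver's own core is
 * the current core, since the current core id is latched as dev_coreid above.
 */
#if 0	/* example only, never compiled */
	sb_register_intr_callback(sbh, (void *)my_intrsoff, (void *)my_intrsrestore,
	                          (void *)my_intrsenabled, my_ctx);
#endif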
2163
2164