/*
 * linux/drivers/mmc/host/glamo-mmc.c - Glamo MMC driver
 *
 * Copyright (C) 2007 Openmoko, Inc, Andy Green <andy@openmoko.com>
 * Based on S3C MMC driver that was:
 * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>

#include <asm/dma.h>
#include <asm/dma-mapping.h>
#include <asm/io.h>

#include "glamo-mci.h"
#include "glamo-core.h"
#include "glamo-regs.h"

/* from glamo-core.c */
extern struct glamo_mci_pdata glamo_mci_def_pdata;

static spinlock_t clock_lock;

#define DRIVER_NAME "glamo-mci"
#define RESSIZE(ressource) (((ressource)->end - (ressource)->start) + 1)

static void glamo_mci_send_request(struct mmc_host *mmc);

/*
 * Max SD clock rate
 *
 * held at /(3 + 1) due to concerns about the recommended 100R series
 * resistor; allows 16MHz @ 4-bit --> 8MBytes/sec raw
 *
 * you can override this on the kernel command line using
 *
 * glamo_mci.sd_max_clk=10000000
 *
 * for example
 */

static int sd_max_clk = 50000000 / 3;
module_param(sd_max_clk, int, 0644);

/*
 * Slow SD clock ratio
 *
 * you can override this on the kernel command line using
 *
 * glamo_mci.sd_slow_ratio=8
 *
 * for example
 *
 * The platform callback decides the effective clock rate: if it is not
 * defined, the maximum is used; if it is defined and returns nonzero,
 * the rate is divided by this factor.
 */

static int sd_slow_ratio = 8;
module_param(sd_slow_ratio, int, 0644);

/*
 * Post-power SD clock rate
 *
 * you can override this on the kernel command line using
 *
 * glamo_mci.sd_post_power_clock=1000000
 *
 * for example
 *
 * After changing power to the card, the clock is held at this rate until
 * the first bulk transfer completes.
 */

static int sd_post_power_clock = 1000000;
module_param(sd_post_power_clock, int, 0644);


/*
 * SD signal drive strength
 *
 * you can override this on the kernel command line using
 *
 * glamo_mci.sd_drive=0
 *
 * for example
 */

static int sd_drive;
module_param(sd_drive, int, 0644);

/*
 * Allow the SD clock to run while idle
 *
 * you can override this on the kernel command line using
 *
 * glamo_mci.sd_idleclk=0
 *
 * for example
 */

static int sd_idleclk = 0; /* disallow idle clock by default */
module_param(sd_idleclk, int, 0644);

/* used to stash real idleclk state in suspend: we force it to run there */
static int suspend_sd_idleclk;

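/*
 * CRC-7 as used for SD/MMC command packets: polynomial x^7 + x^3 + 1
 * (0x09), MSB first, computed here over the first five bytes of the
 * command.  The result is returned already shifted left by one with the
 * end bit set, so it can go on the wire as the final command byte.
 */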
unsigned char CRC7(u8 *pu8, int cnt)
{
	u8 crc = 0;

	while (cnt--) {
		int n;
		u8 d = *pu8++;
		for (n = 0; n < 8; n++) {
			crc <<= 1;
			if ((d & 0x80) ^ (crc & 0x80))
				crc ^= 0x09;
			d <<= 1;
		}
	}
	return (crc << 1) | 1;
}

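/*
 * Fetch the next scatterlist buffer of the current request for PIO:
 * returns the buffer length in 16-bit words and a pointer to it via the
 * out parameters, or an error once the scatterlist is exhausted or no
 * transfer is active.
 */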
static int get_data_buffer(struct glamo_mci_host *host,
			   volatile u32 *words, volatile u16 **pointer)
{
	struct scatterlist *sg;

	*words = 0;
	*pointer = NULL;

	if (host->pio_active == XFER_NONE)
		return -EINVAL;

	if ((!host->mrq) || (!host->mrq->data))
		return -EINVAL;

	if (host->pio_sgptr >= host->mrq->data->sg_len) {
		dev_dbg(&host->pdev->dev, "no more buffers (%i/%i)\n",
			host->pio_sgptr, host->mrq->data->sg_len);
		return -EBUSY;
	}
	sg = &host->mrq->data->sg[host->pio_sgptr];

	*words = sg->length >> 1; /* we are working with a 16-bit data bus */
	*pointer = page_address(sg_page(sg)) + sg->offset;

	BUG_ON(((long)(*pointer)) & 1);

	host->pio_sgptr++;

	/* dev_info(&host->pdev->dev, "new buffer (%i/%i)\n",
		    host->pio_sgptr, host->mrq->data->sg_len); */
	return 0;
}

static void do_pio_read(struct glamo_mci_host *host)
{
	int res;
	u16 __iomem *from_ptr = host->base_data + (RESSIZE(host->mem_data) /
						   sizeof(u16) / 2);
#ifdef DEBUG
	u16 *block;
#endif

	while (1) {
		res = get_data_buffer(host, &host->pio_words, &host->pio_ptr);
		if (res) {
			host->pio_active = XFER_NONE;
			host->complete_what = COMPLETION_FINALIZE;

			dev_dbg(&host->pdev->dev, "pio_read(): "
				"complete (no more data).\n");
			return;
		}

		dev_dbg(&host->pdev->dev, "pio_read(): host->pio_words: %d\n",
			host->pio_words);

		host->pio_count += host->pio_words << 1;

#ifdef DEBUG
		block = (u16 *)host->pio_ptr;
		res = host->pio_words << 1;
#endif
#if 0
		/* u16-centric memcpy */
		while (host->pio_words--)
			*host->pio_ptr++ = *from_ptr++;
#else
		/* memcpy can be faster? */
		memcpy((void *)host->pio_ptr, from_ptr, host->pio_words << 1);
		host->pio_ptr += host->pio_words;
#endif

#ifdef DEBUG
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
			       (void *)block, res, 1);
#endif
	}
}

static int do_pio_write(struct glamo_mci_host *host)
{
	int res = 0;
	volatile u16 __iomem *to_ptr = host->base_data;
	int err = 0;

	dev_dbg(&host->pdev->dev, "pio_write():\n");
	while (!res) {
		res = get_data_buffer(host, &host->pio_words, &host->pio_ptr);
		if (res)
			continue;

		dev_dbg(&host->pdev->dev, "pio_write(): new source: [%i]@[%p]\n",
			host->pio_words, host->pio_ptr);

		host->pio_count += host->pio_words << 1;
		while (host->pio_words--)
			writew(*host->pio_ptr++, to_ptr++);
	}

	dev_dbg(&host->pdev->dev, "pio_write(): complete\n");
	host->pio_active = XFER_NONE;
	return err;
}

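/*
 * Set the SD card clock divider: the card clock is TCLK / (div + 1).
 * A negative div gates the clock off entirely.  The divider is stretched
 * further while the card is powering up, or when the platform callback
 * asks for the slow ratio.
 */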
static void __glamo_mci_fix_card_div(struct glamo_mci_host *host, int div)
{
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (div < 0) {
		/* stop clock - remove clock from divider input */
		writew(readw(glamo_mci_def_pdata.pglamo->base +
			GLAMO_REG_CLOCK_GEN5_1) & (~GLAMO_CLOCK_GEN51_EN_DIV_TCLK),
			glamo_mci_def_pdata.pglamo->base + GLAMO_REG_CLOCK_GEN5_1);

		goto done;
	} else {
		/* set the nearest prescaler factor
		 *
		 * register shared with SCLK divisor -- no chance of race because
		 * we don't use sensor interface
		 */
		writew((readw(glamo_mci_def_pdata.pglamo->base +
			GLAMO_REG_CLOCK_GEN8) & 0xff00) | div,
			glamo_mci_def_pdata.pglamo->base + GLAMO_REG_CLOCK_GEN8);
		/* enable clock to divider input */
		writew(readw(glamo_mci_def_pdata.pglamo->base +
			GLAMO_REG_CLOCK_GEN5_1) | GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
			glamo_mci_def_pdata.pglamo->base + GLAMO_REG_CLOCK_GEN5_1);
	}

	if (host->force_slow_during_powerup)
		div = host->clk_rate / sd_post_power_clock;
	else if (host->pdata->glamo_mci_use_slow)
		if ((host->pdata->glamo_mci_use_slow)())
			div = div * sd_slow_ratio;

	if (div > 255)
		div = 255;

	/*
	 * set the nearest prescaler factor
	 *
	 * register shared with SCLK divisor -- no chance of race because
	 * we don't use sensor interface
	 */
	writew((readw(glamo_mci_def_pdata.pglamo->base +
		GLAMO_REG_CLOCK_GEN8) & 0xff00) | div,
		glamo_mci_def_pdata.pglamo->base + GLAMO_REG_CLOCK_GEN8);
	/* enable clock to divider input */
	writew(readw(glamo_mci_def_pdata.pglamo->base +
		GLAMO_REG_CLOCK_GEN5_1) | GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
		glamo_mci_def_pdata.pglamo->base + GLAMO_REG_CLOCK_GEN5_1);

done:
	spin_unlock_irqrestore(&clock_lock, flags);
}

static int __glamo_mci_set_card_clock(struct glamo_mci_host *host, int freq,
				      int *division)
{
	int div = 0;
	int real_rate = 0;

	if (freq) {
		/* Set clock */
		for (div = 0; div < 256; div++) {
			real_rate = host->clk_rate / (div + 1);
			if (real_rate <= freq)
				break;
		}
		if (div > 255)
			div = 255;

		if (division)
			*division = div;

		__glamo_mci_fix_card_div(host, div);

	} else {
		/* stop clock */
		if (division)
			*division = 0xff;

		if (!sd_idleclk && !host->force_slow_during_powerup)
			/* clock off */
			__glamo_mci_fix_card_div(host, -1);
	}

	return real_rate;
}


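/*
 * Workqueue bottom half for the MMC interrupt: the copy out of the Glamo
 * on-chip SRAM is done here in process context so other interrupts are
 * not blocked, then any STOP command is issued and the request completed.
 */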
static void glamo_mci_irq_worker(struct work_struct *work)
{
	struct glamo_mci_host *host =
		container_of(work, struct glamo_mci_host, irq_work);
	struct mmc_command *cmd = host->mrq->cmd;

	if (host->pio_active == XFER_READ)
		do_pio_read(host);

	host->mrq->data->bytes_xfered = host->pio_count;
	dev_dbg(&host->pdev->dev, "count=%d\n", host->pio_count);

	/* issue STOP if we have been given one to use */
	if (host->mrq->stop) {
		host->cmd_is_stop = 1;
		glamo_mci_send_request(host->mmc);
		host->cmd_is_stop = 0;
	}

	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* clock off */
		__glamo_mci_fix_card_div(host, -1);

	host->complete_what = COMPLETION_NONE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, cmd->mrq);
}

static void glamo_mci_irq_host(struct glamo_mci_host *host)
{
	u16 status;
	struct mmc_command *cmd;
	unsigned long iflags;

	if (host->suspending) { /* bad news, dangerous time */
		dev_err(&host->pdev->dev, "****glamo_mci_irq before resumed\n");
		return;
	}

	if (!host->mrq)
		return;
	cmd = host->mrq->cmd;
	if (!cmd)
		return;

	spin_lock_irqsave(&host->complete_lock, iflags);

	status = readw(host->base + GLAMO_REG_MMC_RB_STAT1);
	dev_dbg(&host->pdev->dev, "status = 0x%04x\n", status);

	/* ack this interrupt source */
	writew(GLAMO_IRQ_MMC,
	       glamo_mci_def_pdata.pglamo->base + GLAMO_REG_IRQ_CLEAR);

	/* we ignore a data timeout report if we are also told the data came */
	if (status & GLAMO_STAT1_MMC_RB_DRDY)
		status &= ~GLAMO_STAT1_MMC_DTOUT;

	if (status & (GLAMO_STAT1_MMC_RTOUT |
		      GLAMO_STAT1_MMC_DTOUT))
		cmd->error = -ETIMEDOUT;
	if (status & (GLAMO_STAT1_MMC_BWERR |
		      GLAMO_STAT1_MMC_BRERR))
		cmd->error = -EILSEQ;
	if (cmd->error) {
		dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
		goto done;
	}

	/*
	 * disable the initial slow start after first bulk transfer
	 */
	if (host->force_slow_during_powerup)
		host->force_slow_during_powerup--;

	/*
	 * we perform the memcpy out of Glamo memory outside of IRQ context
	 * so we don't block other interrupts
	 */
	schedule_work(&host->irq_work);

	goto leave;

done:
	host->complete_what = COMPLETION_NONE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, cmd->mrq);
leave:
	spin_unlock_irqrestore(&host->complete_lock, iflags);
}

static void glamo_mci_irq(unsigned int irq, struct irq_desc *desc)
{
	struct glamo_mci_host *host = (struct glamo_mci_host *)
		desc->handler_data;

	if (host)
		glamo_mci_irq_host(host);

}

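/*
 * Build and fire a single command: the 48-bit command packet is assembled
 * in wire order with its CRC7, loaded into the three CMD registers, then
 * the FIRE register is written with the command class, the expected
 * response type and the command-index toggle bit.
 */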
static int glamo_mci_send_command(struct glamo_mci_host *host,
				  struct mmc_command *cmd)
{
	u8 u8a[6];
	u16 fire = 0;

	/* if we can't do it, reject as busy */
	if (!(readw(host->base + GLAMO_REG_MMC_RB_STAT1) &
	      GLAMO_STAT1_MMC_IDLE)) {
		host->mrq = NULL;
		cmd->error = -EBUSY;
		mmc_request_done(host->mmc, cmd->mrq);
		return -EBUSY;
	}

	/* create an array in wire order for CRC computation */
	u8a[0] = 0x40 | (cmd->opcode & 0x3f);
	u8a[1] = (u8)(cmd->arg >> 24);
	u8a[2] = (u8)(cmd->arg >> 16);
	u8a[3] = (u8)(cmd->arg >> 8);
	u8a[4] = (u8)cmd->arg;
	u8a[5] = CRC7(&u8a[0], 5); /* CRC7 on first 5 bytes of packet */

	/* issue the wire-order array including CRC in register order */
	writew((u8a[4] << 8) | u8a[5], host->base + GLAMO_REG_MMC_CMD_REG1);
	writew((u8a[2] << 8) | u8a[3], host->base + GLAMO_REG_MMC_CMD_REG2);
	writew((u8a[0] << 8) | u8a[1], host->base + GLAMO_REG_MMC_CMD_REG3);

	/* command index toggle */
	fire |= (host->ccnt & 1) << 12;

	/* set type of command */
	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		fire |= GLAMO_FIRE_MMC_CMDT_BNR;
		break;
	case MMC_CMD_BCR:
		fire |= GLAMO_FIRE_MMC_CMDT_BR;
		break;
	case MMC_CMD_AC:
		fire |= GLAMO_FIRE_MMC_CMDT_AND;
		break;
	case MMC_CMD_ADTC:
		fire |= GLAMO_FIRE_MMC_CMDT_AD;
		break;
	}
	/*
	 * if it expects a response, set the type expected
	 *
	 * R1, Length : 48bit, Normal response
	 * R1b, Length : 48bit, same as R1, but with added card busy status
	 * R2, Length : 136bit (really 128 bits with CRC snipped)
	 * R3, Length : 48bit (OCR register value)
	 * R4, Length : 48bit, SDIO_OP_CONDITION, Reverse SDIO Card
	 * R5, Length : 48bit, IO_RW_DIRECTION, Reverse SDIO Card
	 * R6, Length : 48bit (RCA register)
	 * R7, Length : 48bit (interface condition, VHS(voltage supplied),
	 *                     check pattern, CRC7)
	 */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R6: /* same index as R7 and R1 */
		fire |= GLAMO_FIRE_MMC_RSPT_R1;
		break;
	case MMC_RSP_R1B:
		fire |= GLAMO_FIRE_MMC_RSPT_R1b;
		break;
	case MMC_RSP_R2:
		fire |= GLAMO_FIRE_MMC_RSPT_R2;
		break;
	case MMC_RSP_R3:
		fire |= GLAMO_FIRE_MMC_RSPT_R3;
		break;
	/* R4 and R5 supported by chip not defined in linux/mmc/core.h (sdio) */
	}
	/*
	 * From the command index, set up the command class in the host
	 * controller
	 *
	 * missing guys present on chip but couldn't figure out how to use yet:
	 *  0x0 "stream read"
	 *  0x9 "cancel running command"
	 */
	switch (cmd->opcode) {
	case MMC_READ_SINGLE_BLOCK:
		fire |= GLAMO_FIRE_MMC_CC_SBR; /* single block read */
		break;
	case MMC_SWITCH: /* 64 byte payload */
	case 0x33: /* observed issued by MCI */
	case MMC_READ_MULTIPLE_BLOCK:
		/* we will get an interrupt off this */
		if (!cmd->mrq->stop)
			/* multiblock no stop */
			fire |= GLAMO_FIRE_MMC_CC_MBRNS;
		else
			/* multiblock with stop */
			fire |= GLAMO_FIRE_MMC_CC_MBRS;
		break;
	case MMC_WRITE_BLOCK:
		fire |= GLAMO_FIRE_MMC_CC_SBW; /* single block write */
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		if (cmd->mrq->stop)
			/* multiblock with stop */
			fire |= GLAMO_FIRE_MMC_CC_MBWS;
		else
			/* multiblock no stop -- 'RESERVED'? */
			fire |= GLAMO_FIRE_MMC_CC_MBWNS;
		break;
	case MMC_STOP_TRANSMISSION:
		fire |= GLAMO_FIRE_MMC_CC_STOP; /* STOP */
		break;
	default:
		fire |= GLAMO_FIRE_MMC_CC_BASIC; /* "basic command" */
		break;
	}

	/* always largest timeout */
	writew(0xfff, host->base + GLAMO_REG_MMC_TIMEOUT);

	/* Generate interrupt on txfer */
	writew((readw(host->base + GLAMO_REG_MMC_BASIC) & 0x3e) |
	       0x0800 | GLAMO_BASIC_MMC_NO_CLK_RD_WAIT |
	       GLAMO_BASIC_MMC_EN_COMPL_INT | (sd_drive << 6),
	       host->base + GLAMO_REG_MMC_BASIC);

	/* send the command out on the wire */
	/* dev_info(&host->pdev->dev, "Using FIRE %04X\n", fire); */
	writew(fire, host->base + GLAMO_REG_MMC_CMD_FIRE);
	cmd->error = 0;
	return 0;
}

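/*
 * Point the controller's data buffer windows into the shared on-chip SRAM
 * (card reads land halfway up the buffer, card writes are staged at the
 * start), program the block size and count, and for writes copy the
 * payload into SRAM now so it is ready before the command fires.
 */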
static int glamo_mci_prepare_pio(struct glamo_mci_host *host,
				 struct mmc_data *data)
{
	/*
	 * the S-Media-internal RAM offset for our MMC buffer
	 * Read is halfway up the buffer and write is at the start
	 */
	if (data->flags & MMC_DATA_READ) {
		writew((u16)(GLAMO_FB_SIZE + (RESSIZE(host->mem_data) / 2)),
		       host->base + GLAMO_REG_MMC_WDATADS1);
		writew((u16)((GLAMO_FB_SIZE +
			(RESSIZE(host->mem_data) / 2)) >> 16),
			host->base + GLAMO_REG_MMC_WDATADS2);
	} else {
		writew((u16)GLAMO_FB_SIZE, host->base +
		       GLAMO_REG_MMC_RDATADS1);
		writew((u16)(GLAMO_FB_SIZE >> 16), host->base +
		       GLAMO_REG_MMC_RDATADS2);
	}

	/* set up the block info */
	writew(data->blksz, host->base + GLAMO_REG_MMC_DATBLKLEN);
	writew(data->blocks, host->base + GLAMO_REG_MMC_DATBLKCNT);
	dev_dbg(&host->pdev->dev, "(blksz=%d, count=%d)\n",
		data->blksz, data->blocks);
	host->pio_sgptr = 0;
	host->pio_words = 0;
	host->pio_count = 0;
	host->pio_active = 0;
	/* if write, prep the write into the shared RAM before the command */
	if (data->flags & MMC_DATA_WRITE) {
		host->pio_active = XFER_WRITE;
		return do_pio_write(host);
	}
	host->pio_active = XFER_READ;
	return 0;
}

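/*
 * Issue the current request (or its STOP command when cmd_is_stop is set).
 * Command and response handling is done by polling the status register
 * here; only bulk-data completion uses the chip interrupt (or, if the
 * INT# line isn't wired, a long polling loop that stands in for it).
 */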
static void glamo_mci_send_request(struct mmc_host *mmc)
{
	struct glamo_mci_host *host = mmc_priv(mmc);
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
	u16 *pu16 = (u16 *)&cmd->resp[0];
	u16 *reg_resp = (u16 *)(host->base + GLAMO_REG_MMC_CMD_RSP1);
	u16 status;
	int n;
	int timeout = 1000000;
	int insanity_timeout = 1000000;

	if (host->suspending) {
		dev_err(&host->pdev->dev, "IGNORING glamo_mci_send_request while "
			"suspended\n");
		cmd->error = -EIO;
		if (cmd->data)
			cmd->data->error = -EIO;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->ccnt++;
	/*
	 * somehow 2.6.24 MCI manages to issue MMC_WRITE_BLOCK *without* the
	 * MMC_DATA_WRITE flag, WTF? Work around the madness.
	 */
	if (cmd->opcode == MMC_WRITE_BLOCK)
		if (mrq->data)
			mrq->data->flags |= MMC_DATA_WRITE;

	/* does this command have data to read/write? */
	if ((!host->cmd_is_stop) && cmd->data) {
		int res;
		host->dcnt++;
		res = glamo_mci_prepare_pio(host, cmd->data);
		if (res) {
			cmd->error = -EIO;
			cmd->data->error = -EIO;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	dev_dbg(&host->pdev->dev, "cmd 0x%x, "
		"arg 0x%x data=%p mrq->stop=%p flags 0x%x\n",
		cmd->opcode, cmd->arg, cmd->data, cmd->mrq->stop,
		cmd->flags);

	/* resume requested clock rate
	 * scale it down by sd_slow_ratio if platform requests it
	 */
	__glamo_mci_fix_card_div(host, host->clk_div);

	if (glamo_mci_send_command(host, cmd))
		goto bail;

	/* are we deselecting the card? it isn't going to ack then... */
	if ((cmd->opcode == MMC_SELECT_CARD) && (cmd->arg == 0))
		goto done;

	/*
	 * we must spin until the response is ready or timed out
	 * -- we don't get interrupts unless there is a bulk rx
	 */
	do {
		status = readw(host->base + GLAMO_REG_MMC_RB_STAT1);
	} while (((((status >> 15) & 1) != (host->ccnt & 1)) ||
		  (!(status & (GLAMO_STAT1_MMC_RB_RRDY |
			       GLAMO_STAT1_MMC_RTOUT |
			       GLAMO_STAT1_MMC_DTOUT |
			       GLAMO_STAT1_MMC_BWERR |
			       GLAMO_STAT1_MMC_BRERR)))) && (insanity_timeout--));

	if (insanity_timeout < 0)
		dev_info(&host->pdev->dev, "command timeout, continuing\n");

	if (status & (GLAMO_STAT1_MMC_RTOUT |
		      GLAMO_STAT1_MMC_DTOUT))
		cmd->error = -ETIMEDOUT;
	if (status & (GLAMO_STAT1_MMC_BWERR |
		      GLAMO_STAT1_MMC_BRERR))
		cmd->error = -EILSEQ;

	if (host->cmd_is_stop)
		goto bail;

	if (cmd->error) {
		dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
		goto done;
	}
	/*
	 * mangle the response registers in two different exciting
	 * undocumented ways discovered by trial and error
	 */
	if (mmc_resp_type(cmd) == MMC_RSP_R2)
		/* grab the response */
		for (n = 0; n < 8; n++) /* super mangle power 1 */
			pu16[n ^ 6] = readw(&reg_resp[n]);
	else
		for (n = 0; n < 3; n++) /* super mangle power 2 */
			pu16[n] = (readw(&reg_resp[n]) >> 8) |
				  (readw(&reg_resp[n + 1]) << 8);
	/*
	 * if we don't have bulk data to take care of, we're done
	 */
	if (!cmd->data)
		goto done;
	if (!(cmd->data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)))
		goto done;

	/*
	 * Otherwise we can use the interrupt as async completion --
	 * if there is read data coming, or we wait for write data to complete,
	 * exit without mmc_request_done() as the payload interrupt
	 * will service it
	 */
	dev_dbg(&host->pdev->dev, "Waiting for payload data\n");
	/*
	 * if the glamo INT# line isn't wired (*cough* it can happen)
	 * I'm afraid we have to spin on the IRQ status bit and "be
	 * our own INT# line"
	 */
	if (!glamo_mci_def_pdata.pglamo->irq_works) {
		/*
		 * we have faith we will get an "interrupt"...
		 * but something insane like suspend problems can mean
		 * we spin here forever, so we timeout after a LONG time
		 */
		while ((!(readw(glamo_mci_def_pdata.pglamo->base +
			GLAMO_REG_IRQ_STATUS) & GLAMO_IRQ_MMC)) &&
			(timeout--))
			;

		if (timeout < 0) {
			if (cmd->data->error)
				cmd->data->error = -ETIMEDOUT;
			dev_err(&host->pdev->dev, "Payload timeout\n");
			goto bail;
		}

		/* yay we are an interrupt controller! -- call the ISR
		 * it will stop clock to card
		 */
		glamo_mci_irq_host(host);
	}
	return;

done:
	host->complete_what = COMPLETION_NONE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, cmd->mrq);
bail:
	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* stop the clock to card */
		__glamo_mci_fix_card_div(host, -1);
}

static void glamo_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct glamo_mci_host *host = mmc_priv(mmc);

	host->cmd_is_stop = 0;
	host->mrq = mrq;
	glamo_mci_send_request(mmc);
}

#if 1
static void glamo_mci_reset(struct glamo_mci_host *host)
{
	if (host->suspending) {
		dev_err(&host->pdev->dev, "IGNORING glamo_mci_reset while "
			"suspended\n");
		return;
	}
	dev_dbg(&host->pdev->dev, "******* glamo_mci_reset\n");
	/* reset MMC controller */
	writew(GLAMO_CLOCK_MMC_RESET | GLAMO_CLOCK_MMC_DG_TCLK |
	       GLAMO_CLOCK_MMC_EN_TCLK | GLAMO_CLOCK_MMC_DG_M9CLK |
	       GLAMO_CLOCK_MMC_EN_M9CLK,
	       glamo_mci_def_pdata.pglamo->base + GLAMO_REG_CLOCK_MMC);
	udelay(10);
	/* and disable reset */
	writew(GLAMO_CLOCK_MMC_DG_TCLK |
	       GLAMO_CLOCK_MMC_EN_TCLK | GLAMO_CLOCK_MMC_DG_M9CLK |
	       GLAMO_CLOCK_MMC_EN_M9CLK,
	       glamo_mci_def_pdata.pglamo->base + GLAMO_REG_CLOCK_MMC);
}
#endif
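/*
 * Translate an MMC_VDD_* bit index from the ios into millivolts:
 * index 8 (the 2.0-2.1V bit) maps to 2000mV and each higher bit adds
 * 100mV; anything lower is treated as 1.65V.
 */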
static inline int glamo_mci_get_mv(int vdd)
{
	int mv = 1650;

	if (vdd > 7)
		mv += 350 + 100 * (vdd - 8);

	return mv;
}

static void glamo_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct glamo_mci_host *host = mmc_priv(mmc);
	struct regulator *regulator;
	int n = 0;
	int div;
	int powering = 0;
	int mv;

	if (host->suspending) {
		dev_err(&host->pdev->dev, "IGNORING glamo_mci_set_ios while "
			"suspended\n");
		return;
	}

	regulator = host->regulator;

	/* Set power */
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (host->pdata->glamo_can_set_mci_power()) {
			mv = glamo_mci_get_mv(ios->vdd);
			regulator_set_voltage(regulator, mv * 1000, mv * 1000);
			regulator_enable(regulator);
		}
		break;
	case MMC_POWER_ON:
		/*
		 * we should use a very slow clock until the first bulk
		 * transfer completes OK
		 */
		host->force_slow_during_powerup = 1;

		if (host->vdd_current != ios->vdd) {
			if (host->pdata->glamo_can_set_mci_power()) {
				mv = glamo_mci_get_mv(ios->vdd);
				regulator_set_voltage(regulator, mv * 1000, mv * 1000);
				printk(KERN_INFO "SD power -> %dmV\n", mv);
			}
			host->vdd_current = ios->vdd;
		}
		if (host->power_mode_current == MMC_POWER_OFF) {
			glamo_engine_enable(glamo_mci_def_pdata.pglamo,
					    GLAMO_ENGINE_MMC);
			powering = 1;
		}
		break;

	case MMC_POWER_OFF:
	default:
		if (host->power_mode_current == MMC_POWER_OFF)
			break;
		/* never want clocking with dead card */
		__glamo_mci_fix_card_div(host, -1);

		glamo_engine_disable(glamo_mci_def_pdata.pglamo,
				     GLAMO_ENGINE_MMC);
		regulator_disable(regulator);
		host->vdd_current = -1;
		break;
	}
	host->power_mode_current = ios->power_mode;

	host->real_rate = __glamo_mci_set_card_clock(host, ios->clock, &div);
	host->clk_div = div;

	/* after power-up, we are meant to give it >= 74 clocks so it can
	 * initialize itself.  Doubt any modern cards need it but anyway...
	 */
	if (powering)
		mdelay(1);

	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* stop the clock to card, because we are idle until transfer */
		__glamo_mci_fix_card_div(host, -1);

	if ((ios->power_mode == MMC_POWER_ON) ||
	    (ios->power_mode == MMC_POWER_UP)) {
		dev_info(&host->pdev->dev,
			 "powered (vdd = %d) clk: %lukHz div=%d (req: %ukHz). "
			 "Bus width=%d\n", (int)ios->vdd,
			 host->real_rate / 1000, (int)host->clk_div,
			 ios->clock / 1000, (int)ios->bus_width);
	} else
		dev_info(&host->pdev->dev, "glamo_mci_set_ios: power down.\n");

	/* set bus width */
	host->bus_width = ios->bus_width;
	if (host->bus_width == MMC_BUS_WIDTH_4)
		n = GLAMO_BASIC_MMC_EN_4BIT_DATA;
	writew((readw(host->base + GLAMO_REG_MMC_BASIC) &
		(~(GLAMO_BASIC_MMC_EN_4BIT_DATA |
		   GLAMO_BASIC_MMC_EN_DR_STR0 |
		   GLAMO_BASIC_MMC_EN_DR_STR1))) | n |
	       sd_drive << 6, host->base + GLAMO_REG_MMC_BASIC);
}


/*
 * no physical write protect supported by us
 */
static int glamo_mci_get_ro(struct mmc_host *mmc)
{
	return 0;
}

static struct mmc_host_ops glamo_mci_ops = {
	.request	= glamo_mci_request,
	.set_ios	= glamo_mci_set_ios,
	.get_ro		= glamo_mci_get_ro,
};

static int glamo_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct glamo_mci_host *host;
	int ret;

	dev_info(&pdev->dev, "glamo_mci driver (C)2007 Openmoko, Inc\n");

	mmc = mmc_alloc_host(sizeof(struct glamo_mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto probe_out;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;
	host->pdata = &glamo_mci_def_pdata;
	host->power_mode_current = MMC_POWER_OFF;

	host->complete_what = COMPLETION_NONE;
	host->pio_active = XFER_NONE;

	spin_lock_init(&host->complete_lock);
	INIT_WORK(&host->irq_work, glamo_mci_irq_worker);

	host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!host->mem) {
		dev_err(&pdev->dev,
			"failed to get io memory region resource.\n");

		ret = -ENOENT;
		goto probe_free_host;
	}

	host->mem = request_mem_region(host->mem->start,
				       RESSIZE(host->mem), pdev->name);

	if (!host->mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_free_host;
	}

	host->base = ioremap(host->mem->start, RESSIZE(host->mem));
	if (!host->base) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -EINVAL;
		goto probe_free_mem_region;
	}

	host->regulator = regulator_get(&pdev->dev, "SD_3V3");
	if (IS_ERR(host->regulator)) {
		dev_err(&pdev->dev, "Cannot proceed without regulator.\n");
		ret = PTR_ERR(host->regulator);
		goto probe_iounmap;
	}

	/* set the handler for our bit of the shared chip irq register */
	set_irq_handler(IRQ_GLAMO(GLAMO_IRQIDX_MMC), glamo_mci_irq);
	/* stash host as our handler's private data */
	set_irq_data(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);

	/* Get ahold of our data buffer we use for data in and out on MMC */
	host->mem_data = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!host->mem_data) {
		dev_err(&pdev->dev,
			"failed to get io memory region resource.\n");
		ret = -ENOENT;
		goto probe_iounmap;
	}

	host->mem_data = request_mem_region(host->mem_data->start,
					    RESSIZE(host->mem_data), pdev->name);

	if (!host->mem_data) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_iounmap;
	}
	host->base_data = ioremap(host->mem_data->start,
				  RESSIZE(host->mem_data));
	host->data_max_size = RESSIZE(host->mem_data);

	if (host->base_data == 0) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -EINVAL;
		goto probe_free_mem_region_data;
	}

	host->vdd_current = 0;
	host->clk_rate = 50000000; /* really it's 49152000 */
	host->clk_div = 16;

	/* explain our host controller capabilities */
	mmc->ops = &glamo_mci_ops;
	mmc->ocr_avail = host->pdata->ocr_avail;
	mmc->caps = MMC_CAP_4_BIT_DATA |
		    MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SD_HIGHSPEED;
	mmc->f_min = host->clk_rate / 256;
	mmc->f_max = sd_max_clk;

	mmc->max_blk_count = (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */
	mmc->max_blk_size = (1 << 12) - 1;  /* GLAMO_REG_MMC_RB_BLKLEN */
	mmc->max_req_size = RESSIZE(host->mem_data) / 2;
	mmc->max_seg_size = mmc->max_req_size;
	mmc->max_phys_segs = 1; /* hw doesn't talk about segs??? */
	mmc->max_hw_segs = 1;

	dev_info(&host->pdev->dev, "probe: mapped mci_base:%p irq:%u.\n",
		 host->base, host->irq);

	platform_set_drvdata(pdev, mmc);

	glamo_engine_enable(glamo_mci_def_pdata.pglamo, GLAMO_ENGINE_MMC);
	glamo_mci_reset(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "failed to add mmc host.\n");
		goto probe_free_mem_region_data;
	}

	dev_info(&pdev->dev, "initialisation done.\n");
	return 0;

probe_free_mem_region_data:
	release_mem_region(host->mem_data->start, RESSIZE(host->mem_data));

probe_iounmap:
	iounmap(host->base);

probe_free_mem_region:
	release_mem_region(host->mem->start, RESSIZE(host->mem));

probe_free_host:
	mmc_free_host(mmc);
probe_out:
	return ret;
}

static int glamo_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct glamo_mci_host *host = mmc_priv(mmc);
	struct regulator *regulator;

	mmc_remove_host(mmc);
	/* stop using our handler, revert it to default */
	set_irq_handler(IRQ_GLAMO(GLAMO_IRQIDX_MMC), handle_level_irq);
	iounmap(host->base);
	iounmap(host->base_data);
	release_mem_region(host->mem->start, RESSIZE(host->mem));
	release_mem_region(host->mem_data->start, RESSIZE(host->mem_data));

	regulator = host->regulator;
	regulator_put(regulator);

	mmc_free_host(mmc);

	glamo_engine_disable(glamo_mci_def_pdata.pglamo, GLAMO_ENGINE_MMC);
	return 0;
}


#ifdef CONFIG_PM

static int glamo_mci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct glamo_mci_host *host = mmc_priv(mmc);
	int ret;

	cancel_work_sync(&host->irq_work);

	/*
	 * possible workaround for SD corruption during suspend - resume:
	 * make sure the clock is running during suspend and consequently
	 * resume
	 */
	__glamo_mci_fix_card_div(host, host->clk_div);

	/* we are going to do more commands to override this in
	 * mmc_suspend_host(), so we need to change sd_idleclk for the
	 * duration as well
	 */
	suspend_sd_idleclk = sd_idleclk;
	sd_idleclk = 1;

	ret = mmc_suspend_host(mmc, state);

	host->suspending++;
	/* so that when we resume, we use any modified max rate */
	mmc->f_max = sd_max_clk;

	return ret;
}

int glamo_mci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct glamo_mci_host *host = mmc_priv(mmc);
	int ret;

	sd_idleclk = 1;

	glamo_engine_enable(host->pdata->pglamo, GLAMO_ENGINE_MMC);
	glamo_mci_reset(host);

	host->suspending--;

	ret = mmc_resume_host(mmc);

	/* put sd_idleclk back to pre-suspend state */
	sd_idleclk = suspend_sd_idleclk;

	return ret;
}
EXPORT_SYMBOL_GPL(glamo_mci_resume);

#else /* CONFIG_PM */
#define glamo_mci_suspend NULL
#define glamo_mci_resume NULL
#endif /* CONFIG_PM */


static struct platform_driver glamo_mci_driver = {
	.driver.name	= "glamo-mci",
	.probe		= glamo_mci_probe,
	.remove		= glamo_mci_remove,
	.suspend	= glamo_mci_suspend,
	.resume		= glamo_mci_resume,
};

static int __init glamo_mci_init(void)
{
	spin_lock_init(&clock_lock);
	return platform_driver_register(&glamo_mci_driver);
}

static void __exit glamo_mci_exit(void)
{
	platform_driver_unregister(&glamo_mci_driver);
}

module_init(glamo_mci_init);
module_exit(glamo_mci_exit);

MODULE_DESCRIPTION("Glamo MMC/SD Card Interface driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Green <andy@openmoko.com>");