/*
 * linux/drivers/mmc/host/glamo-mmc.c - Glamo MMC driver
 *
 * Copyright (C) 2007 Openmoko, Inc, Andy Green <andy@openmoko.com>
 * Based on the S3C MMC driver that was:
 * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/crc7.h>
#include <linux/scatterlist.h>
#include <linux/io.h>			/* readw()/writew() */
#include <linux/regulator/consumer.h>	/* regulator_* consumer API */

#include <linux/mfd/glamo.h>

#include "glamo-mci.h"
#include "glamo-core.h"
#include "glamo-regs.h"

#define DRIVER_NAME "glamo-mci"
static void glamo_mci_send_request(struct mmc_host *mmc);
static void glamo_mci_send_command(struct glamo_mci_host *host,
				   struct mmc_command *cmd);
/*
 * Max SD clock rate
 *
 * Held at /(3 + 1) due to concerns about the recommended 100R series
 * resistor; allows 16MHz at 4-bit --> 8MBytes/sec raw.
 *
 * You can override this on the kernel command line using
 *
 *   glamo_mci.sd_max_clk=10000000
 *
 * for example.
 */
static int sd_max_clk = 50000000 / 3;
module_param(sd_max_clk, int, 0644);
/*
 * SD clock slow-down ratio
 *
 * You can override this on the kernel command line using
 *
 *   glamo_mci.sd_slow_ratio=8
 *
 * for example.
 *
 * The platform callback decides the effective clock rate: if it is not
 * defined, the max is used; if it is defined and returns nonzero, the rate
 * is divided by this factor.
 */
static int sd_slow_ratio = 8;
module_param(sd_slow_ratio, int, 0644);
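/*
 * Rough numbers only: with the default sd_max_clk of ~16.7MHz and
 * sd_slow_ratio=8, the card clock ends up near 2MHz whenever the platform's
 * glamo_mmc_use_slow() callback reports slow mode (the divider is multiplied
 * by the ratio in glamo_mci_fix_card_div(), so the result is approximate
 * rather than an exact divide-by-8).
 */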
/*
 * Post-power SD clock rate
 *
 * You can override this on the kernel command line using
 *
 *   glamo_mci.sd_post_power_clock=1000000
 *
 * for example.
 *
 * After changing power to the card, the clock is held at this rate until
 * the first bulk transfer completes.
 */
static int sd_post_power_clock = 1000000;
module_param(sd_post_power_clock, int, 0644);
/*
 * SD signal drive strength
 *
 * You can override this on the kernel command line using
 *
 *   glamo_mci.sd_drive=0
 *
 * for example.
 */
static int sd_drive;
module_param(sd_drive, int, 0644);
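/*
 * Note (inferred from the GLAMO_REG_MMC_BASIC writes below): sd_drive is
 * applied as (sd_drive << 6) under the EN_DR_STR0/EN_DR_STR1 mask, so only
 * values 0..3 are meaningful.
 */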
/*
 * Allow the SD clock to run while the interface is idle
 *
 * You can override this on the kernel command line using
 *
 *   glamo_mci.sd_idleclk=0
 *
 * for example.
 */
static int sd_idleclk = 0; /* disallow idle clock by default */
module_param(sd_idleclk, int, 0644);

/* used to stash the real idleclk state across suspend: we force the clock
 * to run while suspended */
static int suspend_sd_idleclk;
static inline void glamo_reg_write(struct glamo_mci_host *glamo,
				   u_int16_t reg, u_int16_t val)
{
	writew(val, glamo->mmio_base + reg);
}

static inline u_int16_t glamo_reg_read(struct glamo_mci_host *glamo,
				       u_int16_t reg)
{
	return readw(glamo->mmio_base + reg);
}

static void glamo_reg_set_bit_mask(struct glamo_mci_host *glamo,
				   u_int16_t reg, u_int16_t mask,
				   u_int16_t val)
{
	u_int16_t tmp;

	val &= mask;

	tmp = glamo_reg_read(glamo, reg);
	tmp &= ~mask;
	tmp |= val;
	glamo_reg_write(glamo, reg, tmp);
}
static void do_pio_read(struct glamo_mci_host *host)
{
	struct scatterlist *sg;
	u16 __iomem *from_ptr = host->data_base;
	struct mmc_data *data = host->mrq->data;
	void *sg_pointer;

	dev_dbg(&host->pdev->dev, "pio_read():\n");
	for (sg = data->sg; sg; sg = sg_next(sg)) {
		sg_pointer = page_address(sg_page(sg)) + sg->offset;

		memcpy(sg_pointer, from_ptr, sg->length);
		from_ptr += sg->length >> 1;

		data->bytes_xfered += sg->length;
	}

	dev_dbg(&host->pdev->dev, "pio_read(): complete (no more data).\n");
}
static void do_pio_write(struct glamo_mci_host *host)
{
	struct scatterlist *sg;
	u16 __iomem *to_ptr = host->data_base;
	struct mmc_data *data = host->mrq->data;
	void *sg_pointer;

	dev_dbg(&host->pdev->dev, "pio_write():\n");
	for (sg = data->sg; sg; sg = sg_next(sg)) {
		sg_pointer = page_address(sg_page(sg)) + sg->offset;

		data->bytes_xfered += sg->length;

		memcpy(to_ptr, sg_pointer, sg->length);
		to_ptr += sg->length >> 1;
	}

	dev_dbg(&host->pdev->dev, "pio_write(): complete\n");
}
static void glamo_mci_fix_card_div(struct glamo_mci_host *host, int div)
{
	unsigned long flags;

	spin_lock_irqsave(&host->pdata->core->lock, flags);

	if (div < 0) {
		/* stop clock - remove clock from divider input */
		writew(readw(host->pdata->core->base +
			     GLAMO_REG_CLOCK_GEN5_1) &
					(~GLAMO_CLOCK_GEN51_EN_DIV_TCLK),
		       host->pdata->core->base + GLAMO_REG_CLOCK_GEN5_1);
	} else {
		if (host->force_slow_during_powerup)
			div = host->clk_rate / sd_post_power_clock;
		else if (host->pdata->glamo_mmc_use_slow &&
			 host->pdata->glamo_mmc_use_slow())
			div = div * sd_slow_ratio;

		if (div > 255)
			div = 255;

		/*
		 * set the nearest prescaler factor
		 *
		 * register is shared with the SCLK divisor -- no chance of a
		 * race because we don't use the sensor interface
		 */
		writew((readw(host->pdata->core->base +
			      GLAMO_REG_CLOCK_GEN8) & 0xff00) | div,
		       host->pdata->core->base + GLAMO_REG_CLOCK_GEN8);
		/* enable clock to divider input */
		writew(readw(host->pdata->core->base +
			     GLAMO_REG_CLOCK_GEN5_1) |
					GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
		       host->pdata->core->base + GLAMO_REG_CLOCK_GEN5_1);
	}

	spin_unlock_irqrestore(&host->pdata->core->lock, flags);
}
static int glamo_mci_set_card_clock(struct glamo_mci_host *host, int freq)
{
	int div = 0;
	int real_rate = 0;

	if (freq) {
		/* Set clock */
		for (div = 0; div < 255; div++) {
			real_rate = host->clk_rate / (div + 1);
			if (real_rate <= freq)
				break;
		}

		host->clk_div = div;
		glamo_mci_fix_card_div(host, div);
	} else {
		/* stop clock */
		host->clk_div = 0xff;

		if (!sd_idleclk && !host->force_slow_during_powerup)
			/* clock off */
			glamo_mci_fix_card_div(host, -1);
	}
	host->real_rate = real_rate;

	return real_rate;
}
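/*
 * Worked example, using the nominal 50000000Hz clk_rate set in probe():
 * a 400kHz identification clock walks the divider up to div = 124, since
 * 50000000 / (124 + 1) = 400000 <= 400000; a 25MHz request stops at
 * div = 1.  The real engine clock is closer to 49152000Hz, so the actual
 * rates land slightly lower than these illustrative figures.
 */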
static void glamo_mci_irq_worker(struct work_struct *work)
{
	struct glamo_mci_host *host =
		container_of(work, struct glamo_mci_host, irq_work);
	struct mmc_command *cmd = host->mrq->cmd;

	if (cmd->data->flags & MMC_DATA_READ) {
		do_pio_read(host);
	}

	/* issue STOP if we have been given one to use */
	if (host->mrq->stop) {
		glamo_mci_send_command(host, host->mrq->stop);
	}

	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* clock off */
		glamo_mci_fix_card_div(host, -1);

	mmc_request_done(host->mmc, cmd->mrq);
}
static irqreturn_t glamo_mci_irq(int irq, void *devid)
{
	struct glamo_mci_host *host = (struct glamo_mci_host *)devid;
	u16 status;
	struct mmc_command *cmd;
	unsigned long flags;

	if (host->suspending) { /* bad news, dangerous time */
		dev_err(&host->pdev->dev, "****glamo_mci_irq before resumed\n");
		goto leave;
	}

	if (!host->mrq || !host->mrq->cmd)
		goto leave;

	cmd = host->mrq->cmd;

	spin_lock_irqsave(&host->lock, flags);

	status = readw(host->mmio_base + GLAMO_REG_MMC_RB_STAT1);
	dev_dbg(&host->pdev->dev, "status = 0x%04x\n", status);

	/* we ignore a data timeout report if we are also told the data came */
	if (status & GLAMO_STAT1_MMC_RB_DRDY)
		status &= ~GLAMO_STAT1_MMC_DTOUT;

	if (status & (GLAMO_STAT1_MMC_RTOUT |
		      GLAMO_STAT1_MMC_DTOUT))
		cmd->error = -ETIMEDOUT;
	if (status & (GLAMO_STAT1_MMC_BWERR |
		      GLAMO_STAT1_MMC_BRERR))
		cmd->error = -EILSEQ;

	if (cmd->error) {
		dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
		goto done;
	}

	/*
	 * disable the initial slow start after the first bulk transfer
	 */
	if (host->force_slow_during_powerup)
		host->force_slow_during_powerup--;

	/*
	 * we perform the memcpy out of Glamo memory outside of IRQ context
	 * so we don't block other interrupts
	 */
	schedule_work(&host->irq_work);
	goto unlock;

done:
	mmc_request_done(host->mmc, cmd->mrq);
unlock:
	spin_unlock_irqrestore(&host->lock, flags);

leave:
	return IRQ_HANDLED;
}
static void glamo_mci_send_command(struct glamo_mci_host *host,
				   struct mmc_command *cmd)
{
	u8 u8a[6];
	u16 fire = 0;
	u16 status;
	unsigned int timeout = 1000000;
	u16 *reg_resp = (u16 *)(host->mmio_base + GLAMO_REG_MMC_CMD_RSP1);

	/* if we can't do it, reject as busy */
	if (!(readw(host->mmio_base + GLAMO_REG_MMC_RB_STAT1) &
	      GLAMO_STAT1_MMC_IDLE)) {
		cmd->error = -EBUSY;
		mmc_request_done(host->mmc, host->mrq);
		return;
	}
	/* create an array in wire order for CRC computation */
	u8a[0] = 0x40 | (cmd->opcode & 0x3f);
	u8a[1] = (u8)(cmd->arg >> 24);
	u8a[2] = (u8)(cmd->arg >> 16);
	u8a[3] = (u8)(cmd->arg >> 8);
	u8a[4] = (u8)cmd->arg;
	u8a[5] = (crc7(0, u8a, 5) << 1) | 0x01; /* crc7 on first 5 bytes of packet */

	/* issue the wire-order array including CRC in register order */
	writew((u8a[4] << 8) | u8a[5], host->mmio_base + GLAMO_REG_MMC_CMD_REG1);
	writew((u8a[2] << 8) | u8a[3], host->mmio_base + GLAMO_REG_MMC_CMD_REG2);
	writew((u8a[0] << 8) | u8a[1], host->mmio_base + GLAMO_REG_MMC_CMD_REG3);
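	/*
	 * Wire-format recap (SD/MMC spec): a command frame is 48 bits --
	 * start bit (0) and transmission bit (1) folded into 0x40 | opcode
	 * above, a 32-bit argument, then the 7-bit CRC and the end bit (1),
	 * which is why the last byte is (crc7 << 1) | 0x01.  The three
	 * 16-bit register writes above push that frame out in the order the
	 * controller expects.
	 */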
	/* command index toggle */
	fire |= (host->request_counter & 1) << 12;

	/* set type of command */
	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		fire |= GLAMO_FIRE_MMC_CMDT_BNR;
		break;
	case MMC_CMD_BCR:
		fire |= GLAMO_FIRE_MMC_CMDT_BR;
		break;
	case MMC_CMD_AC:
		fire |= GLAMO_FIRE_MMC_CMDT_AND;
		break;
	case MMC_CMD_ADTC:
		fire |= GLAMO_FIRE_MMC_CMDT_AD;
		break;
	}
	/*
	 * if it expects a response, set the type expected
	 *
	 * R1,  Length : 48 bit, normal response
	 * R1b, Length : 48 bit, same as R1 plus card busy status
	 * R2,  Length : 136 bit (really 128 bits with the CRC snipped)
	 * R3,  Length : 48 bit (OCR register value)
	 * R4,  Length : 48 bit, SDIO_OP_CONDITION (SDIO cards)
	 * R5,  Length : 48 bit, IO_RW_DIRECTION (SDIO cards)
	 * R6,  Length : 48 bit (RCA register)
	 * R7,  Length : 48 bit (interface condition: VHS (voltage supplied),
	 *                       check pattern, CRC7)
	 */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* same index as R6 and R7 */
		fire |= GLAMO_FIRE_MMC_RSPT_R1;
		break;
	case MMC_RSP_R1B:
		fire |= GLAMO_FIRE_MMC_RSPT_R1b;
		break;
	case MMC_RSP_R2:
		fire |= GLAMO_FIRE_MMC_RSPT_R2;
		break;
	case MMC_RSP_R3:
		fire |= GLAMO_FIRE_MMC_RSPT_R3;
		break;
	/* R4 and R5 are supported by the chip but not defined in
	 * linux/mmc/core.h (SDIO) */
	}
	/*
	 * From the command index, set up the command class in the host
	 * controller.
	 *
	 * Other classes are present on the chip, but we couldn't figure out
	 * how to use them yet:
	 *
	 *   0x9 "cancel running command"
	 */
	switch (cmd->opcode) {
	case MMC_READ_SINGLE_BLOCK:
		fire |= GLAMO_FIRE_MMC_CC_SBR; /* single block read */
		break;
	case MMC_SWITCH: /* 64 byte payload */
	case SD_APP_SEND_SCR:
	case MMC_READ_MULTIPLE_BLOCK:
		/* we will get an interrupt off this */
		if (!cmd->mrq->stop)
			/* multiblock no stop */
			fire |= GLAMO_FIRE_MMC_CC_MBRNS;
		else
			/* multiblock with stop */
			fire |= GLAMO_FIRE_MMC_CC_MBRS;
		break;
	case MMC_WRITE_BLOCK:
		fire |= GLAMO_FIRE_MMC_CC_SBW; /* single block write */
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		if (cmd->mrq->stop)
			/* multiblock with stop */
			fire |= GLAMO_FIRE_MMC_CC_MBWS;
		else
			/* multiblock NO stop -- 'RESERVED'? */
			fire |= GLAMO_FIRE_MMC_CC_MBWNS;
		break;
	case MMC_STOP_TRANSMISSION:
		fire |= GLAMO_FIRE_MMC_CC_STOP; /* STOP */
		break;
	default:
		fire |= GLAMO_FIRE_MMC_CC_BASIC; /* "basic command" */
		break;
	}
	/* always largest timeout */
	writew(0xfff, host->mmio_base + GLAMO_REG_MMC_TIMEOUT);

	/* generate an interrupt on transfer completion */
	glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC, ~0x3e,
			       0x0800 | GLAMO_BASIC_MMC_NO_CLK_RD_WAIT |
			       GLAMO_BASIC_MMC_EN_COMPL_INT | (sd_drive << 6));

	/* send the command out on the wire */
	/* dev_info(&host->pdev->dev, "Using FIRE %04X\n", fire); */
	writew(fire, host->mmio_base + GLAMO_REG_MMC_CMD_FIRE);

	/* are we deselecting the card?  It isn't going to ack, then... */
	if ((cmd->opcode == 7) && (cmd->arg == 0))
		return;
	/*
	 * we must spin until the response is ready or timed out
	 * -- we don't get interrupts unless there is a bulk rx
	 */
	do
		status = readw(host->mmio_base + GLAMO_REG_MMC_RB_STAT1);
	while (((((status >> 15) & 1) != (host->request_counter & 1)) ||
	       (!(status & (GLAMO_STAT1_MMC_RB_RRDY |
			    GLAMO_STAT1_MMC_RTOUT |
			    GLAMO_STAT1_MMC_DTOUT |
			    GLAMO_STAT1_MMC_BWERR |
			    GLAMO_STAT1_MMC_BRERR)))) && (timeout--));
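	/*
	 * The (status >> 15) test in the loop above matches the command
	 * index toggle written into bit 12 of FIRE, so a stale status word
	 * left over from the previous command cannot end the wait early.
	 */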
	if ((status & (GLAMO_STAT1_MMC_RTOUT |
		       GLAMO_STAT1_MMC_DTOUT)) ||
	    (timeout == 0)) {
		cmd->error = -ETIMEDOUT;
	} else if (status & (GLAMO_STAT1_MMC_BWERR |
			     GLAMO_STAT1_MMC_BRERR)) {
		cmd->error = -EILSEQ;
	}
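	/*
	 * Response bytes arrive in GLAMO_REG_MMC_CMD_RSP1..8 as 16-bit
	 * words.  For a 136-bit (R2) response all eight words are packed
	 * into resp[3..0]; for 48-bit responses the leading command-index
	 * byte is shifted away so resp[0] carries just the 32-bit payload
	 * (layout inferred from the shifts below).
	 */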
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = readw(&reg_resp[0]) |
				       (readw(&reg_resp[1]) << 16);
			cmd->resp[2] = readw(&reg_resp[2]) |
				       (readw(&reg_resp[3]) << 16);
			cmd->resp[1] = readw(&reg_resp[4]) |
				       (readw(&reg_resp[5]) << 16);
			cmd->resp[0] = readw(&reg_resp[6]) |
				       (readw(&reg_resp[7]) << 16);
		} else {
			cmd->resp[0] = (readw(&reg_resp[0]) >> 8) |
				       (readw(&reg_resp[1]) << 8) |
				       (readw(&reg_resp[2]) << 24);
		}
	}
}
static int glamo_mci_prepare_pio(struct glamo_mci_host *host,
				 struct mmc_data *data)
{
	/* set up the block info */
	writew(data->blksz, host->mmio_base + GLAMO_REG_MMC_DATBLKLEN);
	writew(data->blocks, host->mmio_base + GLAMO_REG_MMC_DATBLKCNT);
	dev_dbg(&host->pdev->dev, "(blksz=%d, count=%d)\n",
		data->blksz, data->blocks);
	data->bytes_xfered = 0;

	/* if write, prep the write into the shared RAM before the command */
	if (data->flags & MMC_DATA_WRITE) {
		do_pio_write(host);
	}

	return 0;
}
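/*
 * Data path summary: writes are staged into the Glamo's shared data RAM
 * (host->data_base) by do_pio_write() before the command fires, while reads
 * are copied back out of the same window by glamo_mci_irq_worker() once the
 * transfer-complete interrupt arrives.
 */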
static void glamo_mci_send_request(struct mmc_host *mmc)
{
	struct glamo_mci_host *host = mmc_priv(mmc);
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;
	int timeout = 1000000;

	host->request_counter++;
	/* does this request have data to read/write? */
	if (cmd->data) {
		if (glamo_mci_prepare_pio(host, cmd->data)) {
			cmd->data->error = -EIO;
			goto done;
		}
	}

	dev_dbg(&host->pdev->dev, "cmd 0x%x, "
		"arg 0x%x data=%p mrq->stop=%p flags 0x%x\n",
		cmd->opcode, cmd->arg, cmd->data, cmd->mrq->stop,
		cmd->flags);
	/* resume the requested clock rate;
	 * scale it down by sd_slow_ratio if the platform requests it
	 */
	glamo_mci_fix_card_div(host, host->clk_div);

	glamo_mci_send_command(host, cmd);

	/*
	 * if we don't have bulk data to take care of, we're done
	 */
	if (!cmd->data || cmd->error)
		goto done;
	/*
	 * Otherwise we can use the interrupt as async completion --
	 * if there is read data coming, or we wait for write data to complete,
	 * exit without mmc_request_done() as the payload interrupt
	 * will service it
	 */
	dev_dbg(&host->pdev->dev, "Waiting for payload data\n");

	/*
	 * if the glamo INT# line isn't wired (*cough* it can happen)
	 * I'm afraid we have to spin on the IRQ status bit and "be
	 * our own interrupt controller"
	 */
	if (!host->pdata->core->irq_works) {
		/*
		 * we have faith we will get an "interrupt"...
		 * but something insane like suspend problems can mean
		 * we spin here forever, so we timeout after a LONG time
		 */
		while ((!(readw(host->pdata->core->base +
				GLAMO_REG_IRQ_STATUS) & GLAMO_IRQ_MMC)) &&
		       (timeout--))
			;

		if (timeout < 0) {
			if (cmd->data->error)
				cmd->data->error = -ETIMEDOUT;
			dev_err(&host->pdev->dev, "Payload timeout\n");
			return;
		}

		/* ack this interrupt source */
		writew(GLAMO_IRQ_MMC, host->pdata->core->base +
		       GLAMO_REG_IRQ_CLEAR);

		/* yay we are an interrupt controller! -- call the ISR,
		 * it will stop the clock to the card
		 */
		glamo_mci_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);
	}
	return;

done:
	mmc_request_done(host->mmc, cmd->mrq);

	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* stop the clock to card */
		glamo_mci_fix_card_div(host, -1);
}
static void glamo_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct glamo_mci_host *host = mmc_priv(mmc);

	host->mrq = mrq;
	glamo_mci_send_request(mmc);
}
static void glamo_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct glamo_mci_host *host = mmc_priv(mmc);
	int bus_width = 0;
	int powering = 0;
	int ret;

	if (host->suspending) {
		dev_err(&host->pdev->dev, "IGNORING glamo_mci_set_ios while "
			"suspended\n");
		return;
	}

	/* set power */
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		ret = regulator_enable(host->regulator);
		if (ret)
			dev_err(&host->pdev->dev,
				"Failed to enable regulator: %d\n", ret);
		break;
	case MMC_POWER_ON:
		/*
		 * we should use a very slow clock until the first bulk
		 * transfer completes OK
		 */
		host->force_slow_during_powerup = 1;

		if (host->power_mode_current == MMC_POWER_OFF) {
			glamo_engine_enable(host->pdata->core,
					    GLAMO_ENGINE_MMC);
			powering = 1;
		}
		break;
	case MMC_POWER_OFF:
	default:
		if (host->power_mode_current == MMC_POWER_OFF)
			break;

		/* never want clocking with dead card */
		glamo_mci_fix_card_div(host, -1);

		glamo_engine_disable(host->pdata->core,
				     GLAMO_ENGINE_MMC);

		ret = regulator_disable(host->regulator);
		if (ret)
			dev_warn(&host->pdev->dev,
				 "Failed to disable regulator: %d\n", ret);
		break;
	}
	host->power_mode_current = ios->power_mode;

	if (host->vdd_current != ios->vdd) {
		ret = mmc_regulator_set_ocr(host->regulator, ios->vdd);
		if (ret)
			dev_err(&host->pdev->dev,
				"Failed to set regulator voltage: %d\n", ret);
		host->vdd_current = ios->vdd;
	}
	glamo_mci_set_card_clock(host, ios->clock);

	/* after power-up, we are meant to give the card >= 74 clocks so it
	 * can initialize itself.  Doubt any modern cards need it, but anyway...
	 */
	if (powering)
		mdelay(1);

	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* stop the clock to the card, because we are idle until transfer */
		glamo_mci_fix_card_div(host, -1);

	if ((ios->power_mode == MMC_POWER_ON) ||
	    (ios->power_mode == MMC_POWER_UP)) {
		dev_info(&host->pdev->dev,
			 "powered (vdd = %d) clk: %lukHz div=%d (req: %ukHz). "
			 "Bus width=%d\n", (int)ios->vdd,
			 host->real_rate / 1000, (int)host->clk_div,
			 ios->clock / 1000, (int)ios->bus_width);
	} else {
		dev_info(&host->pdev->dev, "glamo_mci_set_ios: power down.\n");
	}

	/* set bus width */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		bus_width = GLAMO_BASIC_MMC_EN_4BIT_DATA;
	glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC,
			       GLAMO_BASIC_MMC_EN_4BIT_DATA |
			       GLAMO_BASIC_MMC_EN_DR_STR0 |
			       GLAMO_BASIC_MMC_EN_DR_STR1,
			       bus_width | sd_drive << 6);
}
/*
 * no physical write protect supported by us
 */
static int glamo_mci_get_ro(struct mmc_host *mmc)
{
	return 0;
}
static struct mmc_host_ops glamo_mci_ops = {
	.request	= glamo_mci_request,
	.set_ios	= glamo_mci_set_ios,
	.get_ro		= glamo_mci_get_ro,
};
static int glamo_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct glamo_mci_host *host;
	int ret;

	dev_info(&pdev->dev, "glamo_mci driver (C)2007 Openmoko, Inc\n");

	mmc = mmc_alloc_host(sizeof(struct glamo_mci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;
	host->pdata = pdev->dev.platform_data;
	host->power_mode_current = MMC_POWER_OFF;

	spin_lock_init(&host->lock);
	INIT_WORK(&host->irq_work, glamo_mci_irq_worker);

	host->regulator = regulator_get(pdev->dev.parent, "SD_3V3");
	if (!host->regulator) {
		dev_err(&pdev->dev, "Cannot proceed without regulator.\n");
		ret = -ENODEV;
		goto probe_free_host;
	}

	host->mmio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!host->mmio_mem) {
		dev_err(&pdev->dev,
			"failed to get io memory region resource.\n");
		ret = -ENOENT;
		goto probe_regulator_put;
	}

	host->mmio_mem = request_mem_region(host->mmio_mem->start,
					    resource_size(host->mmio_mem),
					    pdev->name);
	if (!host->mmio_mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_regulator_put;
	}

	host->mmio_base = ioremap(host->mmio_mem->start,
				  resource_size(host->mmio_mem));
	if (!host->mmio_base) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -ENOENT;
		goto probe_free_mem_region_mmio;
	}

	/* Get ahold of the data buffer we use for data in and out on MMC */
	host->data_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!host->data_mem) {
		dev_err(&pdev->dev,
			"failed to get io memory region resource.\n");
		ret = -ENOENT;
		goto probe_iounmap_mmio;
	}

	host->data_mem = request_mem_region(host->data_mem->start,
					    resource_size(host->data_mem),
					    pdev->name);
	if (!host->data_mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_iounmap_mmio;
	}

	host->data_base = ioremap(host->data_mem->start,
				  resource_size(host->data_mem));
	if (host->data_base == NULL) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -ENOENT;
		goto probe_free_mem_region_data;
	}

	ret = request_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), glamo_mci_irq,
			  IRQF_SHARED, pdev->name, host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq.\n");
		goto probe_iounmap_data;
	}

	host->vdd_current = 0;
	host->clk_rate = 50000000; /* really it's 49152000 */

	/* explain our host controller capabilities */
	mmc->ops = &glamo_mci_ops;
	mmc->ocr_avail = mmc_regulator_get_ocrmask(host->regulator);
	mmc->caps = MMC_CAP_4_BIT_DATA |
		    MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SD_HIGHSPEED;
	mmc->f_min = host->clk_rate / 256;
	mmc->f_max = sd_max_clk;

	mmc->max_blk_count = (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */
	mmc->max_blk_size = (1 << 12) - 1;  /* GLAMO_REG_MMC_RB_BLKLEN */
	mmc->max_req_size = resource_size(host->data_mem);
	mmc->max_seg_size = mmc->max_req_size;
	mmc->max_phys_segs = 128;
	mmc->max_hw_segs = 128;

	if (mmc->ocr_avail < 0) {
		dev_warn(&pdev->dev, "Failed to get ocr list for regulator: %d.\n",
			 mmc->ocr_avail);
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	}

	platform_set_drvdata(pdev, mmc);

	glamo_engine_enable(host->pdata->core, GLAMO_ENGINE_MMC);
	glamo_engine_reset(host->pdata->core, GLAMO_ENGINE_MMC);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "failed to add mmc host.\n");
		goto probe_free_irq;
	}
	writew((u16)(host->data_mem->start),
	       host->mmio_base + GLAMO_REG_MMC_WDATADS1);
	writew((u16)((host->data_mem->start) >> 16),
	       host->mmio_base + GLAMO_REG_MMC_WDATADS2);

	writew((u16)host->data_mem->start, host->mmio_base +
	       GLAMO_REG_MMC_RDATADS1);
	writew((u16)(host->data_mem->start >> 16), host->mmio_base +
	       GLAMO_REG_MMC_RDATADS2);
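	/*
	 * The bus address of the shared data window doesn't fit in a single
	 * 16-bit register, so its low half goes into WDATADS1/RDATADS1 and
	 * its high half into WDATADS2/RDATADS2 -- once for the write path
	 * and once for the read path.
	 */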
	dev_info(&pdev->dev, "initialisation done.\n");

	return 0;

probe_free_irq:
	free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);
probe_iounmap_data:
	iounmap(host->data_base);
probe_free_mem_region_data:
	release_mem_region(host->data_mem->start, resource_size(host->data_mem));
probe_iounmap_mmio:
	iounmap(host->mmio_base);
probe_free_mem_region_mmio:
	release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
probe_regulator_put:
	regulator_put(host->regulator);
probe_free_host:
	mmc_free_host(mmc);

	return ret;
}
static int glamo_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct glamo_mci_host *host = mmc_priv(mmc);

	free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);

	mmc_remove_host(mmc);
	iounmap(host->mmio_base);
	iounmap(host->data_base);
	release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
	release_mem_region(host->data_mem->start, resource_size(host->data_mem));

	regulator_put(host->regulator);

	mmc_free_host(mmc);

	glamo_engine_disable(host->pdata->core, GLAMO_ENGINE_MMC);

	return 0;
}
#ifdef CONFIG_PM

static int glamo_mci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct glamo_mci_host *host = mmc_priv(mmc);
	int ret;

	cancel_work_sync(&host->irq_work);

	/*
	 * possible workaround for SD corruption during suspend - resume:
	 * make sure the clock was running during suspend and consequently
	 * keeps running across the card's power-state transition
	 */
	glamo_mci_fix_card_div(host, host->clk_div);

	/* we are going to do more commands to override this in
	 * mmc_suspend_host(), so we need to change sd_idleclk for the
	 * duration
	 */
	suspend_sd_idleclk = sd_idleclk;
	sd_idleclk = 1;

	ret = mmc_suspend_host(mmc, state);

	return ret;
}
int glamo_mci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct glamo_mci_host *host = mmc_priv(mmc);
	int ret;

	glamo_engine_enable(host->pdata->core, GLAMO_ENGINE_MMC);
	glamo_engine_reset(host->pdata->core, GLAMO_ENGINE_MMC);

	ret = mmc_resume_host(mmc);

	/* put sd_idleclk back to its pre-suspend state */
	sd_idleclk = suspend_sd_idleclk;

	return ret;
}
EXPORT_SYMBOL_GPL(glamo_mci_resume);
#else /* CONFIG_PM */
#define glamo_mci_suspend NULL
#define glamo_mci_resume NULL
#endif /* CONFIG_PM */
static struct platform_driver glamo_mci_driver = {
	.driver.name	= "glamo-mci",
	.probe		= glamo_mci_probe,
	.remove		= glamo_mci_remove,
	.suspend	= glamo_mci_suspend,
	.resume		= glamo_mci_resume,
};
static int __init glamo_mci_init(void)
{
	platform_driver_register(&glamo_mci_driver);
	return 0;
}

static void __exit glamo_mci_exit(void)
{
	platform_driver_unregister(&glamo_mci_driver);
}

module_init(glamo_mci_init);
module_exit(glamo_mci_exit);
MODULE_DESCRIPTION("Glamo MMC/SD Card Interface driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Green <andy@openmoko.com>");