/*
 * linux/drivers/mmc/host/glamo-mmc.c - Glamo MMC driver
 *
 * Copyright (C) 2007 Openmoko, Inc, Andy Green <andy@openmoko.com>
 * Based on S3C MMC driver that was:
 * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/crc7.h>
#include <linux/scatterlist.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/host.h>

#include <linux/regulator/consumer.h>
#include <linux/mfd/glamo.h>

#include "glamo-core.h"
#include "glamo-regs.h"
31 #define DRIVER_NAME "glamo-mci"
33 struct glamo_mci_host
{
34 struct glamo_mmc_platform_data
*pdata
;
35 struct platform_device
*pdev
;
36 struct glamo_core
*core
;
38 struct resource
*mmio_mem
;
39 struct resource
*data_mem
;
40 void __iomem
*mmio_base
;
41 u16 __iomem
*data_base
;
43 struct regulator
*regulator
;
44 struct mmc_request
*mrq
;
46 unsigned int clk_rate
;
51 unsigned char request_counter
;
53 struct timer_list disable_timer
;
55 struct work_struct irq_work
;
56 struct work_struct read_work
;
58 unsigned clk_enabled
: 1;
62 static void glamo_mci_send_request(struct mmc_host
*mmc
, struct mmc_request
* mrq
);
63 static void glamo_mci_send_command(struct glamo_mci_host
*host
,
64 struct mmc_command
*cmd
);
69 * held at /(3 + 1) due to concerns of 100R recommended series resistor
70 * allows 16MHz @ 4-bit --> 8MBytes/sec raw
72 * you can override this on kernel commandline using
74 * glamo_mci.sd_max_clk=10000000
79 static int sd_max_clk
= 21000000;
80 module_param(sd_max_clk
, int, 0644);
85 * you can override this on kernel commandline using
87 * glamo_mci.sd_slow_ratio=8
91 * platform callback is used to decide effective clock rate, if not
92 * defined then max is used, if defined and returns nonzero, rate is
93 * divided by this factor
96 static int sd_slow_ratio
= 8;
97 module_param(sd_slow_ratio
, int, 0644);
100 * Post-power SD clock rate
102 * you can override this on kernel commandline using
104 * glamo_mci.sd_post_power_clock=1000000
108 * After changing power to card, clock is held at this rate until first bulk
112 static int sd_post_power_clock
= 1000000;
113 module_param(sd_post_power_clock
, int, 0644);
116 static inline void glamo_reg_write(struct glamo_mci_host
*glamo
,
117 u_int16_t reg
, u_int16_t val
)
119 writew(val
, glamo
->mmio_base
+ reg
);
122 static inline u_int16_t
glamo_reg_read(struct glamo_mci_host
*glamo
,
125 return readw(glamo
->mmio_base
+ reg
);
128 static void glamo_reg_set_bit_mask(struct glamo_mci_host
*glamo
,
129 u_int16_t reg
, u_int16_t mask
,
136 tmp
= glamo_reg_read(glamo
, reg
);
139 glamo_reg_write(glamo
, reg
, tmp
);
142 static void glamo_mci_clock_disable(struct glamo_mci_host
*host
) {
143 if (host
->clk_enabled
) {
144 glamo_engine_div_disable(host
->core
, GLAMO_ENGINE_MMC
);
145 host
->clk_enabled
= 0;
149 static void glamo_mci_clock_enable(struct glamo_mci_host
*host
) {
150 del_timer_sync(&host
->disable_timer
);
152 if (!host
->clk_enabled
) {
153 glamo_engine_div_enable(host
->core
, GLAMO_ENGINE_MMC
);
154 host
->clk_enabled
= 1;
158 static void glamo_mci_disable_timer(unsigned long data
) {
159 struct glamo_mci_host
*host
= (struct glamo_mci_host
*)data
;
160 glamo_mci_clock_disable(host
);
164 static void do_pio_read(struct glamo_mci_host
*host
, struct mmc_data
*data
)
166 struct scatterlist
*sg
;
167 u16 __iomem
*from_ptr
= host
->data_base
;
170 dev_dbg(&host
->pdev
->dev
, "pio_read():\n");
171 for (sg
= data
->sg
; sg
; sg
= sg_next(sg
)) {
172 sg_pointer
= page_address(sg_page(sg
)) + sg
->offset
;
175 memcpy(sg_pointer
, from_ptr
, sg
->length
);
176 from_ptr
+= sg
->length
>> 1;
178 data
->bytes_xfered
+= sg
->length
;
181 dev_dbg(&host
->pdev
->dev
, "pio_read(): "
182 "complete (no more data).\n");
185 static void do_pio_write(struct glamo_mci_host
*host
, struct mmc_data
*data
)
187 struct scatterlist
*sg
;
188 u16 __iomem
*to_ptr
= host
->data_base
;
191 dev_dbg(&host
->pdev
->dev
, "pio_write():\n");
192 for (sg
= data
->sg
; sg
; sg
= sg_next(sg
)) {
193 sg_pointer
= page_address(sg_page(sg
)) + sg
->offset
;
195 data
->bytes_xfered
+= sg
->length
;
197 memcpy(to_ptr
, sg_pointer
, sg
->length
);
198 to_ptr
+= sg
->length
>> 1;
201 dev_dbg(&host
->pdev
->dev
, "pio_write(): complete\n");
204 static int glamo_mci_set_card_clock(struct glamo_mci_host
*host
, int freq
)
209 glamo_mci_clock_enable(host
);
210 real_rate
= glamo_engine_reclock(host
->core
, GLAMO_ENGINE_MMC
, freq
);
212 glamo_mci_clock_disable(host
);
218 static void glamo_mci_request_done(struct glamo_mci_host
*host
, struct
220 mod_timer(&host
->disable_timer
, jiffies
+ HZ
/ 16);
221 mmc_request_done(host
->mmc
, mrq
);
225 static void glamo_mci_irq_worker(struct work_struct
*work
)
227 struct glamo_mci_host
*host
= container_of(work
, struct glamo_mci_host
,
229 struct mmc_command
*cmd
;
231 if (!host
->mrq
|| !host
->mrq
->cmd
)
234 cmd
= host
->mrq
->cmd
;
237 if (cmd
->data
->flags
& MMC_DATA_READ
) {
242 status
= glamo_reg_read(host
, GLAMO_REG_MMC_RB_STAT1
);
243 dev_dbg(&host
->pdev
->dev
, "status = 0x%04x\n", status
);
245 /* we ignore a data timeout report if we are also told the data came */
246 if (status
& GLAMO_STAT1_MMC_RB_DRDY
)
247 status
&= ~GLAMO_STAT1_MMC_DTOUT
;
249 if (status
& (GLAMO_STAT1_MMC_RTOUT
| GLAMO_STAT1_MMC_DTOUT
))
250 cmd
->error
= -ETIMEDOUT
;
251 if (status
& (GLAMO_STAT1_MMC_BWERR
| GLAMO_STAT1_MMC_BRERR
)) {
252 cmd
->error
= -EILSEQ
;
255 dev_info(&host
->pdev
->dev
, "Error after cmd: 0x%x\n", status
);
259 /* issue STOP if we have been given one to use */
260 if (host
->mrq
->stop
) {
261 glamo_mci_send_command(host
, host
->mrq
->stop
);
264 if (cmd
->data
->flags
& MMC_DATA_READ
)
265 do_pio_read(host
, cmd
->data
);
269 glamo_mci_request_done(host
, cmd
->mrq
);
272 static void glamo_mci_read_worker(struct work_struct
*work
)
274 struct glamo_mci_host
*host
= container_of(work
, struct glamo_mci_host
,
276 struct mmc_command
*cmd
;
278 uint16_t blocks_ready
;
279 size_t data_read
= 0;
281 struct scatterlist
*sg
;
282 u16 __iomem
*from_ptr
= host
->data_base
;
286 cmd
= host
->mrq
->cmd
;
289 status
= glamo_reg_read(host
, GLAMO_REG_MMC_RB_STAT1
);
291 if (status
& (GLAMO_STAT1_MMC_RTOUT
| GLAMO_STAT1_MMC_DTOUT
))
292 cmd
->error
= -ETIMEDOUT
;
293 if (status
& (GLAMO_STAT1_MMC_BWERR
| GLAMO_STAT1_MMC_BRERR
))
294 cmd
->error
= -EILSEQ
;
296 dev_info(&host
->pdev
->dev
, "Error after cmd: 0x%x\n", status
);
300 blocks_ready
= glamo_reg_read(host
, GLAMO_REG_MMC_RB_BLKCNT
);
301 data_ready
= blocks_ready
* cmd
->data
->blksz
;
303 if (data_ready
== data_read
)
306 while(sg
&& data_read
+ sg
->length
<= data_ready
) {
307 sg_pointer
= page_address(sg_page(sg
)) + sg
->offset
;
308 memcpy(sg_pointer
, from_ptr
, sg
->length
);
309 from_ptr
+= sg
->length
>> 1;
311 data_read
+= sg
->length
;
316 cmd
->data
->bytes_xfered
= data_read
;
319 status
= glamo_reg_read(host
, GLAMO_REG_MMC_RB_STAT1
);
320 } while (!(status
& GLAMO_STAT1_MMC_IDLE
));
323 glamo_mci_send_command(host
, host
->mrq
->stop
);
326 status
= glamo_reg_read(host
, GLAMO_REG_MMC_RB_STAT1
);
327 } while (!(status
& GLAMO_STAT1_MMC_IDLE
));
330 glamo_mci_request_done(host
, cmd
->mrq
);
333 static irqreturn_t
glamo_mci_irq(int irq
, void *devid
)
335 struct glamo_mci_host
*host
= (struct glamo_mci_host
*)devid
;
336 schedule_work(&host
->irq_work
);
341 static void glamo_mci_send_command(struct glamo_mci_host
*host
,
342 struct mmc_command
*cmd
)
346 unsigned int timeout
= 1000000;
347 u16
* reg_resp
= (u16
*)(host
->mmio_base
+ GLAMO_REG_MMC_CMD_RSP1
);
349 int triggers_int
= 1;
351 /* if we can't do it, reject as busy */
352 if (!glamo_reg_read(host
, GLAMO_REG_MMC_RB_STAT1
) &
353 GLAMO_STAT1_MMC_IDLE
) {
358 /* create an array in wire order for CRC computation */
359 u8a
[0] = 0x40 | (cmd
->opcode
& 0x3f);
360 u8a
[1] = (u8
)(cmd
->arg
>> 24);
361 u8a
[2] = (u8
)(cmd
->arg
>> 16);
362 u8a
[3] = (u8
)(cmd
->arg
>> 8);
363 u8a
[4] = (u8
)cmd
->arg
;
364 u8a
[5] = (crc7(0, u8a
, 5) << 1) | 0x01; /* crc7 on first 5 bytes of packet */
366 /* issue the wire-order array including CRC in register order */
367 glamo_reg_write(host
, GLAMO_REG_MMC_CMD_REG1
, ((u8a
[4] << 8) | u8a
[5]));
368 glamo_reg_write(host
, GLAMO_REG_MMC_CMD_REG2
, ((u8a
[2] << 8) | u8a
[3]));
369 glamo_reg_write(host
, GLAMO_REG_MMC_CMD_REG3
, ((u8a
[0] << 8) | u8a
[1]));
371 /* command index toggle */
372 fire
|= (host
->request_counter
& 1) << 12;
374 /* set type of command */
375 switch (mmc_cmd_type(cmd
)) {
377 fire
|= GLAMO_FIRE_MMC_CMDT_BNR
;
380 fire
|= GLAMO_FIRE_MMC_CMDT_BR
;
383 fire
|= GLAMO_FIRE_MMC_CMDT_AND
;
386 fire
|= GLAMO_FIRE_MMC_CMDT_AD
;
390 * if it expects a response, set the type expected
392 * R1, Length : 48bit, Normal response
393 * R1b, Length : 48bit, same R1, but added card busy status
394 * R2, Length : 136bit (really 128 bits with CRC snipped)
395 * R3, Length : 48bit (OCR register value)
396 * R4, Length : 48bit, SDIO_OP_CONDITION, Reverse SDIO Card
397 * R5, Length : 48bit, IO_RW_DIRECTION, Reverse SDIO Card
398 * R6, Length : 48bit (RCA register)
399 * R7, Length : 48bit (interface condition, VHS(voltage supplied),
400 * check pattern, CRC7)
402 switch (mmc_resp_type(cmd
)) {
403 case MMC_RSP_R1
: /* same index as R6 and R7 */
404 fire
|= GLAMO_FIRE_MMC_RSPT_R1
;
407 fire
|= GLAMO_FIRE_MMC_RSPT_R1b
;
410 fire
|= GLAMO_FIRE_MMC_RSPT_R2
;
413 fire
|= GLAMO_FIRE_MMC_RSPT_R3
;
415 /* R4 and R5 supported by chip not defined in linux/mmc/core.h (sdio) */
418 * From the command index, set up the command class in the host ctrllr
420 * missing guys present on chip but couldn't figure out how to use yet:
422 * 0x9 "cancel running command"
424 switch (cmd
->opcode
) {
425 case MMC_READ_SINGLE_BLOCK
:
426 fire
|= GLAMO_FIRE_MMC_CC_SBR
; /* single block read */
428 case MMC_SWITCH
: /* 64 byte payload */
429 case SD_APP_SEND_SCR
:
430 case MMC_READ_MULTIPLE_BLOCK
:
431 /* we will get an interrupt off this */
433 /* multiblock no stop */
434 fire
|= GLAMO_FIRE_MMC_CC_MBRNS
;
436 /* multiblock with stop */
437 fire
|= GLAMO_FIRE_MMC_CC_MBRS
;
439 case MMC_WRITE_BLOCK
:
440 fire
|= GLAMO_FIRE_MMC_CC_SBW
; /* single block write */
442 case MMC_WRITE_MULTIPLE_BLOCK
:
444 /* multiblock with stop */
445 fire
|= GLAMO_FIRE_MMC_CC_MBWS
;
447 /* multiblock NO stop-- 'RESERVED'? */
448 fire
|= GLAMO_FIRE_MMC_CC_MBWNS
;
450 case MMC_STOP_TRANSMISSION
:
451 fire
|= GLAMO_FIRE_MMC_CC_STOP
; /* STOP */
455 fire
|= GLAMO_FIRE_MMC_CC_BASIC
; /* "basic command" */
461 host
->mrq
= cmd
->mrq
;
463 /* always largest timeout */
464 glamo_reg_write(host
, GLAMO_REG_MMC_TIMEOUT
, 0xfff);
466 /* Generate interrupt on txfer */
467 glamo_reg_set_bit_mask(host
, GLAMO_REG_MMC_BASIC
, 0xff36,
469 GLAMO_BASIC_MMC_NO_CLK_RD_WAIT
|
470 GLAMO_BASIC_MMC_EN_COMPL_INT
|
471 GLAMO_BASIC_MMC_EN_DATA_PUPS
|
472 GLAMO_BASIC_MMC_EN_CMD_PUP
);
474 /* send the command out on the wire */
475 /* dev_info(&host->pdev->dev, "Using FIRE %04X\n", fire); */
476 glamo_reg_write(host
, GLAMO_REG_MMC_CMD_FIRE
, fire
);
478 /* we are deselecting card? because it isn't going to ack then... */
479 if ((cmd
->opcode
== 7) && (cmd
->arg
== 0))
483 * we must spin until response is ready or timed out
484 * -- we don't get interrupts unless there is a bulk rx
487 status
= glamo_reg_read(host
, GLAMO_REG_MMC_RB_STAT1
);
488 while (((((status
>> 15) & 1) != (host
->request_counter
& 1)) ||
489 (!(status
& (GLAMO_STAT1_MMC_RB_RRDY
|
490 GLAMO_STAT1_MMC_RTOUT
|
491 GLAMO_STAT1_MMC_DTOUT
|
492 GLAMO_STAT1_MMC_BWERR
|
493 GLAMO_STAT1_MMC_BRERR
)))) && (timeout
--));
495 if ((status
& (GLAMO_STAT1_MMC_RTOUT
|
496 GLAMO_STAT1_MMC_DTOUT
)) ||
498 cmd
->error
= -ETIMEDOUT
;
499 } else if (status
& (GLAMO_STAT1_MMC_BWERR
| GLAMO_STAT1_MMC_BRERR
)) {
500 cmd
->error
= -EILSEQ
;
503 if (cmd
->flags
& MMC_RSP_PRESENT
) {
504 if (cmd
->flags
& MMC_RSP_136
) {
505 cmd
->resp
[3] = readw(®_resp
[0]) |
506 (readw(®_resp
[1]) << 16);
507 cmd
->resp
[2] = readw(®_resp
[2]) |
508 (readw(®_resp
[3]) << 16);
509 cmd
->resp
[1] = readw(®_resp
[4]) |
510 (readw(®_resp
[5]) << 16);
511 cmd
->resp
[0] = readw(®_resp
[6]) |
512 (readw(®_resp
[7]) << 16);
514 cmd
->resp
[0] = (readw(®_resp
[0]) >> 8) |
515 (readw(®_resp
[1]) << 8) |
516 ((readw(®_resp
[2])) << 24);
521 /* We'll only get an interrupt when all data has been transfered.
522 By starting to copy data when it's avaiable we can increase throughput by
524 if (cmd
->data
&& (cmd
->data
->flags
& MMC_DATA_READ
))
525 schedule_work(&host
->read_work
);
530 static int glamo_mci_prepare_pio(struct glamo_mci_host
*host
,
531 struct mmc_data
*data
)
533 /* set up the block info */
534 glamo_reg_write(host
, GLAMO_REG_MMC_DATBLKLEN
, data
->blksz
);
535 glamo_reg_write(host
, GLAMO_REG_MMC_DATBLKCNT
, data
->blocks
);
537 data
->bytes_xfered
= 0;
539 /* if write, prep the write into the shared RAM before the command */
540 if (data
->flags
& MMC_DATA_WRITE
) {
541 do_pio_write(host
, data
);
544 dev_dbg(&host
->pdev
->dev
, "(blksz=%d, count=%d)\n",
545 data
->blksz
, data
->blocks
);
549 static int glamo_mci_irq_poll(struct glamo_mci_host
*host
,
550 struct mmc_command
*cmd
)
552 int timeout
= 1000000;
554 * if the glamo INT# line isn't wired (*cough* it can happen)
555 * I'm afraid we have to spin on the IRQ status bit and "be
559 * we have faith we will get an "interrupt"...
560 * but something insane like suspend problems can mean
561 * we spin here forever, so we timeout after a LONG time
563 while ((!(readw(host
->core
->base
+
564 GLAMO_REG_IRQ_STATUS
) & GLAMO_IRQ_MMC
)) &&
568 if (cmd
->data
->error
)
569 cmd
->data
->error
= -ETIMEDOUT
;
570 dev_err(&host
->pdev
->dev
, "Payload timeout\n");
573 /* ack this interrupt source */
574 writew(GLAMO_IRQ_MMC
, host
->core
->base
+
575 GLAMO_REG_IRQ_CLEAR
);
577 /* yay we are an interrupt controller! -- call the ISR
578 * it will stop clock to card
580 glamo_mci_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC
), host
);
585 static void glamo_mci_send_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
587 struct glamo_mci_host
*host
= mmc_priv(mmc
);
588 struct mmc_command
*cmd
= mrq
->cmd
;
590 glamo_mci_clock_enable(host
);
591 host
->request_counter
++;
593 if(glamo_mci_prepare_pio(host
, cmd
->data
)) {
595 cmd
->data
->error
= -EIO
;
600 dev_dbg(&host
->pdev
->dev
,"cmd 0x%x, "
601 "arg 0x%x data=%p mrq->stop=%p flags 0x%x\n",
602 cmd
->opcode
, cmd
->arg
, cmd
->data
, cmd
->mrq
->stop
,
605 glamo_mci_send_command(host
, cmd
);
608 * if we don't have bulk data to take care of, we're done
610 if (!cmd
->data
|| cmd
->error
)
614 if (!host
->core
->irq_works
) {
615 if (glamo_mci_irq_poll(host
, mrq
->cmd
))
620 * Otherwise can can use the interrupt as async completion --
621 * if there is read data coming, or we wait for write data to complete,
622 * exit without mmc_request_done() as the payload interrupt
625 dev_dbg(&host
->pdev
->dev
, "Waiting for payload data\n");
628 glamo_mci_request_done(host
, mrq
);
631 static void glamo_mci_set_power_mode(struct glamo_mci_host
*host
,
632 unsigned char power_mode
) {
635 if (power_mode
== host
->power_mode
)
640 if (host
->power_mode
== MMC_POWER_OFF
) {
641 ret
= regulator_enable(host
->regulator
);
643 dev_err(&host
->pdev
->dev
, "Failed to enable regulator: %d\n", ret
);
650 glamo_engine_disable(host
->core
,
653 ret
= regulator_disable(host
->regulator
);
655 dev_warn(&host
->pdev
->dev
, "Failed to disable regulator: %d\n", ret
);
658 host
->power_mode
= power_mode
;
661 static void glamo_mci_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
663 struct glamo_mci_host
*host
= mmc_priv(mmc
);
670 glamo_mci_set_power_mode(host
, ios
->power_mode
);
672 if (host
->vdd
!= ios
->vdd
) {
673 ret
= mmc_regulator_set_ocr(host
->regulator
, ios
->vdd
);
675 dev_err(&host
->pdev
->dev
, "Failed to set regulator voltage: %d\n", ret
);
677 host
->vdd
= ios
->vdd
;
679 rate
= glamo_mci_set_card_clock(host
, ios
->clock
);
681 if ((ios
->power_mode
== MMC_POWER_ON
) ||
682 (ios
->power_mode
== MMC_POWER_UP
)) {
683 dev_info(&host
->pdev
->dev
,
684 "powered (vdd = %hu) clk: %dkHz div=%hu (req: %ukHz). "
685 "Bus width=%d\n", ios
->vdd
,
687 ios
->clock
/ 1000, (int)ios
->bus_width
);
689 dev_info(&host
->pdev
->dev
, "glamo_mci_set_ios: power down.\n");
693 if (ios
->bus_width
== MMC_BUS_WIDTH_4
)
694 bus_width
= GLAMO_BASIC_MMC_EN_4BIT_DATA
;
696 sd_drive
= (rate
* 4) / host
->clk_rate
;
700 glamo_reg_set_bit_mask(host
, GLAMO_REG_MMC_BASIC
,
701 GLAMO_BASIC_MMC_EN_4BIT_DATA
| 0xb0,
702 bus_width
| sd_drive
<< 6);
707 * no physical write protect supported by us
709 static int glamo_mci_get_ro(struct mmc_host
*mmc
)
714 static struct mmc_host_ops glamo_mci_ops
= {
715 .request
= glamo_mci_send_request
,
716 .set_ios
= glamo_mci_set_ios
,
717 .get_ro
= glamo_mci_get_ro
,
720 static int glamo_mci_probe(struct platform_device
*pdev
)
722 struct mmc_host
*mmc
;
723 struct glamo_mci_host
*host
;
724 struct glamo_core
*core
= dev_get_drvdata(pdev
->dev
.parent
);
727 dev_info(&pdev
->dev
, "glamo_mci driver (C)2007 Openmoko, Inc\n");
729 mmc
= mmc_alloc_host(sizeof(struct glamo_mci_host
), &pdev
->dev
);
735 host
= mmc_priv(mmc
);
739 host
->pdata
= core
->pdata
->mmc_data
;
740 host
->power_mode
= MMC_POWER_OFF
;
741 host
->clk_enabled
= 0;
744 INIT_WORK(&host
->irq_work
, glamo_mci_irq_worker
);
745 INIT_WORK(&host
->read_work
, glamo_mci_read_worker
);
747 host
->regulator
= regulator_get(pdev
->dev
.parent
, "SD_3V3");
748 if (!host
->regulator
) {
749 dev_err(&pdev
->dev
, "Cannot proceed without regulator.\n");
751 goto probe_free_host
;
754 host
->mmio_mem
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
755 if (!host
->mmio_mem
) {
757 "failed to get io memory region resouce.\n");
759 goto probe_regulator_put
;
762 host
->mmio_mem
= request_mem_region(host
->mmio_mem
->start
,
763 resource_size(host
->mmio_mem
),
766 if (!host
->mmio_mem
) {
767 dev_err(&pdev
->dev
, "failed to request io memory region.\n");
769 goto probe_regulator_put
;
772 host
->mmio_base
= ioremap(host
->mmio_mem
->start
,
773 resource_size(host
->mmio_mem
));
774 if (!host
->mmio_base
) {
775 dev_err(&pdev
->dev
, "failed to ioremap() io memory region.\n");
777 goto probe_free_mem_region_mmio
;
781 /* Get ahold of our data buffer we use for data in and out on MMC */
782 host
->data_mem
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
783 if (!host
->data_mem
) {
785 "failed to get io memory region resource.\n");
787 goto probe_iounmap_mmio
;
790 host
->data_mem
= request_mem_region(host
->data_mem
->start
,
791 resource_size(host
->data_mem
),
794 if (!host
->data_mem
) {
795 dev_err(&pdev
->dev
, "failed to request io memory region.\n");
797 goto probe_iounmap_mmio
;
799 host
->data_base
= ioremap(host
->data_mem
->start
,
800 resource_size(host
->data_mem
));
802 if (host
->data_base
== 0) {
803 dev_err(&pdev
->dev
, "failed to ioremap() io memory region.\n");
805 goto probe_free_mem_region_data
;
808 ret
= request_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC
), glamo_mci_irq
, IRQF_SHARED
,
811 dev_err(&pdev
->dev
, "failed to register irq.\n");
812 goto probe_iounmap_data
;
817 host
->clk_rate
= glamo_pll_rate(host
->core
, GLAMO_PLL1
);
819 /* explain our host controller capabilities */
820 mmc
->ops
= &glamo_mci_ops
;
821 mmc
->ocr_avail
= mmc_regulator_get_ocrmask(host
->regulator
);
822 mmc
->caps
= MMC_CAP_4_BIT_DATA
|
823 MMC_CAP_MMC_HIGHSPEED
|
824 MMC_CAP_SD_HIGHSPEED
;
825 mmc
->f_min
= host
->clk_rate
/ 256;
826 mmc
->f_max
= sd_max_clk
;
828 mmc
->max_blk_count
= (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */
829 mmc
->max_blk_size
= (1 << 12) - 1; /* GLAMO_REG_MMC_RB_BLKLEN */
830 mmc
->max_req_size
= resource_size(host
->data_mem
);
831 mmc
->max_seg_size
= mmc
->max_req_size
;
832 mmc
->max_phys_segs
= 128;
833 mmc
->max_hw_segs
= 128;
835 if (mmc
->ocr_avail
< 0) {
836 dev_warn(&pdev
->dev
, "Failed to get ocr list for regulator: %d.\n",
838 mmc
->ocr_avail
= MMC_VDD_32_33
| MMC_VDD_33_34
;
841 platform_set_drvdata(pdev
, mmc
);
843 glamo_engine_enable(host
->core
, GLAMO_ENGINE_MMC
);
844 glamo_engine_reset(host
->core
, GLAMO_ENGINE_MMC
);
846 glamo_reg_write(host
, GLAMO_REG_MMC_WDATADS1
,
847 (u16
)(host
->data_mem
->start
));
848 glamo_reg_write(host
, GLAMO_REG_MMC_WDATADS2
,
849 (u16
)(host
->data_mem
->start
>> 16));
851 glamo_reg_write(host
, GLAMO_REG_MMC_RDATADS1
,
852 (u16
)(host
->data_mem
->start
));
853 glamo_reg_write(host
, GLAMO_REG_MMC_RDATADS2
,
854 (u16
)(host
->data_mem
->start
>> 16));
856 setup_timer(&host
->disable_timer
, glamo_mci_disable_timer
,
857 (unsigned long)host
);
859 if ((ret
= mmc_add_host(mmc
))) {
860 dev_err(&pdev
->dev
, "failed to add mmc host.\n");
864 dev_info(&pdev
->dev
,"initialisation done.\n");
868 free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC
), host
);
870 iounmap(host
->data_base
);
871 probe_free_mem_region_data
:
872 release_mem_region(host
->data_mem
->start
, resource_size(host
->data_mem
));
874 iounmap(host
->mmio_base
);
875 probe_free_mem_region_mmio
:
876 release_mem_region(host
->mmio_mem
->start
, resource_size(host
->mmio_mem
));
878 regulator_put(host
->regulator
);
885 static int glamo_mci_remove(struct platform_device
*pdev
)
887 struct mmc_host
*mmc
= platform_get_drvdata(pdev
);
888 struct glamo_mci_host
*host
= mmc_priv(mmc
);
890 free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC
), host
);
892 mmc_remove_host(mmc
);
893 iounmap(host
->mmio_base
);
894 iounmap(host
->data_base
);
895 release_mem_region(host
->mmio_mem
->start
, resource_size(host
->mmio_mem
));
896 release_mem_region(host
->data_mem
->start
, resource_size(host
->data_mem
));
898 regulator_put(host
->regulator
);
902 glamo_engine_disable(host
->core
, GLAMO_ENGINE_MMC
);
909 static int glamo_mci_suspend(struct device
*dev
)
911 struct mmc_host
*mmc
= dev_get_drvdata(dev
);
912 struct glamo_mci_host
*host
= mmc_priv(mmc
);
915 cancel_work_sync(&host
->irq_work
);
917 ret
= mmc_suspend_host(mmc
, PMSG_SUSPEND
);
918 glamo_mci_clock_enable(host
);
923 static int glamo_mci_resume(struct device
*dev
)
925 struct mmc_host
*mmc
= dev_get_drvdata(dev
);
926 struct glamo_mci_host
*host
= mmc_priv(mmc
);
929 glamo_engine_enable(host
->core
, GLAMO_ENGINE_MMC
);
930 glamo_engine_reset(host
->core
, GLAMO_ENGINE_MMC
);
932 glamo_reg_write(host
, GLAMO_REG_MMC_WDATADS1
,
933 (u16
)(host
->data_mem
->start
));
934 glamo_reg_write(host
, GLAMO_REG_MMC_WDATADS2
,
935 (u16
)(host
->data_mem
->start
>> 16));
937 glamo_reg_write(host
, GLAMO_REG_MMC_RDATADS1
,
938 (u16
)(host
->data_mem
->start
));
939 glamo_reg_write(host
, GLAMO_REG_MMC_RDATADS2
,
940 (u16
)(host
->data_mem
->start
>> 16));
943 ret
= mmc_resume_host(host
->mmc
);
944 /* glamo_mci_clock_disable(host);*/
949 static struct dev_pm_ops glamo_mci_pm_ops
= {
950 .suspend
= glamo_mci_suspend
,
951 .resume
= glamo_mci_resume
,
953 #define GLAMO_MCI_PM_OPS (&glamo_mci_pm_ops)
955 #else /* CONFIG_PM */
956 #define GLAMO_MCI_PM_OPS NULL
957 #endif /* CONFIG_PM */
960 static struct platform_driver glamo_mci_driver
=
962 .probe
= glamo_mci_probe
,
963 .remove
= glamo_mci_remove
,
966 .owner
= THIS_MODULE
,
967 .pm
= GLAMO_MCI_PM_OPS
,
971 static int __init
glamo_mci_init(void)
973 platform_driver_register(&glamo_mci_driver
);
977 static void __exit
glamo_mci_exit(void)
979 platform_driver_unregister(&glamo_mci_driver
);
982 module_init(glamo_mci_init
);
983 module_exit(glamo_mci_exit
);
985 MODULE_DESCRIPTION("Glamo MMC/SD Card Interface driver");
986 MODULE_LICENSE("GPL");
987 MODULE_AUTHOR("Andy Green <andy@openmoko.com>");