[s3c24xx] glamo mmc regulator fixes.
target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-mci.c
/*
 * linux/drivers/mmc/host/glamo-mmc.c - Glamo MMC driver
 *
 * Copyright (C) 2007 Openmoko, Inc, Andy Green <andy@openmoko.com>
 * Based on S3C MMC driver that was:
 * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/crc7.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/glamo.h>

#include "glamo-mci.h"
#include "glamo-core.h"
#include "glamo-regs.h"

#define DRIVER_NAME "glamo-mci"

static void glamo_mci_send_request(struct mmc_host *mmc);
static void glamo_mci_send_command(struct glamo_mci_host *host,
				   struct mmc_command *cmd);

/*
 * Max SD clock rate
 *
 * held at /(3 + 1) due to concerns about the recommended 100R series
 * resistor; allows 16MHz @ 4-bit --> 8MBytes/sec raw
 *
 * you can override this on the kernel commandline using
 *
 * glamo_mci.sd_max_clk=10000000
 *
 * for example
 */

static int sd_max_clk = 50000000 / 3;
module_param(sd_max_clk, int, 0644);

/*
 * Slow SD clock rate
 *
 * you can override this on kernel commandline using
 *
 * glamo_mci.sd_slow_ratio=8
 *
 * for example
 *
 * platform callback is used to decide effective clock rate, if not
 * defined then max is used, if defined and returns nonzero, rate is
 * divided by this factor
 */

static int sd_slow_ratio = 8;
module_param(sd_slow_ratio, int, 0644);

/*
 * Post-power SD clock rate
 *
 * you can override this on kernel commandline using
 *
 * glamo_mci.sd_post_power_clock=1000000
 *
 * for example
 *
 * After changing power to card, clock is held at this rate until first bulk
 * transfer completes
 */

static int sd_post_power_clock = 1000000;
module_param(sd_post_power_clock, int, 0644);

/*
 * SD Signal drive strength
 *
 * you can override this on kernel commandline using
 *
 * glamo_mci.sd_drive=0
 *
 * for example
 */

static int sd_drive;
module_param(sd_drive, int, 0644);

/*
 * Allow the SD clock to run while the interface is idle
 *
 * you can override this on kernel commandline using
 *
 * glamo_mci.sd_idleclk=0
 *
 * for example
 */

static int sd_idleclk = 0; /* disallow idle clock by default */
module_param(sd_idleclk, int, 0644);

/* used to stash real idleclk state in suspend: we force it to run in there */
static int suspend_sd_idleclk;

static inline void glamo_reg_write(struct glamo_mci_host *glamo,
				   u_int16_t reg, u_int16_t val)
{
	writew(val, glamo->mmio_base + reg);
}

static inline u_int16_t glamo_reg_read(struct glamo_mci_host *glamo,
				       u_int16_t reg)
{
	return readw(glamo->mmio_base + reg);
}

static void glamo_reg_set_bit_mask(struct glamo_mci_host *glamo,
				   u_int16_t reg, u_int16_t mask,
				   u_int16_t val)
{
	u_int16_t tmp;

	val &= mask;

	tmp = glamo_reg_read(glamo, reg);
	tmp &= ~mask;
	tmp |= val;
	glamo_reg_write(glamo, reg, tmp);
}

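/*
 * PIO data path: the MMC payload lives in a window of the Glamo's shared
 * RAM (host->data_base, ioremap()ed from the second platform resource).
 * These helpers copy between that window and the request's scatterlist;
 * the window pointer advances in 16-bit units, so sg lengths are assumed
 * to be even.
 */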
static void do_pio_read(struct glamo_mci_host *host)
{
	struct scatterlist *sg;
	u16 __iomem *from_ptr = host->data_base;
	struct mmc_data *data = host->mrq->data;
	void *sg_pointer;

	dev_dbg(&host->pdev->dev, "pio_read():\n");
	for (sg = data->sg; sg; sg = sg_next(sg)) {
		sg_pointer = page_address(sg_page(sg)) + sg->offset;

		memcpy(sg_pointer, from_ptr, sg->length);
		from_ptr += sg->length >> 1;

		data->bytes_xfered += sg->length;
	}

	dev_dbg(&host->pdev->dev, "pio_read(): "
				  "complete (no more data).\n");
}

static void do_pio_write(struct glamo_mci_host *host)
{
	struct scatterlist *sg;
	u16 __iomem *to_ptr = host->data_base;
	struct mmc_data *data = host->mrq->data;
	void *sg_pointer;

	dev_dbg(&host->pdev->dev, "pio_write():\n");
	for (sg = data->sg; sg; sg = sg_next(sg)) {
		sg_pointer = page_address(sg_page(sg)) + sg->offset;

		data->bytes_xfered += sg->length;

		memcpy(to_ptr, sg_pointer, sg->length);
		to_ptr += sg->length >> 1;
	}

	dev_dbg(&host->pdev->dev, "pio_write(): complete\n");
}

static void glamo_mci_fix_card_div(struct glamo_mci_host *host, int div)
{
	unsigned long flags;

	spin_lock_irqsave(&host->pdata->core->lock, flags);

	if (div < 0) {
		/* stop clock - remove clock from divider input */
		writew(readw(host->pdata->core->base +
			     GLAMO_REG_CLOCK_GEN5_1) & (~GLAMO_CLOCK_GEN51_EN_DIV_TCLK),
		       host->pdata->core->base + GLAMO_REG_CLOCK_GEN5_1);
	} else {

		if (host->force_slow_during_powerup)
			div = host->clk_rate / sd_post_power_clock;
		else if (host->pdata->glamo_mmc_use_slow &&
			 host->pdata->glamo_mmc_use_slow())
			div = div * sd_slow_ratio;

		if (div > 255)
			div = 255;
		/*
		 * set the nearest prescaler factor
		 *
		 * register shared with SCLK divisor -- no chance of race because
		 * we don't use sensor interface
		 */
		writew((readw(host->pdata->core->base +
			      GLAMO_REG_CLOCK_GEN8) & 0xff00) | div,
		       host->pdata->core->base + GLAMO_REG_CLOCK_GEN8);
		/* enable clock to divider input */
		writew(readw(host->pdata->core->base +
			     GLAMO_REG_CLOCK_GEN5_1) | GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
		       host->pdata->core->base + GLAMO_REG_CLOCK_GEN5_1);
	}
	spin_unlock_irqrestore(&host->pdata->core->lock, flags);
	mdelay(5);
}

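/*
 * Pick the smallest divider that keeps the card clock at or below the
 * requested frequency; the card then runs at clk_rate / (div + 1).
 * A frequency of 0 means "stop the clock", which we honour unless idle
 * clocking is enabled or we are still in the slow post-power-up phase.
 */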
static int glamo_mci_set_card_clock(struct glamo_mci_host *host, int freq)
{
	int div = 0;
	int real_rate = 0;

	if (freq) {
		/* Set clock */
		for (div = 0; div < 255; div++) {
			real_rate = host->clk_rate / (div + 1);
			if (real_rate <= freq)
				break;
		}

		host->clk_div = div;
		glamo_mci_fix_card_div(host, div);
	} else {
		/* stop clock */
		host->clk_div = 0xff;

		if (!sd_idleclk && !host->force_slow_during_powerup)
			/* clock off */
			glamo_mci_fix_card_div(host, -1);
	}
	host->real_rate = real_rate;
	return real_rate;
}

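/*
 * Runs from the shared workqueue, in process context: drains read data out
 * of the Glamo data window, sends any STOP command the request asked for,
 * gates the card clock again and completes the request.
 */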
static void glamo_mci_irq_worker(struct work_struct *work)
{
	struct glamo_mci_host *host =
		container_of(work, struct glamo_mci_host, irq_work);
	struct mmc_command *cmd = host->mrq->cmd;

	if (cmd->data->flags & MMC_DATA_READ)
		do_pio_read(host);

	/* issue STOP if we have been given one to use */
	if (host->mrq->stop)
		glamo_mci_send_command(host, host->mrq->stop);

	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* clock off */
		glamo_mci_fix_card_div(host, -1);

	host->mrq = NULL;
	mmc_request_done(host->mmc, cmd->mrq);
}

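/*
 * The Glamo raises this interrupt when a bulk transfer completes.  We check
 * the error bits here, but defer the actual PIO copy and request completion
 * to the workqueue so we don't sit in IRQ context for the memcpy.
 */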
static irqreturn_t glamo_mci_irq(int irq, void *devid)
{
	struct glamo_mci_host *host = (struct glamo_mci_host *)devid;
	u16 status;
	struct mmc_command *cmd;
	unsigned long flags;

	if (host->suspending) { /* bad news, dangerous time */
		dev_err(&host->pdev->dev, "****glamo_mci_irq before resumed\n");
		goto leave;
	}

	if (!host->mrq)
		goto leave;
	cmd = host->mrq->cmd;
	if (!cmd)
		goto leave;

	spin_lock_irqsave(&host->lock, flags);

	status = readw(host->mmio_base + GLAMO_REG_MMC_RB_STAT1);
	dev_dbg(&host->pdev->dev, "status = 0x%04x\n", status);

	/* we ignore a data timeout report if we are also told the data came */
	if (status & GLAMO_STAT1_MMC_RB_DRDY)
		status &= ~GLAMO_STAT1_MMC_DTOUT;

	if (status & (GLAMO_STAT1_MMC_RTOUT |
		      GLAMO_STAT1_MMC_DTOUT))
		cmd->error = -ETIMEDOUT;
	if (status & (GLAMO_STAT1_MMC_BWERR |
		      GLAMO_STAT1_MMC_BRERR))
		cmd->error = -EILSEQ;
	if (cmd->error) {
		dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
		goto done;
	}

	/*
	 * disable the initial slow start after first bulk transfer
	 */
	if (host->force_slow_during_powerup)
		host->force_slow_during_powerup--;

	/*
	 * we perform the memcpy out of Glamo memory outside of IRQ context
	 * so we don't block other interrupts
	 */
	schedule_work(&host->irq_work);

	goto unlock;

done:
	host->mrq = NULL;
	mmc_request_done(host->mmc, cmd->mrq);
unlock:
	spin_unlock_irqrestore(&host->lock, flags);
leave:
	return IRQ_HANDLED;
}

static void glamo_mci_send_command(struct glamo_mci_host *host,
				   struct mmc_command *cmd)
{
	u8 u8a[6];
	u16 fire = 0;
	unsigned int timeout = 1000000;
	u16 *reg_resp = (u16 *)(host->mmio_base + GLAMO_REG_MMC_CMD_RSP1);
	u16 status;

	/* if we can't do it (controller not idle), reject as busy */
	if (!(readw(host->mmio_base + GLAMO_REG_MMC_RB_STAT1) &
	      GLAMO_STAT1_MMC_IDLE)) {
		host->mrq = NULL;
		cmd->error = -EBUSY;
		mmc_request_done(host->mmc, cmd->mrq);
		return;
	}

	/* create an array in wire order for CRC computation */
	u8a[0] = 0x40 | (cmd->opcode & 0x3f);
	u8a[1] = (u8)(cmd->arg >> 24);
	u8a[2] = (u8)(cmd->arg >> 16);
	u8a[3] = (u8)(cmd->arg >> 8);
	u8a[4] = (u8)cmd->arg;
	u8a[5] = (crc7(0, u8a, 5) << 1) | 0x01; /* crc7 on first 5 bytes of packet */

	/* issue the wire-order array including CRC in register order */
	writew((u8a[4] << 8) | u8a[5], host->mmio_base + GLAMO_REG_MMC_CMD_REG1);
	writew((u8a[2] << 8) | u8a[3], host->mmio_base + GLAMO_REG_MMC_CMD_REG2);
	writew((u8a[0] << 8) | u8a[1], host->mmio_base + GLAMO_REG_MMC_CMD_REG3);

	/* command index toggle */
	fire |= (host->request_counter & 1) << 12;

	/* set type of command */
	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		fire |= GLAMO_FIRE_MMC_CMDT_BNR;
		break;
	case MMC_CMD_BCR:
		fire |= GLAMO_FIRE_MMC_CMDT_BR;
		break;
	case MMC_CMD_AC:
		fire |= GLAMO_FIRE_MMC_CMDT_AND;
		break;
	case MMC_CMD_ADTC:
		fire |= GLAMO_FIRE_MMC_CMDT_AD;
		break;
	}
	/*
	 * if it expects a response, set the type expected
	 *
	 * R1, Length : 48bit, Normal response
	 * R1b, Length : 48bit, same R1, but added card busy status
	 * R2, Length : 136bit (really 128 bits with CRC snipped)
	 * R3, Length : 48bit (OCR register value)
	 * R4, Length : 48bit, SDIO_OP_CONDITION, Reverse SDIO Card
	 * R5, Length : 48bit, IO_RW_DIRECTION, Reverse SDIO Card
	 * R6, Length : 48bit (RCA register)
	 * R7, Length : 48bit (interface condition, VHS(voltage supplied),
	 *     check pattern, CRC7)
	 */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* same index as R6 and R7 */
		fire |= GLAMO_FIRE_MMC_RSPT_R1;
		break;
	case MMC_RSP_R1B:
		fire |= GLAMO_FIRE_MMC_RSPT_R1b;
		break;
	case MMC_RSP_R2:
		fire |= GLAMO_FIRE_MMC_RSPT_R2;
		break;
	case MMC_RSP_R3:
		fire |= GLAMO_FIRE_MMC_RSPT_R3;
		break;
	/* R4 and R5 supported by chip not defined in linux/mmc/core.h (sdio) */
	}
	/*
	 * From the command index, set up the command class in the host
	 * controller
	 *
	 * missing guys present on chip but couldn't figure out how to use yet:
	 *     0x0 "stream read"
	 *     0x9 "cancel running command"
	 */
	switch (cmd->opcode) {
	case MMC_READ_SINGLE_BLOCK:
		fire |= GLAMO_FIRE_MMC_CC_SBR; /* single block read */
		break;
	case MMC_SWITCH: /* 64 byte payload */
	case SD_APP_SEND_SCR:
	case MMC_READ_MULTIPLE_BLOCK:
		/* we will get an interrupt off this */
		if (!cmd->mrq->stop)
			/* multiblock no stop */
			fire |= GLAMO_FIRE_MMC_CC_MBRNS;
		else
			/* multiblock with stop */
			fire |= GLAMO_FIRE_MMC_CC_MBRS;
		break;
	case MMC_WRITE_BLOCK:
		fire |= GLAMO_FIRE_MMC_CC_SBW; /* single block write */
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		if (cmd->mrq->stop)
			/* multiblock with stop */
			fire |= GLAMO_FIRE_MMC_CC_MBWS;
		else
			/* multiblock NO stop -- 'RESERVED'? */
			fire |= GLAMO_FIRE_MMC_CC_MBWNS;
		break;
	case MMC_STOP_TRANSMISSION:
		fire |= GLAMO_FIRE_MMC_CC_STOP; /* STOP */
		break;
	default:
		fire |= GLAMO_FIRE_MMC_CC_BASIC; /* "basic command" */
		break;
	}
	/* always largest timeout */
	writew(0xfff, host->mmio_base + GLAMO_REG_MMC_TIMEOUT);

	/* Generate interrupt on txfer */
	glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC, ~0x3e,
			       0x0800 | GLAMO_BASIC_MMC_NO_CLK_RD_WAIT |
			       GLAMO_BASIC_MMC_EN_COMPL_INT | (sd_drive << 6));

	/* send the command out on the wire */
	/* dev_info(&host->pdev->dev, "Using FIRE %04X\n", fire); */
	writew(fire, host->mmio_base + GLAMO_REG_MMC_CMD_FIRE);

	/* if we are deselecting the card it is not going to ack, so don't wait */
	if ((cmd->opcode == MMC_SELECT_CARD) && (cmd->arg == 0))
		return;

	/*
	 * we must spin until response is ready or timed out
	 * -- we don't get interrupts unless there is a bulk rx
	 */
	udelay(5);
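	/*
	 * the controller appears to echo the command toggle we put in FIRE
	 * bit 12 back in RB_STAT1 bit 15, so only accept status words whose
	 * toggle matches this command
	 */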
	do
		status = readw(host->mmio_base + GLAMO_REG_MMC_RB_STAT1);
	while (((((status >> 15) & 1) != (host->request_counter & 1)) ||
	       (!(status & (GLAMO_STAT1_MMC_RB_RRDY |
			    GLAMO_STAT1_MMC_RTOUT |
			    GLAMO_STAT1_MMC_DTOUT |
			    GLAMO_STAT1_MMC_BWERR |
			    GLAMO_STAT1_MMC_BRERR)))) && (timeout--));

	if ((status & (GLAMO_STAT1_MMC_RTOUT |
		       GLAMO_STAT1_MMC_DTOUT)) ||
	    (timeout == 0)) {
		cmd->error = -ETIMEDOUT;
	} else if (status & (GLAMO_STAT1_MMC_BWERR |
			     GLAMO_STAT1_MMC_BRERR)) {
		cmd->error = -EILSEQ;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = readw(&reg_resp[0]) |
				       (readw(&reg_resp[1]) << 16);
			cmd->resp[2] = readw(&reg_resp[2]) |
				       (readw(&reg_resp[3]) << 16);
			cmd->resp[1] = readw(&reg_resp[4]) |
				       (readw(&reg_resp[5]) << 16);
			cmd->resp[0] = readw(&reg_resp[6]) |
				       (readw(&reg_resp[7]) << 16);
		} else {
			cmd->resp[0] = (readw(&reg_resp[0]) >> 8) |
				       (readw(&reg_resp[1]) << 8) |
				       (readw(&reg_resp[2]) << 24);
		}
	}
}

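/*
 * Program the block size / count registers and, for writes, stage the
 * payload into the Glamo data window before the command is fired.
 */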
static int glamo_mci_prepare_pio(struct glamo_mci_host *host,
				 struct mmc_data *data)
{
	/* set up the block info */
	writew(data->blksz, host->mmio_base + GLAMO_REG_MMC_DATBLKLEN);
	writew(data->blocks, host->mmio_base + GLAMO_REG_MMC_DATBLKCNT);
	dev_dbg(&host->pdev->dev, "(blksz=%d, count=%d)\n",
		data->blksz, data->blocks);
	data->bytes_xfered = 0;

	/* if write, prep the write into the shared RAM before the command */
	if (data->flags & MMC_DATA_WRITE)
		do_pio_write(host);

	return 0;
}

static void glamo_mci_send_request(struct mmc_host *mmc)
{
	struct glamo_mci_host *host = mmc_priv(mmc);
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;
	int timeout = 1000000;

	host->request_counter++;
	/* this guy has data to read/write? */
	if (cmd->data) {
		if (glamo_mci_prepare_pio(host, cmd->data)) {
			cmd->data->error = -EIO;
			goto done;
		}
	}

	dev_dbg(&host->pdev->dev, "cmd 0x%x, "
		"arg 0x%x data=%p mrq->stop=%p flags 0x%x\n",
		cmd->opcode, cmd->arg, cmd->data, cmd->mrq->stop,
		cmd->flags);

	/* resume requested clock rate
	 * scale it down by sd_slow_ratio if platform requests it
	 */
	glamo_mci_fix_card_div(host, host->clk_div);

	glamo_mci_send_command(host, cmd);

	/*
	 * if we don't have bulk data to take care of, we're done
	 */
	if (!cmd->data || cmd->error)
		goto done;

	/*
	 * Otherwise we can use the interrupt as async completion --
	 * if there is read data coming, or we wait for write data to complete,
	 * exit without mmc_request_done() as the payload interrupt
	 * will service it
	 */
	dev_dbg(&host->pdev->dev, "Waiting for payload data\n");
	/*
	 * if the glamo INT# line isn't wired (*cough* it can happen)
	 * I'm afraid we have to spin on the IRQ status bit and "be
	 * our own INT# line"
	 */
	if (!host->pdata->core->irq_works) {
		/*
		 * we have faith we will get an "interrupt"...
		 * but something insane like suspend problems can mean
		 * we spin here forever, so we timeout after a LONG time
		 */
		while ((!(readw(host->pdata->core->base +
				GLAMO_REG_IRQ_STATUS) & GLAMO_IRQ_MMC)) &&
		       (timeout--));

		if (timeout < 0) {
			if (cmd->data->error)
				cmd->data->error = -ETIMEDOUT;
			dev_err(&host->pdev->dev, "Payload timeout\n");
			goto bail;
		}
		/* ack this interrupt source */
		writew(GLAMO_IRQ_MMC, host->pdata->core->base +
		       GLAMO_REG_IRQ_CLEAR);

		/* yay we are an interrupt controller! -- call the ISR
		 * it will stop clock to card
		 */
		glamo_mci_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);
	}
	return;
done:
	host->mrq = NULL;
	mmc_request_done(host->mmc, cmd->mrq);
bail:
	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* stop the clock to card */
		glamo_mci_fix_card_div(host, -1);
}

static void glamo_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct glamo_mci_host *host = mmc_priv(mmc);

	host->mrq = mrq;
	glamo_mci_send_request(mmc);
}

static void glamo_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct glamo_mci_host *host = mmc_priv(mmc);
	int bus_width = 0;
	int powering = 0;
	int ret;

	if (host->suspending) {
		dev_err(&host->pdev->dev, "IGNORING glamo_mci_set_ios while "
					  "suspended\n");
		return;
	}

	/* Set power */
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		ret = regulator_enable(host->regulator);
		if (ret)
			dev_err(&host->pdev->dev,
				"Failed to enable regulator: %d\n", ret);
		break;
	case MMC_POWER_ON:
		/*
		 * we should use very slow clock until first bulk
		 * transfer completes OK
		 */
		host->force_slow_during_powerup = 1;

		if (host->power_mode_current == MMC_POWER_OFF) {
			glamo_engine_enable(host->pdata->core,
					    GLAMO_ENGINE_MMC);
			powering = 1;
		}
		break;

	case MMC_POWER_OFF:
	default:
		if (host->power_mode_current == MMC_POWER_OFF)
			break;
		/* never want clocking with dead card */
		glamo_mci_fix_card_div(host, -1);

		glamo_engine_disable(host->pdata->core,
				     GLAMO_ENGINE_MMC);

		ret = regulator_disable(host->regulator);
		if (ret)
			dev_warn(&host->pdev->dev,
				 "Failed to disable regulator: %d\n", ret);
		break;
	}
	host->power_mode_current = ios->power_mode;

	if (host->vdd_current != ios->vdd) {
		ret = mmc_regulator_set_ocr(host->regulator, ios->vdd);
		if (ret)
			dev_err(&host->pdev->dev,
				"Failed to set regulator voltage: %d\n", ret);
		else
			host->vdd_current = ios->vdd;
	}
	glamo_mci_set_card_clock(host, ios->clock);

	/* after power-up, we are meant to give it >= 74 clocks so it can
	 * initialize itself.  Doubt any modern cards need it but anyway...
	 */
	if (powering)
		mdelay(1);

	if (!sd_idleclk && !host->force_slow_during_powerup)
		/* stop the clock to card, because we are idle until transfer */
		glamo_mci_fix_card_div(host, -1);

	if ((ios->power_mode == MMC_POWER_ON) ||
	    (ios->power_mode == MMC_POWER_UP)) {
		dev_info(&host->pdev->dev,
			 "powered (vdd = %d) clk: %lukHz div=%d (req: %ukHz). "
			 "Bus width=%d\n", (int)ios->vdd,
			 host->real_rate / 1000, (int)host->clk_div,
			 ios->clock / 1000, (int)ios->bus_width);
	} else
		dev_info(&host->pdev->dev, "glamo_mci_set_ios: power down.\n");

	/* set bus width */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		bus_width = GLAMO_BASIC_MMC_EN_4BIT_DATA;
	glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC,
			       GLAMO_BASIC_MMC_EN_4BIT_DATA |
			       GLAMO_BASIC_MMC_EN_DR_STR0 |
			       GLAMO_BASIC_MMC_EN_DR_STR1,
			       bus_width | sd_drive << 6);
}

/*
 * no physical write protect supported by us
 */
static int glamo_mci_get_ro(struct mmc_host *mmc)
{
	return 0;
}

static struct mmc_host_ops glamo_mci_ops = {
	.request	= glamo_mci_request,
	.set_ios	= glamo_mci_set_ios,
	.get_ro		= glamo_mci_get_ro,
};

static int glamo_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct glamo_mci_host *host;
	int ret;

	dev_info(&pdev->dev, "glamo_mci driver (C)2007 Openmoko, Inc\n");

	mmc = mmc_alloc_host(sizeof(struct glamo_mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto probe_out;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;
	host->pdata = pdev->dev.platform_data;
	host->power_mode_current = MMC_POWER_OFF;

	spin_lock_init(&host->lock);
	INIT_WORK(&host->irq_work, glamo_mci_irq_worker);

	host->regulator = regulator_get(pdev->dev.parent, "SD_3V3");
	if (!host->regulator || IS_ERR(host->regulator)) {
		dev_err(&pdev->dev, "Cannot proceed without regulator.\n");
		ret = -ENODEV;
		goto probe_free_host;
	}

	host->mmio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!host->mmio_mem) {
		dev_err(&pdev->dev,
			"failed to get io memory region resource.\n");
		ret = -ENOENT;
		goto probe_regulator_put;
	}

	host->mmio_mem = request_mem_region(host->mmio_mem->start,
					    resource_size(host->mmio_mem),
					    pdev->name);

	if (!host->mmio_mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_regulator_put;
	}

	host->mmio_base = ioremap(host->mmio_mem->start,
				  resource_size(host->mmio_mem));
	if (!host->mmio_base) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -EINVAL;
		goto probe_free_mem_region_mmio;
	}

	/* Get ahold of our data buffer we use for data in and out on MMC */
	host->data_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!host->data_mem) {
		dev_err(&pdev->dev,
			"failed to get io memory region resource.\n");
		ret = -ENOENT;
		goto probe_iounmap_mmio;
	}

	host->data_mem = request_mem_region(host->data_mem->start,
					    resource_size(host->data_mem),
					    pdev->name);

	if (!host->data_mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_iounmap_mmio;
	}
	host->data_base = ioremap(host->data_mem->start,
				  resource_size(host->data_mem));

	if (!host->data_base) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -EINVAL;
		goto probe_free_mem_region_data;
	}

	ret = request_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), glamo_mci_irq, IRQF_SHARED,
			  pdev->name, host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq.\n");
		goto probe_iounmap_data;
	}

	host->vdd_current = 0;
	host->clk_rate = 50000000; /* really it's 49152000 */
	host->clk_div = 16;

	/* explain our host controller capabilities */
	mmc->ops = &glamo_mci_ops;
	ret = mmc_regulator_get_ocrmask(host->regulator);
	if (ret < 0) {
		dev_warn(&pdev->dev,
			 "Failed to get ocr list for regulator: %d.\n", ret);
		ret = MMC_VDD_32_33 | MMC_VDD_33_34;
	}
	mmc->ocr_avail = ret;
	mmc->caps = MMC_CAP_4_BIT_DATA |
		    MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SD_HIGHSPEED;
	mmc->f_min = host->clk_rate / 256;
	mmc->f_max = sd_max_clk;

	mmc->max_blk_count = (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */
	mmc->max_blk_size = (1 << 12) - 1;  /* GLAMO_REG_MMC_RB_BLKLEN */
	mmc->max_req_size = resource_size(host->data_mem);
	mmc->max_seg_size = mmc->max_req_size;
	mmc->max_phys_segs = 128;
	mmc->max_hw_segs = 128;

	platform_set_drvdata(pdev, mmc);

	glamo_engine_enable(host->pdata->core, GLAMO_ENGINE_MMC);
	glamo_engine_reset(host->pdata->core, GLAMO_ENGINE_MMC);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "failed to add mmc host.\n");
		goto probe_freeirq;
	}

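	/*
	 * tell the controller where the data window lives: low then high
	 * 16 bits of the buffer bus address, for writes and for reads
	 */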
	writew((u16)(host->data_mem->start),
	       host->mmio_base + GLAMO_REG_MMC_WDATADS1);
	writew((u16)((host->data_mem->start) >> 16),
	       host->mmio_base + GLAMO_REG_MMC_WDATADS2);

	writew((u16)host->data_mem->start, host->mmio_base +
	       GLAMO_REG_MMC_RDATADS1);
	writew((u16)(host->data_mem->start >> 16), host->mmio_base +
	       GLAMO_REG_MMC_RDATADS2);

	dev_info(&pdev->dev, "initialisation done.\n");
	return 0;

probe_freeirq:
	free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);
probe_iounmap_data:
	iounmap(host->data_base);
probe_free_mem_region_data:
	release_mem_region(host->data_mem->start, resource_size(host->data_mem));
probe_iounmap_mmio:
	iounmap(host->mmio_base);
probe_free_mem_region_mmio:
	release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
probe_regulator_put:
	regulator_put(host->regulator);
probe_free_host:
	mmc_free_host(mmc);
probe_out:
	return ret;
}

static int glamo_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct glamo_mci_host *host = mmc_priv(mmc);

	free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);

	mmc_remove_host(mmc);
	iounmap(host->mmio_base);
	iounmap(host->data_base);
	release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
	release_mem_region(host->data_mem->start, resource_size(host->data_mem));

	regulator_put(host->regulator);

	glamo_engine_disable(host->pdata->core, GLAMO_ENGINE_MMC);

	/* mmc_free_host() frees our glamo_mci_host too, so it must come last */
	mmc_free_host(mmc);

	return 0;
}

#ifdef CONFIG_PM

static int glamo_mci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct glamo_mci_host *host = mmc_priv(mmc);
	int ret;

	cancel_work_sync(&host->irq_work);

	/*
	 * possible workaround for SD corruption during suspend/resume:
	 * make sure the clock is running during suspend, and consequently
	 * during resume
	 */
	glamo_mci_fix_card_div(host, host->clk_div);

	/* we are going to do more commands to override this in
	 * mmc_suspend_host(), so we need to change sd_idleclk for the
	 * duration as well
	 */
	suspend_sd_idleclk = sd_idleclk;
	sd_idleclk = 1;

	ret = mmc_suspend_host(mmc, state);

	host->suspending++;

	return ret;
}

int glamo_mci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct glamo_mci_host *host = mmc_priv(mmc);
	int ret;

	sd_idleclk = 1;

	glamo_engine_enable(host->pdata->core, GLAMO_ENGINE_MMC);
	glamo_engine_reset(host->pdata->core, GLAMO_ENGINE_MMC);

	host->suspending--;

	ret = mmc_resume_host(mmc);

	/* put sd_idleclk back to pre-suspend state */
	sd_idleclk = suspend_sd_idleclk;

	return ret;
}
EXPORT_SYMBOL_GPL(glamo_mci_resume);

#else /* CONFIG_PM */
#define glamo_mci_suspend NULL
#define glamo_mci_resume NULL
#endif /* CONFIG_PM */

static struct platform_driver glamo_mci_driver = {
	.driver.name	= "glamo-mci",
	.probe		= glamo_mci_probe,
	.remove		= glamo_mci_remove,
	.suspend	= glamo_mci_suspend,
	.resume		= glamo_mci_resume,
};

static int __init glamo_mci_init(void)
{
	return platform_driver_register(&glamo_mci_driver);
}

static void __exit glamo_mci_exit(void)
{
	platform_driver_unregister(&glamo_mci_driver);
}

module_init(glamo_mci_init);
module_exit(glamo_mci_exit);

MODULE_DESCRIPTION("Glamo MMC/SD Card Interface driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Green <andy@openmoko.com>");