openwrt.git: target/linux/cns3xxx/patches-2.6.31/102-cns3xxx_ata_support.patch
1 --- /dev/null
2 +++ b/drivers/ata/cns3xxx_ahci.c
3 @@ -0,0 +1,3281 @@
4 +/*
5 + * ahci.c - AHCI SATA support
6 + *
7 + * Maintained by: Jeff Garzik <jgarzik@pobox.com>
8 + * Please ALWAYS copy linux-ide@vger.kernel.org
9 + * on emails.
10 + *
11 + * Copyright 2004-2005 Red Hat, Inc.
12 + *
13 + *
14 + * This program is free software; you can redistribute it and/or modify
15 + * it under the terms of the GNU General Public License as published by
16 + * the Free Software Foundation; either version 2, or (at your option)
17 + * any later version.
18 + *
19 + * This program is distributed in the hope that it will be useful,
20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 + * GNU General Public License for more details.
23 + *
24 + * You should have received a copy of the GNU General Public License
25 + * along with this program; see the file COPYING. If not, write to
26 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 + *
28 + *
29 + * libata documentation is available via 'make {ps|pdf}docs',
30 + * as Documentation/DocBook/libata.*
31 + *
32 + * AHCI hardware documentation:
33 + * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
34 + * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
35 + *
36 + */
37 +/*
38 + * Cavium CNS3XXX notice
39 + * This driver is copied from ahci.c; only the memory access functions are
40 + * modified so that the driver can support non-PCI devices.
41 + */
42 +#include <linux/kernel.h>
43 +#include <linux/module.h>
44 +#include <linux/pci.h>
45 +#include <linux/init.h>
46 +#include <linux/blkdev.h>
47 +#include <linux/delay.h>
48 +#include <linux/interrupt.h>
49 +#include <linux/dma-mapping.h>
50 +#include <linux/device.h>
51 +#include <linux/dmi.h>
52 +#include <scsi/scsi_host.h>
53 +#include <scsi/scsi_cmnd.h>
54 +#include <linux/libata.h>
55 +#include <linux/platform_device.h>
56 +#include <mach/pm.h>
57 +#include <mach/misc.h>
58 +
59 +#define DRV_NAME "cns3xxx_ahci"
60 +#define DRV_VERSION "3.0"
61 +
62 +#define MISC_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_MISC_BASE_VIRT+offset)))
63 +#define CNS3XXX_MISC_REGISTER MISC_REG_VALUE(0x514)
64 +#define AHCI_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_SATA2_BASE_VIRT+offset)))
65 +#define CNS3XXX_AHCI_HOSTCTL_REG AHCI_REG_VALUE(0x04)
66 +
67 +/* Enclosure Management Control */
68 +#define EM_CTRL_MSG_TYPE 0x000f0000
69 +
70 +/* Enclosure Management LED Message Type */
71 +#define EM_MSG_LED_HBA_PORT 0x0000000f
72 +#define EM_MSG_LED_PMP_SLOT 0x0000ff00
73 +#define EM_MSG_LED_VALUE 0xffff0000
74 +#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
75 +#define EM_MSG_LED_VALUE_OFF 0xfff80000
76 +#define EM_MSG_LED_VALUE_ON 0x00010000
77 +
78 +/* PHY Misc Define */
79 +#define MISC_SATA_POWER_MODE MISC_MEM_MAP_VALUE(0x310)
80 +#define MISC_SATA_CORE_ID MISC_MEM_MAP_VALUE(0x600)
81 +#define MISC_SATA_PORT0_PHY_CFG MISC_MEM_MAP_VALUE(0x604)
82 +#define MISC_SATA_PORT1_PHY_CFG MISC_MEM_MAP_VALUE(0x608)
83 +#define MISC_SATA_PORT0_PHY_TST MISC_MEM_MAP_VALUE(0x60C)
84 +#define MISC_SATA_PORT1_PHY_TST MISC_MEM_MAP_VALUE(0x610)
85 +
86 +
87 +static int ahci_skip_host_reset;
88 +static int ahci_ignore_sss;
89 +
90 +module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
91 +MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
92 +
93 +module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
94 +MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
95 +
96 +static int ahci_enable_alpm(struct ata_port *ap,
97 + enum link_pm policy);
98 +static void ahci_disable_alpm(struct ata_port *ap);
99 +static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
100 +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
101 + size_t size);
102 +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
103 + ssize_t size);
104 +
105 +enum {
106 + AHCI_PCI_BAR = 5,
107 + AHCI_MAX_PORTS = 32,
108 + AHCI_MAX_SG = 168, /* hardware max is 64K */
109 + AHCI_DMA_BOUNDARY = 0xffffffff,
110 + AHCI_MAX_CMDS = 32,
111 + AHCI_CMD_SZ = 32,
112 + AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
113 + AHCI_RX_FIS_SZ = 256,
114 + AHCI_CMD_TBL_CDB = 0x40,
115 + AHCI_CMD_TBL_HDR_SZ = 0x80,
116 + AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
117 + AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
118 + AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
119 + AHCI_RX_FIS_SZ,
120 + AHCI_IRQ_ON_SG = (1 << 31),
121 + AHCI_CMD_ATAPI = (1 << 5),
122 + AHCI_CMD_WRITE = (1 << 6),
123 + AHCI_CMD_PREFETCH = (1 << 7),
124 + AHCI_CMD_RESET = (1 << 8),
125 + AHCI_CMD_CLR_BUSY = (1 << 10),
126 +
127 + RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
128 + RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
129 + RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
130 +
131 + board_ahci = 0,
132 + board_ahci_vt8251 = 1,
133 + board_ahci_ign_iferr = 2,
134 + board_ahci_sb600 = 3,
135 + board_ahci_mv = 4,
136 + board_ahci_sb700 = 5, /* for SB700 and SB800 */
137 + board_ahci_mcp65 = 6,
138 + board_ahci_nopmp = 7,
139 + board_ahci_yesncq = 8,
140 +
141 + /* global controller registers */
142 + HOST_CAP = 0x00, /* host capabilities */
143 + HOST_CTL = 0x04, /* global host control */
144 + HOST_IRQ_STAT = 0x08, /* interrupt status */
145 + HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
146 + HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
147 + HOST_EM_LOC = 0x1c, /* Enclosure Management location */
148 + HOST_EM_CTL = 0x20, /* Enclosure Management Control */
149 +
150 + /* HOST_CTL bits */
151 + HOST_RESET = (1 << 0), /* reset controller; self-clear */
152 + HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
153 + HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
154 +
155 + /* HOST_CAP bits */
156 + HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
157 + HOST_CAP_SSC = (1 << 14), /* Slumber capable */
158 + HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
159 + HOST_CAP_CLO = (1 << 24), /* Command List Override support */
160 + HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
161 + HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
162 + HOST_CAP_SNTF = (1 << 29), /* SNotification register */
163 + HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
164 + HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
165 +
166 + /* registers for each SATA port */
167 + PORT_LST_ADDR = 0x00, /* command list DMA addr */
168 + PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
169 + PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
170 + PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
171 + PORT_IRQ_STAT = 0x10, /* interrupt status */
172 + PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
173 + PORT_CMD = 0x18, /* port command */
174 + PORT_TFDATA = 0x20, /* taskfile data */
175 + PORT_SIG = 0x24, /* device TF signature */
176 + PORT_CMD_ISSUE = 0x38, /* command issue */
177 + PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
178 + PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
179 + PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
180 + PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
181 + PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
182 +
183 + /* PORT_IRQ_{STAT,MASK} bits */
184 + PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
185 + PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
186 + PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
187 + PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
188 + PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
189 + PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
190 + PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
191 + PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
192 +
193 + PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
194 + PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
195 + PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
196 + PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
197 + PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
198 + PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
199 + PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
200 + PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
201 + PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
202 +
203 + PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
204 + PORT_IRQ_IF_ERR |
205 + PORT_IRQ_CONNECT |
206 + PORT_IRQ_PHYRDY |
207 + PORT_IRQ_UNK_FIS |
208 + PORT_IRQ_BAD_PMP,
209 + PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
210 + PORT_IRQ_TF_ERR |
211 + PORT_IRQ_HBUS_DATA_ERR,
212 + DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
213 + PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
214 + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
215 +
216 + /* PORT_CMD bits */
217 + PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
218 + PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
219 + PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
220 + PORT_CMD_PMP = (1 << 17), /* PMP attached */
221 + PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
222 + PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
223 + PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
224 + PORT_CMD_CLO = (1 << 3), /* Command list override */
225 + PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
226 + PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
227 + PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
228 +
229 + PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
230 + PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
231 + PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
232 + PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
233 +
234 + /* hpriv->flags bits */
235 + AHCI_HFLAG_NO_NCQ = (1 << 0),
236 + AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
237 + AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
238 + AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
239 + AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
240 + AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
241 + AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
242 + AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
243 + AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
244 + AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
245 + AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
246 + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
247 + link offline */
248 +
249 + /* ap->flags bits */
250 +
251 + AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
252 + ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
253 + ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
254 + ATA_FLAG_IPM,
255 +
256 + ICH_MAP = 0x90, /* ICH MAP register */
257 +
258 + /* em constants */
259 + EM_MAX_SLOTS = 8,
260 + EM_MAX_RETRY = 5,
261 +
262 + /* em_ctl bits */
263 + EM_CTL_RST = (1 << 9), /* Reset */
264 + EM_CTL_TM = (1 << 8), /* Transmit Message */
265 + EM_CTL_ALHD = (1 << 26), /* Activity LED */
266 +
267 + /* CNS3XXX define */
268 + HOST_TIMER1MS = 0xe0, /* Timer 1ms register */
269 +};
270 +
271 +struct ahci_cmd_hdr {
272 + __le32 opts;
273 + __le32 status;
274 + __le32 tbl_addr;
275 + __le32 tbl_addr_hi;
276 + __le32 reserved[4];
277 +};
278 +
279 +struct ahci_sg {
280 + __le32 addr;
281 + __le32 addr_hi;
282 + __le32 reserved;
283 + __le32 flags_size;
284 +};
285 +
286 +struct ahci_em_priv {
287 + enum sw_activity blink_policy;
288 + struct timer_list timer;
289 + unsigned long saved_activity;
290 + unsigned long activity;
291 + unsigned long led_state;
292 +};
293 +
294 +struct ahci_host_priv {
295 + unsigned int flags; /* AHCI_HFLAG_* */
296 + u32 cap; /* cap to use */
297 + u32 port_map; /* port map to use */
298 + u32 saved_cap; /* saved initial cap */
299 + u32 saved_port_map; /* saved initial port_map */
300 + u32 em_loc; /* enclosure management location */
301 +};
302 +
303 +struct ahci_port_priv {
304 + struct ata_link *active_link;
305 + struct ahci_cmd_hdr *cmd_slot;
306 + dma_addr_t cmd_slot_dma;
307 + void *cmd_tbl;
308 + dma_addr_t cmd_tbl_dma;
309 + void *rx_fis;
310 + dma_addr_t rx_fis_dma;
311 + /* for NCQ spurious interrupt analysis */
312 + unsigned int ncq_saw_d2h:1;
313 + unsigned int ncq_saw_dmas:1;
314 + unsigned int ncq_saw_sdb:1;
315 + u32 intr_mask; /* interrupts to enable */
316 + /* enclosure management info per PM slot */
317 + struct ahci_em_priv em_priv[EM_MAX_SLOTS];
318 +};
319 +
320 +static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
321 +static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
322 +#if 0
323 +static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
324 +#else
325 +static int ahci_probe(struct platform_device *pdev);
326 +static int ahci_remove(struct platform_device *pdev);
327 +#endif
328 +static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
329 +static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
330 +static int ahci_port_start(struct ata_port *ap);
331 +static void ahci_port_stop(struct ata_port *ap);
332 +static void ahci_qc_prep(struct ata_queued_cmd *qc);
333 +static void ahci_freeze(struct ata_port *ap);
334 +static void ahci_thaw(struct ata_port *ap);
335 +static void ahci_pmp_attach(struct ata_port *ap);
336 +static void ahci_pmp_detach(struct ata_port *ap);
337 +static int ahci_softreset(struct ata_link *link, unsigned int *class,
338 + unsigned long deadline);
339 +static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
340 + unsigned long deadline);
341 +static int ahci_hardreset(struct ata_link *link, unsigned int *class,
342 + unsigned long deadline);
343 +static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
344 + unsigned long deadline);
345 +#if 0
346 +static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
347 + unsigned long deadline);
348 +#endif
349 +static void ahci_postreset(struct ata_link *link, unsigned int *class);
350 +static void ahci_error_handler(struct ata_port *ap);
351 +static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
352 +static int ahci_port_resume(struct ata_port *ap);
353 +static void ahci_dev_config(struct ata_device *dev);
354 +static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
355 + u32 opts);
356 +#ifdef CONFIG_PM
357 +static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
358 +static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
359 +static int ahci_pci_device_resume(struct pci_dev *pdev);
360 +#endif
361 +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
362 +static ssize_t ahci_activity_store(struct ata_device *dev,
363 + enum sw_activity val);
364 +static void ahci_init_sw_activity(struct ata_link *link);
365 +
366 +static struct device_attribute *ahci_shost_attrs[] = {
367 + &dev_attr_link_power_management_policy,
368 + &dev_attr_em_message_type,
369 + &dev_attr_em_message,
370 + NULL
371 +};
372 +
373 +static struct device_attribute *ahci_sdev_attrs[] = {
374 + &dev_attr_sw_activity,
375 + &dev_attr_unload_heads,
376 + NULL
377 +};
378 +
379 +static struct scsi_host_template ahci_sht = {
380 + ATA_NCQ_SHT(DRV_NAME),
381 + .can_queue = AHCI_MAX_CMDS - 1,
382 + .sg_tablesize = AHCI_MAX_SG,
383 + .dma_boundary = AHCI_DMA_BOUNDARY,
384 + .shost_attrs = ahci_shost_attrs,
385 + .sdev_attrs = ahci_sdev_attrs,
386 +};
387 +
388 +static struct ata_port_operations ahci_ops = {
389 + .inherits = &sata_pmp_port_ops,
390 +
391 + .qc_defer = sata_pmp_qc_defer_cmd_switch,
392 + .qc_prep = ahci_qc_prep,
393 + .qc_issue = ahci_qc_issue,
394 + .qc_fill_rtf = ahci_qc_fill_rtf,
395 +
396 + .freeze = ahci_freeze,
397 + .thaw = ahci_thaw,
398 + .softreset = ahci_softreset,
399 + .hardreset = ahci_hardreset,
400 + .postreset = ahci_postreset,
401 + .pmp_softreset = ahci_softreset,
402 + .error_handler = ahci_error_handler,
403 + .post_internal_cmd = ahci_post_internal_cmd,
404 + .dev_config = ahci_dev_config,
405 +
406 + .scr_read = ahci_scr_read,
407 + .scr_write = ahci_scr_write,
408 + .pmp_attach = ahci_pmp_attach,
409 + .pmp_detach = ahci_pmp_detach,
410 +
411 + .enable_pm = ahci_enable_alpm,
412 + .disable_pm = ahci_disable_alpm,
413 + .em_show = ahci_led_show,
414 + .em_store = ahci_led_store,
415 + .sw_activity_show = ahci_activity_show,
416 + .sw_activity_store = ahci_activity_store,
417 +#ifdef CONFIG_PM
418 + .port_suspend = ahci_port_suspend,
419 + .port_resume = ahci_port_resume,
420 +#endif
421 + .port_start = ahci_port_start,
422 + .port_stop = ahci_port_stop,
423 +};
424 +
425 +static struct ata_port_operations ahci_vt8251_ops = {
426 + .inherits = &ahci_ops,
427 + .hardreset = ahci_vt8251_hardreset,
428 +};
429 +
430 +#if 0
431 +static struct ata_port_operations ahci_p5wdh_ops = {
432 + .inherits = &ahci_ops,
433 + .hardreset = ahci_p5wdh_hardreset,
434 +};
435 +#endif
436 +
437 +static struct ata_port_operations ahci_sb600_ops = {
438 + .inherits = &ahci_ops,
439 + .softreset = ahci_sb600_softreset,
440 + .pmp_softreset = ahci_sb600_softreset,
441 +};
442 +
443 +#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
444 +
445 +static const struct ata_port_info ahci_port_info[] = {
446 + [board_ahci] =
447 + {
448 + .flags = AHCI_FLAG_COMMON,
449 + .pio_mask = ATA_PIO4,
450 + .udma_mask = ATA_UDMA6,
451 + .port_ops = &ahci_ops,
452 + },
453 + [board_ahci_vt8251] =
454 + {
455 + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
456 + .flags = AHCI_FLAG_COMMON,
457 + .pio_mask = ATA_PIO4,
458 + .udma_mask = ATA_UDMA6,
459 + .port_ops = &ahci_vt8251_ops,
460 + },
461 + [board_ahci_ign_iferr] =
462 + {
463 + AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
464 + .flags = AHCI_FLAG_COMMON,
465 + .pio_mask = ATA_PIO4,
466 + .udma_mask = ATA_UDMA6,
467 + .port_ops = &ahci_ops,
468 + },
469 + [board_ahci_sb600] =
470 + {
471 + AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
472 + AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255),
473 + .flags = AHCI_FLAG_COMMON,
474 + .pio_mask = ATA_PIO4,
475 + .udma_mask = ATA_UDMA6,
476 + .port_ops = &ahci_sb600_ops,
477 + },
478 + [board_ahci_mv] =
479 + {
480 + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
481 + AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
482 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
483 + ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
484 + .pio_mask = ATA_PIO4,
485 + .udma_mask = ATA_UDMA6,
486 + .port_ops = &ahci_ops,
487 + },
488 + [board_ahci_sb700] = /* for SB700 and SB800 */
489 + {
490 + AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
491 + .flags = AHCI_FLAG_COMMON,
492 + .pio_mask = ATA_PIO4,
493 + .udma_mask = ATA_UDMA6,
494 + .port_ops = &ahci_sb600_ops,
495 + },
496 + [board_ahci_mcp65] =
497 + {
498 + AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
499 + .flags = AHCI_FLAG_COMMON,
500 + .pio_mask = ATA_PIO4,
501 + .udma_mask = ATA_UDMA6,
502 + .port_ops = &ahci_ops,
503 + },
504 + [board_ahci_nopmp] =
505 + {
506 + AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
507 + .flags = AHCI_FLAG_COMMON,
508 + .pio_mask = ATA_PIO4,
509 + .udma_mask = ATA_UDMA6,
510 + .port_ops = &ahci_ops,
511 + },
512 + /* board_ahci_yesncq */
513 + {
514 + AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
515 + .flags = AHCI_FLAG_COMMON,
516 + .pio_mask = ATA_PIO4,
517 + .udma_mask = ATA_UDMA6,
518 + .port_ops = &ahci_ops,
519 + },
520 +};
521 +
522 +static const struct pci_device_id ahci_pci_tbl[] = {
523 + /* Intel */
524 + { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
525 + { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
526 + { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
527 + { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
528 + { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
529 + { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
530 + { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
531 + { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
532 + { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
533 + { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
534 + { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
535 + { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
536 + { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
537 + { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
538 + { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
539 + { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
540 + { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
541 + { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
542 + { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
543 + { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
544 + { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
545 + { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
546 + { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
547 + { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
548 + { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
549 + { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
550 + { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
551 + { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
552 + { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
553 + { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
554 + { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
555 + { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
556 + { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
557 + { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
558 + { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
559 + { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
560 + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
561 + { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
562 + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
563 + { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
564 +
565 + /* JMicron 360/1/3/5/6, match class to avoid IDE function */
566 + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
567 + PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
568 +
569 + /* ATI */
570 + { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
571 + { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
572 + { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
573 + { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
574 + { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
575 + { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
576 + { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
577 +
578 + /* VIA */
579 + { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
580 + { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
581 +
582 + /* NVIDIA */
583 + { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
584 + { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
585 + { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
586 + { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
587 + { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
588 + { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
589 + { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
590 + { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
591 + { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
592 + { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
593 + { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
594 + { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
595 + { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
596 + { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
597 + { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
598 + { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
599 + { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
600 + { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
601 + { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
602 + { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
603 + { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
604 + { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
605 + { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
606 + { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
607 + { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
608 + { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
609 + { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
610 + { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
611 + { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
612 + { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
613 + { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
614 + { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
615 + { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
616 + { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
617 + { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
618 + { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
619 + { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
620 + { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
621 + { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
622 + { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
623 + { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
624 + { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
625 + { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
626 + { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
627 + { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
628 + { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
629 + { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
630 + { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
631 + { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
632 + { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
633 + { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
634 + { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
635 + { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
636 + { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
637 + { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
638 + { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
639 + { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
640 + { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
641 + { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
642 + { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
643 + { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
644 + { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
645 + { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
646 + { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
647 + { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
648 + { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
649 + { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
650 + { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
651 +
652 + /* SiS */
653 + { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
654 + { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
655 + { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
656 +
657 + /* Marvell */
658 + { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
659 + { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
660 +
661 + /* Promise */
662 + { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
663 +
664 + /* Generic, PCI class code for AHCI */
665 + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
666 + PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
667 +
668 + { } /* terminate list */
669 +};
670 +
671 +
672 +#if 0
673 +static struct pci_driver ahci_pci_driver = {
674 + .name = DRV_NAME,
675 + .id_table = ahci_pci_tbl,
676 + .probe = ahci_init_one,
677 + .remove = ata_pci_remove_one,
678 +#ifdef CONFIG_PM
679 + .suspend = ahci_pci_device_suspend,
680 + .resume = ahci_pci_device_resume,
681 +#endif
682 +};
683 +#else
684 +static struct platform_driver ahci_driver = {
685 + .probe = ahci_probe,
686 + .remove = __devexit_p(ahci_remove),
687 + .driver = {
688 + .name = DRV_NAME,
689 + .owner = THIS_MODULE,
690 + },
691 +};
692 +#endif
693 +
694 +static int ahci_em_messages = 1;
695 +module_param(ahci_em_messages, int, 0444);
696 +/* add other LED protocol types when they become supported */
697 +MODULE_PARM_DESC(ahci_em_messages,
698 + "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
699 +
700 +#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
701 +static int marvell_enable;
702 +#else
703 +static int marvell_enable = 1;
704 +#endif
705 +module_param(marvell_enable, int, 0644);
706 +MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
707 +
708 +
709 +static inline int ahci_nr_ports(u32 cap)
710 +{
711 + return (cap & 0x1f) + 1;
712 +}
713 +
714 +static inline void __iomem *__ahci_port_base(struct ata_host *host,
715 + unsigned int port_no)
716 +{
717 +#if 0
718 + void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
719 +#else
720 + void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR];
721 +#endif
722 +
723 + return mmio + 0x100 + (port_no * 0x80);
724 +}
725 +
726 +static inline void __iomem *ahci_port_base(struct ata_port *ap)
727 +{
728 + return __ahci_port_base(ap->host, ap->port_no);
729 +}
730 +
731 +static void ahci_enable_ahci(void __iomem *mmio)
732 +{
733 + int i;
734 + u32 tmp;
735 +
736 + /* turn on AHCI_EN */
737 + tmp = readl(mmio + HOST_CTL);
738 + if (tmp & HOST_AHCI_EN)
739 + return;
740 +
741 + /* Some controllers need AHCI_EN to be written multiple times.
742 + * Try a few times before giving up.
743 + */
744 + for (i = 0; i < 5; i++) {
745 + tmp |= HOST_AHCI_EN;
746 + writel(tmp, mmio + HOST_CTL);
747 + tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
748 + if (tmp & HOST_AHCI_EN)
749 + return;
750 + msleep(10);
751 + }
752 +
753 + WARN_ON(1);
754 +}
755 +
756 +/**
757 + * ahci_save_initial_config - Save and fixup initial config values
758 + * @pdev: target PCI device
759 + * @hpriv: host private area to store config values
760 + *
761 + * Some registers containing configuration info might be set up by
762 + * BIOS and might be cleared on reset. This function saves the
763 + * initial values of those registers into @hpriv such that they
764 + * can be restored after controller reset.
765 + *
766 + * If inconsistent, config values are fixed up by this function.
767 + *
768 + * LOCKING:
769 + * None.
770 + */
771 +#if 0
772 +static void ahci_save_initial_config(struct pci_dev *pdev,
773 + struct ahci_host_priv *hpriv)
774 +#else
775 +static void ahci_save_initial_config(struct platform_device *pdev,
776 + struct ahci_host_priv *hpriv,
777 + u8 * base)
778 +#endif
779 +{
780 +#if 0
781 + void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
782 +#else
783 + void __iomem *mmio = (void __iomem *)base;
784 +#endif
785 + u32 cap, port_map;
786 + int i;
787 +#if 0
788 + int mv;
789 +#endif
790 +
791 + /* make sure AHCI mode is enabled before accessing CAP */
792 + ahci_enable_ahci(mmio);
793 +
794 + /* Values prefixed with saved_ are written back to host after
795 + * reset. Values without are used for driver operation.
796 + */
797 + hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
798 + hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
799 +
800 + /* some chips have errata preventing 64bit use */
801 + if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
802 + dev_printk(KERN_INFO, &pdev->dev,
803 + "controller can't do 64bit DMA, forcing 32bit\n");
804 + cap &= ~HOST_CAP_64;
805 + }
806 +
807 + if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
808 + dev_printk(KERN_INFO, &pdev->dev,
809 + "controller can't do NCQ, turning off CAP_NCQ\n");
810 + cap &= ~HOST_CAP_NCQ;
811 + }
812 +
813 + if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
814 + dev_printk(KERN_INFO, &pdev->dev,
815 + "controller can do NCQ, turning on CAP_NCQ\n");
816 + cap |= HOST_CAP_NCQ;
817 + }
818 +
819 + if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
820 + dev_printk(KERN_INFO, &pdev->dev,
821 + "controller can't do PMP, turning off CAP_PMP\n");
822 + cap &= ~HOST_CAP_PMP;
823 + }
824 +#if 0
825 + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
826 + port_map != 1) {
827 + dev_printk(KERN_INFO, &pdev->dev,
828 + "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
829 + port_map, 1);
830 + port_map = 1;
831 + }
832 +
833 + /*
834 + * Temporary Marvell 6145 hack: PATA port presence
835 + * is asserted through the standard AHCI port
836 + * presence register, as bit 4 (counting from 0)
837 + */
838 + if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
839 + if (pdev->device == 0x6121)
840 + mv = 0x3;
841 + else
842 + mv = 0xf;
843 + dev_printk(KERN_ERR, &pdev->dev,
844 + "MV_AHCI HACK: port_map %x -> %x\n",
845 + port_map,
846 + port_map & mv);
847 + dev_printk(KERN_ERR, &pdev->dev,
848 + "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
849 +
850 + port_map &= mv;
851 + }
852 +#endif
853 +
854 + /* cross check port_map and cap.n_ports */
855 + if (port_map) {
856 + int map_ports = 0;
857 +
858 + for (i = 0; i < AHCI_MAX_PORTS; i++)
859 + if (port_map & (1 << i))
860 + map_ports++;
861 +
862 + /* If PI has more ports than n_ports, whine, clear
863 + * port_map and let it be generated from n_ports.
864 + */
865 + if (map_ports > ahci_nr_ports(cap)) {
866 + dev_printk(KERN_WARNING, &pdev->dev,
867 + "implemented port map (0x%x) contains more "
868 + "ports than nr_ports (%u), using nr_ports\n",
869 + port_map, ahci_nr_ports(cap));
870 + port_map = 0;
871 + }
872 + }
873 +
874 + /* fabricate port_map from cap.nr_ports */
875 + if (!port_map) {
876 + port_map = (1 << ahci_nr_ports(cap)) - 1;
877 + dev_printk(KERN_WARNING, &pdev->dev,
878 + "forcing PORTS_IMPL to 0x%x\n", port_map);
879 +
880 + /* write the fixed up value to the PI register */
881 + hpriv->saved_port_map = port_map;
882 + }
883 +
884 + /* record values to use during operation */
885 + hpriv->cap = cap;
886 + hpriv->port_map = port_map;
887 +}
888 +
889 +/**
890 + * ahci_restore_initial_config - Restore initial config
891 + * @host: target ATA host
892 + *
893 + * Restore initial config stored by ahci_save_initial_config().
894 + *
895 + * LOCKING:
896 + * None.
897 + */
898 +static void ahci_restore_initial_config(struct ata_host *host)
899 +{
900 + struct ahci_host_priv *hpriv = host->private_data;
901 +#if 0
902 + void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
903 +#else
904 + void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR];
905 +#endif
906 +
907 + writel(hpriv->saved_cap, mmio + HOST_CAP);
908 + writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
909 + (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
910 +}
911 +
912 +static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
913 +{
914 + static const int offset[] = {
915 + [SCR_STATUS] = PORT_SCR_STAT,
916 + [SCR_CONTROL] = PORT_SCR_CTL,
917 + [SCR_ERROR] = PORT_SCR_ERR,
918 + [SCR_ACTIVE] = PORT_SCR_ACT,
919 + [SCR_NOTIFICATION] = PORT_SCR_NTF,
920 + };
921 + struct ahci_host_priv *hpriv = ap->host->private_data;
922 +
923 + if (sc_reg < ARRAY_SIZE(offset) &&
924 + (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
925 + return offset[sc_reg];
926 + return 0;
927 +}
928 +
929 +static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
930 +{
931 + void __iomem *port_mmio = ahci_port_base(link->ap);
932 + int offset = ahci_scr_offset(link->ap, sc_reg);
933 +
934 + if (offset) {
935 + *val = readl(port_mmio + offset);
936 + return 0;
937 + }
938 + return -EINVAL;
939 +}
940 +
941 +static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
942 +{
943 + void __iomem *port_mmio = ahci_port_base(link->ap);
944 + int offset = ahci_scr_offset(link->ap, sc_reg);
945 +
946 + if (offset) {
947 + writel(val, port_mmio + offset);
948 + return 0;
949 + }
950 + return -EINVAL;
951 +}
952 +
953 +static void ahci_start_engine(struct ata_port *ap)
954 +{
955 + void __iomem *port_mmio = ahci_port_base(ap);
956 + u32 tmp;
957 +
958 + /* start DMA */
959 + tmp = readl(port_mmio + PORT_CMD);
960 + tmp |= PORT_CMD_START;
961 + writel(tmp, port_mmio + PORT_CMD);
962 + readl(port_mmio + PORT_CMD); /* flush */
963 +}
964 +
965 +static int ahci_stop_engine(struct ata_port *ap)
966 +{
967 + void __iomem *port_mmio = ahci_port_base(ap);
968 + u32 tmp;
969 +
970 + tmp = readl(port_mmio + PORT_CMD);
971 +
972 + /* check if the HBA is idle */
973 + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
974 + return 0;
975 +
976 + /* setting HBA to idle */
977 + tmp &= ~PORT_CMD_START;
978 + writel(tmp, port_mmio + PORT_CMD);
979 +
980 + /* wait for engine to stop. This could be as long as 500 msec */
981 + tmp = ata_wait_register(port_mmio + PORT_CMD,
982 + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
983 + if (tmp & PORT_CMD_LIST_ON)
984 + return -EIO;
985 +
986 + return 0;
987 +}
988 +
989 +static void ahci_start_fis_rx(struct ata_port *ap)
990 +{
991 + void __iomem *port_mmio = ahci_port_base(ap);
992 + struct ahci_host_priv *hpriv = ap->host->private_data;
993 + struct ahci_port_priv *pp = ap->private_data;
994 + u32 tmp;
995 +
996 + /* set FIS registers */
997 + if (hpriv->cap & HOST_CAP_64)
998 + writel((pp->cmd_slot_dma >> 16) >> 16,
999 + port_mmio + PORT_LST_ADDR_HI);
1000 + writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1001 +
1002 + if (hpriv->cap & HOST_CAP_64)
1003 + writel((pp->rx_fis_dma >> 16) >> 16,
1004 + port_mmio + PORT_FIS_ADDR_HI);
1005 + writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1006 +
1007 + /* enable FIS reception */
1008 + tmp = readl(port_mmio + PORT_CMD);
1009 + tmp |= PORT_CMD_FIS_RX;
1010 + writel(tmp, port_mmio + PORT_CMD);
1011 +
1012 + /* flush */
1013 + readl(port_mmio + PORT_CMD);
1014 +}
1015 +
1016 +static int ahci_stop_fis_rx(struct ata_port *ap)
1017 +{
1018 + void __iomem *port_mmio = ahci_port_base(ap);
1019 + u32 tmp;
1020 +
1021 + /* disable FIS reception */
1022 + tmp = readl(port_mmio + PORT_CMD);
1023 + tmp &= ~PORT_CMD_FIS_RX;
1024 + writel(tmp, port_mmio + PORT_CMD);
1025 +
1026 + /* wait for completion, spec says 500ms, give it 1000 */
1027 + tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1028 + PORT_CMD_FIS_ON, 10, 1000);
1029 + if (tmp & PORT_CMD_FIS_ON)
1030 + return -EBUSY;
1031 +
1032 + return 0;
1033 +}
1034 +
1035 +static void ahci_power_up(struct ata_port *ap)
1036 +{
1037 + struct ahci_host_priv *hpriv = ap->host->private_data;
1038 + void __iomem *port_mmio = ahci_port_base(ap);
1039 + u32 cmd;
1040 +
1041 + cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1042 +
1043 + /* spin up device */
1044 + if (hpriv->cap & HOST_CAP_SSS) {
1045 + cmd |= PORT_CMD_SPIN_UP;
1046 + writel(cmd, port_mmio + PORT_CMD);
1047 + }
1048 +
1049 + /* wake up link */
1050 + writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1051 +}
1052 +
1053 +static void ahci_disable_alpm(struct ata_port *ap)
1054 +{
1055 + struct ahci_host_priv *hpriv = ap->host->private_data;
1056 + void __iomem *port_mmio = ahci_port_base(ap);
1057 + u32 cmd;
1058 + struct ahci_port_priv *pp = ap->private_data;
1059 +
1060 + /* IPM bits should be disabled by libata-core */
1061 + /* get the existing command bits */
1062 + cmd = readl(port_mmio + PORT_CMD);
1063 +
1064 + /* disable ALPM and ASP */
1065 + cmd &= ~PORT_CMD_ASP;
1066 + cmd &= ~PORT_CMD_ALPE;
1067 +
1068 + /* force the interface back to active */
1069 + cmd |= PORT_CMD_ICC_ACTIVE;
1070 +
1071 + /* write out new cmd value */
1072 + writel(cmd, port_mmio + PORT_CMD);
1073 + cmd = readl(port_mmio + PORT_CMD);
1074 +
1075 + /* wait 10ms to be sure we've come out of any low power state */
1076 + msleep(10);
1077 +
1078 + /* clear out any PhyRdy stuff from interrupt status */
1079 + writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1080 +
1081 + /* go ahead and clean out PhyRdy Change from Serror too */
1082 + ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1083 +
1084 + /*
1085 + * Clear flag to indicate that we should ignore all PhyRdy
1086 + * state changes
1087 + */
1088 + hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1089 +
1090 + /*
1091 + * Enable interrupts on Phy Ready.
1092 + */
1093 + pp->intr_mask |= PORT_IRQ_PHYRDY;
1094 + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1095 +
1096 + /*
1097 + * don't change the link pm policy - we can be called
1098 + * just to turn off link pm temporarily
1099 + */
1100 +}
1101 +
1102 +static int ahci_enable_alpm(struct ata_port *ap,
1103 + enum link_pm policy)
1104 +{
1105 + struct ahci_host_priv *hpriv = ap->host->private_data;
1106 + void __iomem *port_mmio = ahci_port_base(ap);
1107 + u32 cmd;
1108 + struct ahci_port_priv *pp = ap->private_data;
1109 + u32 asp;
1110 +
1111 + /* Make sure the host is capable of link power management */
1112 + if (!(hpriv->cap & HOST_CAP_ALPM))
1113 + return -EINVAL;
1114 +
1115 + switch (policy) {
1116 + case MAX_PERFORMANCE:
1117 + case NOT_AVAILABLE:
1118 + /*
1119 + * if we came here with NOT_AVAILABLE,
1120 + * it just means this is the first time we
1121 + * have tried to enable - default to max performance,
1122 + * and let the user go to lower power modes on request.
1123 + */
1124 + ahci_disable_alpm(ap);
1125 + return 0;
1126 + case MIN_POWER:
1127 + /* configure HBA to enter SLUMBER */
1128 + asp = PORT_CMD_ASP;
1129 + break;
1130 + case MEDIUM_POWER:
1131 + /* configure HBA to enter PARTIAL */
1132 + asp = 0;
1133 + break;
1134 + default:
1135 + return -EINVAL;
1136 + }
1137 +
1138 + /*
1139 + * Disable interrupts on Phy Ready. This keeps us from
1140 + * getting woken up due to spurious phy ready interrupts
1141 + * TBD - Hot plug should be done via polling now, is
1142 + * that even supported?
1143 + */
1144 + pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1145 + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1146 +
1147 + /*
1148 + * Set a flag to indicate that we should ignore all PhyRdy
1149 + * state changes since these can happen now whenever we
1150 + * change link state
1151 + */
1152 + hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1153 +
1154 + /* get the existing command bits */
1155 + cmd = readl(port_mmio + PORT_CMD);
1156 +
1157 + /*
1158 + * Set ASP based on Policy
1159 + */
1160 + cmd |= asp;
1161 +
1162 + /*
1163 + * Setting this bit will instruct the HBA to aggressively
1164 + * enter a lower power link state when it's appropriate and
1165 + * based on the value set above for ASP
1166 + */
1167 + cmd |= PORT_CMD_ALPE;
1168 +
1169 + /* write out new cmd value */
1170 + writel(cmd, port_mmio + PORT_CMD);
1171 + cmd = readl(port_mmio + PORT_CMD);
1172 +
1173 + /* IPM bits should be set by libata-core */
1174 + return 0;
1175 +}
1176 +
1177 +#ifdef CONFIG_PM
1178 +static void ahci_power_down(struct ata_port *ap)
1179 +{
1180 + struct ahci_host_priv *hpriv = ap->host->private_data;
1181 + void __iomem *port_mmio = ahci_port_base(ap);
1182 + u32 cmd, scontrol;
1183 +
1184 + if (!(hpriv->cap & HOST_CAP_SSS))
1185 + return;
1186 +
1187 + /* put device into listen mode, first set PxSCTL.DET to 0 */
1188 + scontrol = readl(port_mmio + PORT_SCR_CTL);
1189 + scontrol &= ~0xf;
1190 + writel(scontrol, port_mmio + PORT_SCR_CTL);
1191 +
1192 + /* then set PxCMD.SUD to 0 */
1193 + cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1194 + cmd &= ~PORT_CMD_SPIN_UP;
1195 + writel(cmd, port_mmio + PORT_CMD);
1196 +}
1197 +#endif
1198 +
1199 +static void ahci_start_port(struct ata_port *ap)
1200 +{
1201 + struct ahci_port_priv *pp = ap->private_data;
1202 + struct ata_link *link;
1203 + struct ahci_em_priv *emp;
1204 + ssize_t rc;
1205 + int i;
1206 +
1207 + /* enable FIS reception */
1208 + ahci_start_fis_rx(ap);
1209 +
1210 + /* enable DMA */
1211 + ahci_start_engine(ap);
1212 +
1213 + /* turn on LEDs */
1214 + if (ap->flags & ATA_FLAG_EM) {
1215 + ata_for_each_link(link, ap, EDGE) {
1216 + emp = &pp->em_priv[link->pmp];
1217 +
1218 + /* EM Transmit bit may be busy during init */
1219 + for (i = 0; i < EM_MAX_RETRY; i++) {
1220 + rc = ahci_transmit_led_message(ap,
1221 + emp->led_state,
1222 + 4);
1223 + if (rc == -EBUSY)
1224 + msleep(1);
1225 + else
1226 + break;
1227 + }
1228 + }
1229 + }
1230 +
1231 + if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1232 + ata_for_each_link(link, ap, EDGE)
1233 + ahci_init_sw_activity(link);
1234 +
1235 +}
1236 +
1237 +static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1238 +{
1239 + int rc;
1240 +
1241 + /* disable DMA */
1242 + rc = ahci_stop_engine(ap);
1243 + if (rc) {
1244 + *emsg = "failed to stop engine";
1245 + return rc;
1246 + }
1247 +
1248 + /* disable FIS reception */
1249 + rc = ahci_stop_fis_rx(ap);
1250 + if (rc) {
1251 + *emsg = "failed stop FIS RX";
1252 + return rc;
1253 + }
1254 +
1255 + return 0;
1256 +}
1257 +
1258 +static int ahci_reset_controller(struct ata_host *host)
1259 +{
1260 +#if 0
1261 + struct pci_dev *pdev = to_pci_dev(host->dev);
1262 + struct ahci_host_priv *hpriv = host->private_data;
1263 + void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1264 +#else
1265 + void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR];
1266 +#endif
1267 + u32 tmp;
1268 +
1269 + /* we must be in AHCI mode, before using anything
1270 + * AHCI-specific, such as HOST_RESET.
1271 + */
1272 + ahci_enable_ahci(mmio);
1273 +
1274 + /* global controller reset */
1275 + if (!ahci_skip_host_reset) {
1276 + tmp = readl(mmio + HOST_CTL);
1277 + if ((tmp & HOST_RESET) == 0) {
1278 + writel(tmp | HOST_RESET, mmio + HOST_CTL);
1279 + readl(mmio + HOST_CTL); /* flush */
1280 + }
1281 +
1282 + /*
1283 + * to perform host reset, OS should set HOST_RESET
1284 + * and poll until this bit is read to be "0".
1285 + * reset must complete within 1 second, or
1286 + * the hardware should be considered fried.
1287 + */
1288 + tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1289 + HOST_RESET, 10, 1000);
1290 +
1291 + if (tmp & HOST_RESET) {
1292 + dev_printk(KERN_ERR, host->dev,
1293 + "controller reset failed (0x%x)\n", tmp);
1294 + return -EIO;
1295 + }
1296 +
1297 + /* turn on AHCI mode */
1298 + ahci_enable_ahci(mmio);
1299 +
1300 + /* Some registers might be cleared on reset. Restore
1301 + * initial values.
1302 + */
1303 + ahci_restore_initial_config(host);
1304 + } else
1305 + dev_printk(KERN_INFO, host->dev,
1306 + "skipping global host reset\n");
1307 +
1308 +#if 0
1309 + if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1310 + u16 tmp16;
1311 +
1312 + /* configure PCS */
1313 + pci_read_config_word(pdev, 0x92, &tmp16);
1314 + if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1315 + tmp16 |= hpriv->port_map;
1316 + pci_write_config_word(pdev, 0x92, tmp16);
1317 + }
1318 + }
1319 +#endif
1320 +
1321 + return 0;
1322 +}
1323 +
1324 +static void ahci_sw_activity(struct ata_link *link)
1325 +{
1326 + struct ata_port *ap = link->ap;
1327 + struct ahci_port_priv *pp = ap->private_data;
1328 + struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1329 +
1330 + if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1331 + return;
1332 +
1333 + emp->activity++;
1334 + if (!timer_pending(&emp->timer))
1335 + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1336 +}
1337 +
1338 +static void ahci_sw_activity_blink(unsigned long arg)
1339 +{
1340 + struct ata_link *link = (struct ata_link *)arg;
1341 + struct ata_port *ap = link->ap;
1342 + struct ahci_port_priv *pp = ap->private_data;
1343 + struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1344 + unsigned long led_message = emp->led_state;
1345 + u32 activity_led_state;
1346 + unsigned long flags;
1347 +
1348 + led_message &= EM_MSG_LED_VALUE;
1349 + led_message |= ap->port_no | (link->pmp << 8);
1350 +
1351 + /* check to see if we've had activity. If so,
1352 + * toggle state of LED and reset timer. If not,
1353 + * turn LED to desired idle state.
1354 + */
1355 + spin_lock_irqsave(ap->lock, flags);
1356 + if (emp->saved_activity != emp->activity) {
1357 + emp->saved_activity = emp->activity;
1358 + /* get the current LED state */
1359 + activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1360 +
1361 + if (activity_led_state)
1362 + activity_led_state = 0;
1363 + else
1364 + activity_led_state = 1;
1365 +
1366 + /* clear old state */
1367 + led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1368 +
1369 + /* toggle state */
1370 + led_message |= (activity_led_state << 16);
1371 + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1372 + } else {
1373 + /* switch to idle */
1374 + led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1375 + if (emp->blink_policy == BLINK_OFF)
1376 + led_message |= (1 << 16);
1377 + }
1378 + spin_unlock_irqrestore(ap->lock, flags);
1379 + ahci_transmit_led_message(ap, led_message, 4);
1380 +}
1381 +
1382 +static void ahci_init_sw_activity(struct ata_link *link)
1383 +{
1384 + struct ata_port *ap = link->ap;
1385 + struct ahci_port_priv *pp = ap->private_data;
1386 + struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1387 +
1388 + /* init activity stats, setup timer */
1389 + emp->saved_activity = emp->activity = 0;
1390 + setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1391 +
1392 + /* check our blink policy and set flag for link if it's enabled */
1393 + if (emp->blink_policy)
1394 + link->flags |= ATA_LFLAG_SW_ACTIVITY;
1395 +}
1396 +
1397 +static int ahci_reset_em(struct ata_host *host)
1398 +{
1399 +#if 0
1400 + void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1401 +#else
1402 + void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR];
1403 +#endif
1404 + u32 em_ctl;
1405 +
1406 + em_ctl = readl(mmio + HOST_EM_CTL);
1407 + if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1408 + return -EINVAL;
1409 +
1410 + writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1411 + return 0;
1412 +}
1413 +
1414 +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1415 + ssize_t size)
1416 +{
1417 + struct ahci_host_priv *hpriv = ap->host->private_data;
1418 + struct ahci_port_priv *pp = ap->private_data;
1419 +#if 0
1420 + void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1421 +#else
1422 + void __iomem *mmio = (void __iomem *)ap->host->iomap;//[AHCI_BAR];
1423 +#endif
1424 + u32 em_ctl;
1425 + u32 message[] = {0, 0};
1426 + unsigned long flags;
1427 + int pmp;
1428 + struct ahci_em_priv *emp;
1429 +
1430 + /* get the slot number from the message */
1431 + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1432 + if (pmp < EM_MAX_SLOTS)
1433 + emp = &pp->em_priv[pmp];
1434 + else
1435 + return -EINVAL;
1436 +
1437 + spin_lock_irqsave(ap->lock, flags);
1438 +
1439 + /*
1440 + * if we are still busy transmitting a previous message,
1441 + * do not allow
1442 + */
1443 + em_ctl = readl(mmio + HOST_EM_CTL);
1444 + if (em_ctl & EM_CTL_TM) {
1445 + spin_unlock_irqrestore(ap->lock, flags);
1446 + return -EBUSY;
1447 + }
1448 +
1449 + /*
1450 + * create message header - this is all zero except for
1451 + * the message size, which is 4 bytes.
1452 + */
1453 + message[0] |= (4 << 8);
1454 +
1455 + /* ignore 0:4 of byte zero, fill in port info yourself */
1456 + message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1457 +
1458 + /* write message to EM_LOC */
1459 + writel(message[0], mmio + hpriv->em_loc);
1460 + writel(message[1], mmio + hpriv->em_loc+4);
1461 +
1462 + /* save off new led state for port/slot */
1463 + emp->led_state = state;
1464 +
1465 + /*
1466 + * tell hardware to transmit the message
1467 + */
1468 + writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1469 +
1470 + spin_unlock_irqrestore(ap->lock, flags);
1471 + return size;
1472 +}
1473 +
1474 +static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1475 +{
1476 + struct ahci_port_priv *pp = ap->private_data;
1477 + struct ata_link *link;
1478 + struct ahci_em_priv *emp;
1479 + int rc = 0;
1480 +
1481 + ata_for_each_link(link, ap, EDGE) {
1482 + emp = &pp->em_priv[link->pmp];
1483 + rc += sprintf(buf, "%lx\n", emp->led_state);
1484 + }
1485 + return rc;
1486 +}
1487 +
1488 +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1489 + size_t size)
1490 +{
1491 + int state;
1492 + int pmp;
1493 + struct ahci_port_priv *pp = ap->private_data;
1494 + struct ahci_em_priv *emp;
1495 +
1496 + state = simple_strtoul(buf, NULL, 0);
1497 +
1498 + /* get the slot number from the message */
1499 + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1500 + if (pmp < EM_MAX_SLOTS)
1501 + emp = &pp->em_priv[pmp];
1502 + else
1503 + return -EINVAL;
1504 +
1505 + /* mask off the activity bits if we are in sw_activity
1506 + * mode, user should turn off sw_activity before setting
1507 + * activity led through em_message
1508 + */
1509 + if (emp->blink_policy)
1510 + state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1511 +
1512 + return ahci_transmit_led_message(ap, state, size);
1513 +}
1514 +
1515 +static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1516 +{
1517 + struct ata_link *link = dev->link;
1518 + struct ata_port *ap = link->ap;
1519 + struct ahci_port_priv *pp = ap->private_data;
1520 + struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1521 + u32 port_led_state = emp->led_state;
1522 +
1523 + /* save the desired Activity LED behavior */
1524 + if (val == OFF) {
1525 + /* clear LFLAG */
1526 + link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1527 +
1528 + /* set the LED to OFF */
1529 + port_led_state &= EM_MSG_LED_VALUE_OFF;
1530 + port_led_state |= (ap->port_no | (link->pmp << 8));
1531 + ahci_transmit_led_message(ap, port_led_state, 4);
1532 + } else {
1533 + link->flags |= ATA_LFLAG_SW_ACTIVITY;
1534 + if (val == BLINK_OFF) {
1535 + /* set LED to ON for idle */
1536 + port_led_state &= EM_MSG_LED_VALUE_OFF;
1537 + port_led_state |= (ap->port_no | (link->pmp << 8));
1538 + port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1539 + ahci_transmit_led_message(ap, port_led_state, 4);
1540 + }
1541 + }
1542 + emp->blink_policy = val;
1543 + return 0;
1544 +}
1545 +
1546 +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1547 +{
1548 + struct ata_link *link = dev->link;
1549 + struct ata_port *ap = link->ap;
1550 + struct ahci_port_priv *pp = ap->private_data;
1551 + struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1552 +
1553 + /* display the saved value of activity behavior for this
1554 + * disk.
1555 + */
1556 + return sprintf(buf, "%d\n", emp->blink_policy);
1557 +}
1558 +
1559 +#if 0
1560 +static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1561 + int port_no, void __iomem *mmio,
1562 + void __iomem *port_mmio)
1563 +#else
1564 +static void ahci_port_init(struct platform_device *pdev, struct ata_port *ap,
1565 + int port_no, void __iomem *mmio,
1566 + void __iomem *port_mmio)
1567 +#endif
1568 +{
1569 + const char *emsg = NULL;
1570 + int rc;
1571 + u32 tmp;
1572 +
1573 + /* make sure port is not active */
1574 + rc = ahci_deinit_port(ap, &emsg);
1575 + if (rc)
1576 + dev_printk(KERN_WARNING, &pdev->dev,
1577 + "%s (%d)\n", emsg, rc);
1578 +
1579 + /* clear SError */
1580 + tmp = readl(port_mmio + PORT_SCR_ERR);
1581 + VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1582 + writel(tmp, port_mmio + PORT_SCR_ERR);
1583 +
1584 + /* clear port IRQ */
1585 + tmp = readl(port_mmio + PORT_IRQ_STAT);
1586 + VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1587 + if (tmp)
1588 + writel(tmp, port_mmio + PORT_IRQ_STAT);
1589 +
1590 + writel(1 << port_no, mmio + HOST_IRQ_STAT);
1591 +}
1592 +
1593 +static void ahci_init_controller(struct ata_host *host)
1594 +{
1595 + struct ahci_host_priv *hpriv = host->private_data;
1596 +#if 0
1597 + struct pci_dev *pdev = to_pci_dev(host->dev);
1598 + void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1599 +#else
1600 + struct platform_device *pdev = to_platform_device(host->dev);
1601 + void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR];
1602 +#endif
1603 + int i;
1604 + void __iomem *port_mmio;
1605 + u32 tmp;
1606 + int mv;
1607 +
1608 + if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1609 +#if 0
1610 + if (pdev->device == 0x6121)
1611 + mv = 2;
1612 + else
1613 + mv = 4;
1614 +#else
1615 + mv = 0;
1616 +#endif
1617 + port_mmio = __ahci_port_base(host, mv);
1618 +
1619 + writel(0, port_mmio + PORT_IRQ_MASK);
1620 +
1621 + /* clear port IRQ */
1622 + tmp = readl(port_mmio + PORT_IRQ_STAT);
1623 + VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1624 + if (tmp)
1625 + writel(tmp, port_mmio + PORT_IRQ_STAT);
1626 + }
1627 +
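+ /*
+  * HOST_TIMER1MS holds the number of hclk ticks in one millisecond.
+  * hclk appears to run at half the CPU clock on this SoC, so
+  * cns3xxx_cpu_clock() (in MHz) * 500 gives the 1 ms tick count.
+  */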
1628 + /* Set the 1 ms timer (hclk = 200 MHz) */
1629 + /* FIXME: add clock auto-detection */
1630 + printk(KERN_INFO "CPU clock: %d\n", cns3xxx_cpu_clock());
1631 + tmp = readl(mmio + HOST_TIMER1MS);
1632 + printk(KERN_INFO "*** Timer 1ms: %d (0x%x) ***\n", tmp, tmp);
1633 + writel(cns3xxx_cpu_clock() * 500, mmio + HOST_TIMER1MS);
1634 + tmp = readl(mmio + HOST_TIMER1MS);
1635 + printk(KERN_INFO "*** Set to: %d (0x%x) ***\n", tmp, tmp);
1636 +
1637 +
1638 +
1639 + for (i = 0; i < host->n_ports; i++) {
1640 + struct ata_port *ap = host->ports[i];
1641 +
1642 + port_mmio = ahci_port_base(ap);
1643 + if (ata_port_is_dummy(ap))
1644 + continue;
1645 +
1646 + ahci_port_init(pdev, ap, i, mmio, port_mmio);
1647 + }
1648 +
1649 + tmp = readl(mmio + HOST_CTL);
1650 + VPRINTK("HOST_CTL 0x%x\n", tmp);
1651 + writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1652 + tmp = readl(mmio + HOST_CTL);
1653 + VPRINTK("HOST_CTL 0x%x\n", tmp);
1654 +}
1655 +
1656 +static void ahci_dev_config(struct ata_device *dev)
1657 +{
1658 + struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1659 +
1660 + if (hpriv->flags & AHCI_HFLAG_SECT255) {
1661 + dev->max_sectors = 255;
1662 + ata_dev_printk(dev, KERN_INFO,
1663 + "SB600 AHCI: limiting to 255 sectors per cmd\n");
1664 + }
1665 +}
1666 +
1667 +static unsigned int ahci_dev_classify(struct ata_port *ap)
1668 +{
1669 + void __iomem *port_mmio = ahci_port_base(ap);
1670 + struct ata_taskfile tf;
1671 + u32 tmp;
1672 +
1673 + tmp = readl(port_mmio + PORT_SIG);
1674 + tf.lbah = (tmp >> 24) & 0xff;
1675 + tf.lbam = (tmp >> 16) & 0xff;
1676 + tf.lbal = (tmp >> 8) & 0xff;
1677 + tf.nsect = (tmp) & 0xff;
1678 +
1679 + return ata_dev_classify(&tf);
1680 +}
1681 +
1682 +static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1683 + u32 opts)
1684 +{
1685 + dma_addr_t cmd_tbl_dma;
1686 +
1687 + cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1688 +
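+ /*
+  * The cpu_to_le32() conversions are compiled out below; this port
+  * presumably relies on the CNS3XXX running little-endian, so the
+  * values are already in the byte order the HBA expects.
+  */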
1689 +#if 0
1690 + pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1691 +#else
1692 + pp->cmd_slot[tag].opts = opts;
1693 +#endif
1694 + pp->cmd_slot[tag].status = 0;
1695 +#if 0
1696 + pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1697 + pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1698 +#else
1699 + pp->cmd_slot[tag].tbl_addr = cmd_tbl_dma & 0xffffffff;
1700 + pp->cmd_slot[tag].tbl_addr_hi = (cmd_tbl_dma >> 16) >> 16;
1701 +#endif
1702 +}
1703 +
1704 +static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1705 +{
1706 + void __iomem *port_mmio = ahci_port_base(ap);
1707 + struct ahci_host_priv *hpriv = ap->host->private_data;
1708 + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1709 + u32 tmp;
1710 + int busy, rc;
1711 +
1712 + /* do we need to kick the port? */
1713 + busy = status & (ATA_BUSY | ATA_DRQ);
1714 + if (!busy && !force_restart)
1715 + return 0;
1716 +
1717 + /* stop engine */
1718 + rc = ahci_stop_engine(ap);
1719 + if (rc)
1720 + goto out_restart;
1721 +
1722 + /* need to do CLO? */
1723 + if (!busy) {
1724 + rc = 0;
1725 + goto out_restart;
1726 + }
1727 +
1728 + if (!(hpriv->cap & HOST_CAP_CLO)) {
1729 + rc = -EOPNOTSUPP;
1730 + goto out_restart;
1731 + }
1732 +
1733 + /* perform CLO */
1734 + tmp = readl(port_mmio + PORT_CMD);
1735 + tmp |= PORT_CMD_CLO;
1736 + writel(tmp, port_mmio + PORT_CMD);
1737 +
1738 + rc = 0;
1739 + tmp = ata_wait_register(port_mmio + PORT_CMD,
1740 + PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1741 + if (tmp & PORT_CMD_CLO)
1742 + rc = -EIO;
1743 +
1744 + /* restart engine */
1745 + out_restart:
1746 + ahci_start_engine(ap);
1747 + return rc;
1748 +}
1749 +
1750 +static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1751 + struct ata_taskfile *tf, int is_cmd, u16 flags,
1752 + unsigned long timeout_msec)
1753 +{
1754 + const u32 cmd_fis_len = 5; /* five dwords */
1755 + struct ahci_port_priv *pp = ap->private_data;
1756 + void __iomem *port_mmio = ahci_port_base(ap);
1757 + u8 *fis = pp->cmd_tbl;
1758 + u32 tmp;
1759 +
1760 + /* prep the command */
1761 + ata_tf_to_fis(tf, pmp, is_cmd, fis);
1762 + ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1763 +
1764 + /* issue & wait */
1765 + writel(1, port_mmio + PORT_CMD_ISSUE);
1766 +
1767 + if (timeout_msec) {
1768 + tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1769 + 1, timeout_msec);
1770 + if (tmp & 0x1) {
1771 + ahci_kick_engine(ap, 1);
1772 + return -EBUSY;
1773 + }
1774 + } else
1775 + readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1776 +
1777 + return 0;
1778 +}
1779 +
1780 +static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1781 + int pmp, unsigned long deadline,
1782 + int (*check_ready)(struct ata_link *link))
1783 +{
1784 + struct ata_port *ap = link->ap;
1785 + struct ahci_host_priv *hpriv = ap->host->private_data;
1786 + const char *reason = NULL;
1787 + unsigned long now, msecs;
1788 + struct ata_taskfile tf;
1789 + int rc;
1790 +
1791 + DPRINTK("ENTER\n");
1792 +
1793 + /* prepare for SRST (AHCI-1.1 10.4.1) */
1794 + rc = ahci_kick_engine(ap, 1);
1795 + if (rc && rc != -EOPNOTSUPP)
1796 + ata_link_printk(link, KERN_WARNING,
1797 + "failed to reset engine (errno=%d)\n", rc);
1798 +
1799 + ata_tf_init(link->device, &tf);
1800 +
1801 + /* issue the first D2H Register FIS */
1802 + msecs = 0;
1803 + now = jiffies;
1804 + if (time_after(now, deadline))
1805 + msecs = jiffies_to_msecs(deadline - now);
1806 +
1807 + tf.ctl |= ATA_SRST;
1808 + if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1809 + AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1810 + rc = -EIO;
1811 + reason = "1st FIS failed";
1812 + goto fail;
1813 + }
1814 +
1815 + /* spec says at least 5us, but be generous and sleep for 1ms */
1816 + msleep(1);
1817 +
1818 + /* issue the second D2H Register FIS */
1819 + tf.ctl &= ~ATA_SRST;
1820 + ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1821 +
1822 + /* wait for link to become ready */
1823 + rc = ata_wait_after_reset(link, deadline, check_ready);
1824 + if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1825 + /*
1826 + * Workaround for cases where link online status can't
1827 + * be trusted. Treat device readiness timeout as link
1828 + * offline.
1829 + */
1830 + ata_link_printk(link, KERN_INFO,
1831 + "device not ready, treating as offline\n");
1832 + *class = ATA_DEV_NONE;
1833 + } else if (rc) {
1834 + /* link occupied, -ENODEV too is an error */
1835 + reason = "device not ready";
1836 + goto fail;
1837 + } else
1838 + *class = ahci_dev_classify(ap);
1839 +
1840 + DPRINTK("EXIT, class=%u\n", *class);
1841 + return 0;
1842 +
1843 + fail:
1844 + ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1845 + return rc;
1846 +}
1847 +
1848 +static int ahci_check_ready(struct ata_link *link)
1849 +{
1850 + void __iomem *port_mmio = ahci_port_base(link->ap);
1851 + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1852 +
1853 + return ata_check_ready(status);
1854 +}
1855 +
1856 +static int ahci_softreset(struct ata_link *link, unsigned int *class,
1857 + unsigned long deadline)
1858 +{
1859 + int pmp = sata_srst_pmp(link);
1860 +
1861 + DPRINTK("ENTER\n");
1862 +
1863 + return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1864 +}
1865 +
1866 +static int ahci_sb600_check_ready(struct ata_link *link)
1867 +{
1868 + void __iomem *port_mmio = ahci_port_base(link->ap);
1869 + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1870 + u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1871 +
1872 + /*
1873 + * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1874 + * which can save timeout delay.
1875 + */
1876 + if (irq_status & PORT_IRQ_BAD_PMP)
1877 + return -EIO;
1878 +
1879 + return ata_check_ready(status);
1880 +}
1881 +
1882 +static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1883 + unsigned long deadline)
1884 +{
1885 + struct ata_port *ap = link->ap;
1886 + void __iomem *port_mmio = ahci_port_base(ap);
1887 + int pmp = sata_srst_pmp(link);
1888 + int rc;
1889 + u32 irq_sts;
1890 +
1891 + DPRINTK("ENTER\n");
1892 +
1893 + rc = ahci_do_softreset(link, class, pmp, deadline,
1894 + ahci_sb600_check_ready);
1895 +
1896 + /*
1897 + * Soft reset fails on some ATI chips with IPMS set when PMP
1898 + * is enabled but a SATA HDD/ODD is connected to a SATA port;
1899 + * retry the soft reset with PMP 0.
1900 + */
1901 + if (rc == -EIO) {
1902 + irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1903 + if (irq_sts & PORT_IRQ_BAD_PMP) {
1904 + ata_link_printk(link, KERN_WARNING,
1905 + "applying SB600 PMP SRST workaround "
1906 + "and retrying\n");
1907 + rc = ahci_do_softreset(link, class, 0, deadline,
1908 + ahci_check_ready);
1909 + }
1910 + }
1911 +
1912 + return rc;
1913 +}
1914 +
1915 +static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1916 + unsigned long deadline)
1917 +{
1918 + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1919 + struct ata_port *ap = link->ap;
1920 + struct ahci_port_priv *pp = ap->private_data;
1921 + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1922 + struct ata_taskfile tf;
1923 + bool online;
1924 + int rc;
1925 +
1926 + DPRINTK("ENTER\n");
1927 +
1928 + ahci_stop_engine(ap);
1929 +
1930 + /* clear D2H reception area to properly wait for D2H FIS */
1931 + ata_tf_init(link->device, &tf);
1932 + tf.command = 0x80;
1933 + ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1934 +
1935 + rc = sata_link_hardreset(link, timing, deadline, &online,
1936 + ahci_check_ready);
1937 +
1938 + ahci_start_engine(ap);
1939 +
1940 + if (online)
1941 + *class = ahci_dev_classify(ap);
1942 +
1943 + DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1944 + return rc;
1945 +}
1946 +
1947 +static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1948 + unsigned long deadline)
1949 +{
1950 + struct ata_port *ap = link->ap;
1951 + bool online;
1952 + int rc;
1953 +
1954 + DPRINTK("ENTER\n");
1955 +
1956 + ahci_stop_engine(ap);
1957 +
1958 + rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1959 + deadline, &online, NULL);
1960 +
1961 + ahci_start_engine(ap);
1962 +
1963 + DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1964 +
1965 + /* vt8251 doesn't clear BSY on signature FIS reception,
1966 + * request follow-up softreset.
1967 + */
1968 + return online ? -EAGAIN : rc;
1969 +}
1970 +
1971 +#if 0
1972 +static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1973 + unsigned long deadline)
1974 +{
1975 + struct ata_port *ap = link->ap;
1976 + struct ahci_port_priv *pp = ap->private_data;
1977 + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1978 + struct ata_taskfile tf;
1979 + bool online;
1980 + int rc;
1981 +
1982 + ahci_stop_engine(ap);
1983 +
1984 + /* clear D2H reception area to properly wait for D2H FIS */
1985 + ata_tf_init(link->device, &tf);
1986 + tf.command = 0x80;
1987 + ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1988 +
1989 + rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1990 + deadline, &online, NULL);
1991 +
1992 + ahci_start_engine(ap);
1993 +
1994 + /* The pseudo configuration device on SIMG4726 attached to
1995 + * ASUS P5W-DH Deluxe doesn't send signature FIS after
1996 + * hardreset if no device is attached to the first downstream
1997 + * port && the pseudo device locks up on SRST w/ PMP==0. To
1998 + * work around this, wait for !BSY only briefly. If BSY isn't
1999 + * cleared, perform CLO and proceed to IDENTIFY (achieved by
2000 + * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
2001 + *
2002 + * Wait for two seconds. Devices attached to downstream port
2003 + * which can't process the following IDENTIFY after this will
2004 + * have to be reset again. For most cases, this should
2005 + * suffice while making probing snappish enough.
2006 + */
2007 + if (online) {
2008 + rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
2009 + ahci_check_ready);
2010 + if (rc)
2011 + ahci_kick_engine(ap, 0);
2012 + }
2013 + return rc;
2014 +}
2015 +#endif
2016 +
2017 +static void ahci_postreset(struct ata_link *link, unsigned int *class)
2018 +{
2019 + struct ata_port *ap = link->ap;
2020 + void __iomem *port_mmio = ahci_port_base(ap);
2021 + u32 new_tmp, tmp;
2022 +
2023 + ata_std_postreset(link, class);
2024 +
2025 + /* Make sure port's ATAPI bit is set appropriately */
2026 + new_tmp = tmp = readl(port_mmio + PORT_CMD);
2027 + if (*class == ATA_DEV_ATAPI)
2028 + new_tmp |= PORT_CMD_ATAPI;
2029 + else
2030 + new_tmp &= ~PORT_CMD_ATAPI;
2031 + if (new_tmp != tmp) {
2032 + writel(new_tmp, port_mmio + PORT_CMD);
2033 + readl(port_mmio + PORT_CMD); /* flush */
2034 + }
2035 +}
2036 +
2037 +static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2038 +{
2039 + struct scatterlist *sg;
2040 + struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2041 + unsigned int si;
2042 +
2043 + VPRINTK("ENTER\n");
2044 +
2045 + /*
2046 + * Next, the S/G list.
2047 + */
2048 + for_each_sg(qc->sg, sg, qc->n_elem, si) {
2049 + dma_addr_t addr = sg_dma_address(sg);
2050 + u32 sg_len = sg_dma_len(sg);
2051 +
2052 +#if 0
2053 + ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2054 + ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2055 + ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2056 +#else
2057 + ahci_sg[si].addr = addr & 0xffffffff;
2058 + ahci_sg[si].addr_hi = (addr >> 16) >> 16;
2059 + ahci_sg[si].flags_size = sg_len - 1;
2060 +#endif
2061 + }
2062 +
2063 + return si;
2064 +}
2065 +
2066 +static void ahci_qc_prep(struct ata_queued_cmd *qc)
2067 +{
2068 + struct ata_port *ap = qc->ap;
2069 + struct ahci_port_priv *pp = ap->private_data;
2070 + int is_atapi = ata_is_atapi(qc->tf.protocol);
2071 + void *cmd_tbl;
2072 + u32 opts;
2073 + const u32 cmd_fis_len = 5; /* five dwords */
2074 + unsigned int n_elem;
2075 +
2076 + /*
2077 + * Fill in command table information. First, the header,
2078 + * a SATA Register - Host to Device command FIS.
2079 + */
2080 + cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2081 +
2082 + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2083 + if (is_atapi) {
2084 + memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2085 + memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2086 + }
2087 +
2088 + n_elem = 0;
2089 + if (qc->flags & ATA_QCFLAG_DMAMAP)
2090 + n_elem = ahci_fill_sg(qc, cmd_tbl);
2091 +
2092 + /*
2093 + * Fill in command slot information.
2094 + */
2095 + opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2096 + if (qc->tf.flags & ATA_TFLAG_WRITE)
2097 + opts |= AHCI_CMD_WRITE;
2098 + if (is_atapi)
2099 + opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2100 +
2101 + ahci_fill_cmd_slot(pp, qc->tag, opts);
2102 +}
2103 +
2104 +static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2105 +{
2106 + struct ahci_host_priv *hpriv = ap->host->private_data;
2107 + struct ahci_port_priv *pp = ap->private_data;
2108 + struct ata_eh_info *host_ehi = &ap->link.eh_info;
2109 + struct ata_link *link = NULL;
2110 + struct ata_queued_cmd *active_qc;
2111 + struct ata_eh_info *active_ehi;
2112 + u32 serror;
2113 +
2114 + /* determine active link */
2115 + ata_for_each_link(link, ap, EDGE)
2116 + if (ata_link_active(link))
2117 + break;
2118 + if (!link)
2119 + link = &ap->link;
2120 +
2121 + active_qc = ata_qc_from_tag(ap, link->active_tag);
2122 + active_ehi = &link->eh_info;
2123 +
2124 + /* record irq stat */
2125 + ata_ehi_clear_desc(host_ehi);
2126 + ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2127 +
2128 + /* AHCI needs SError cleared; otherwise, it might lock up */
2129 + ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2130 + ahci_scr_write(&ap->link, SCR_ERROR, serror);
2131 + host_ehi->serror |= serror;
2132 +
2133 + /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2134 + if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2135 + irq_stat &= ~PORT_IRQ_IF_ERR;
2136 +
2137 + if (irq_stat & PORT_IRQ_TF_ERR) {
2138 + /* If qc is active, charge it; otherwise, the active
2139 + * link. There's no active qc on NCQ errors. It will
2140 + * be determined by EH by reading log page 10h.
2141 + */
2142 + if (active_qc)
2143 + active_qc->err_mask |= AC_ERR_DEV;
2144 + else
2145 + active_ehi->err_mask |= AC_ERR_DEV;
2146 +
2147 + if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2148 + host_ehi->serror &= ~SERR_INTERNAL;
2149 + }
2150 +
2151 + if (irq_stat & PORT_IRQ_UNK_FIS) {
2152 + u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2153 +
2154 + active_ehi->err_mask |= AC_ERR_HSM;
2155 + active_ehi->action |= ATA_EH_RESET;
2156 + ata_ehi_push_desc(active_ehi,
2157 + "unknown FIS %08x %08x %08x %08x" ,
2158 + unk[0], unk[1], unk[2], unk[3]);
2159 + }
2160 +
2161 + if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2162 + active_ehi->err_mask |= AC_ERR_HSM;
2163 + active_ehi->action |= ATA_EH_RESET;
2164 + ata_ehi_push_desc(active_ehi, "incorrect PMP");
2165 + }
2166 +
2167 + if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2168 + host_ehi->err_mask |= AC_ERR_HOST_BUS;
2169 + host_ehi->action |= ATA_EH_RESET;
2170 + ata_ehi_push_desc(host_ehi, "host bus error");
2171 + }
2172 +
2173 + if (irq_stat & PORT_IRQ_IF_ERR) {
2174 + host_ehi->err_mask |= AC_ERR_ATA_BUS;
2175 + host_ehi->action |= ATA_EH_RESET;
2176 + ata_ehi_push_desc(host_ehi, "interface fatal error");
2177 + }
2178 +
2179 + if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2180 + ata_ehi_hotplugged(host_ehi);
2181 + ata_ehi_push_desc(host_ehi, "%s",
2182 + irq_stat & PORT_IRQ_CONNECT ?
2183 + "connection status changed" : "PHY RDY changed");
2184 + }
2185 +
2186 + /* okay, let's hand over to EH */
2187 +
2188 + if (irq_stat & PORT_IRQ_FREEZE)
2189 + ata_port_freeze(ap);
2190 + else
2191 + ata_port_abort(ap);
2192 +}
2193 +
2194 +static void ahci_port_intr(struct ata_port *ap)
2195 +{
2196 + void __iomem *port_mmio = ahci_port_base(ap);
2197 + struct ata_eh_info *ehi = &ap->link.eh_info;
2198 + struct ahci_port_priv *pp = ap->private_data;
2199 + struct ahci_host_priv *hpriv = ap->host->private_data;
2200 + int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2201 + u32 status, qc_active;
2202 + int rc;
2203 +
2204 + status = readl(port_mmio + PORT_IRQ_STAT);
2205 + writel(status, port_mmio + PORT_IRQ_STAT);
2206 +
2207 + /* ignore BAD_PMP while resetting */
2208 + if (unlikely(resetting))
2209 + status &= ~PORT_IRQ_BAD_PMP;
2210 +
2211 + /* If we are getting PhyRdy, this is
2212 + * just a power state change; clear it,
2213 + * plus the PhyRdy/Comm Wake bits, from
2214 + * SError
2215 + */
2216 + if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2217 + (status & PORT_IRQ_PHYRDY)) {
2218 + status &= ~PORT_IRQ_PHYRDY;
2219 + ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2220 + }
2221 +
2222 + if (unlikely(status & PORT_IRQ_ERROR)) {
2223 + ahci_error_intr(ap, status);
2224 + return;
2225 + }
2226 +
2227 + if (status & PORT_IRQ_SDB_FIS) {
2228 + /* If SNotification is available, leave notification
2229 + * handling to sata_async_notification(). If not,
2230 + * emulate it by snooping SDB FIS RX area.
2231 + *
2232 + * Snooping FIS RX area is probably cheaper than
2233 + * poking SNotification but some controllers which
2234 + * implement SNotification, ICH9 for example, don't
2235 + * store AN SDB FIS into receive area.
2236 + */
2237 + if (hpriv->cap & HOST_CAP_SNTF)
2238 + sata_async_notification(ap);
2239 + else {
2240 + /* If the 'N' bit in word 0 of the FIS is set,
2241 + * we just received asynchronous notification.
2242 + * Tell libata about it.
2243 + */
2244 + const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2245 +#if 0
2246 + u32 f0 = le32_to_cpu(f[0]);
2247 +#else
2248 + u32 f0 = f[0];
2249 +#endif
2250 +
2251 + if (f0 & (1 << 15))
2252 + sata_async_notification(ap);
2253 + }
2254 + }
2255 +
2256 + /* pp->active_link is valid iff any command is in flight */
2257 + if (ap->qc_active && pp->active_link->sactive)
2258 + qc_active = readl(port_mmio + PORT_SCR_ACT);
2259 + else
2260 + qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2261 +
2262 + rc = ata_qc_complete_multiple(ap, qc_active);
2263 +
2264 + /* while resetting, invalid completions are expected */
2265 + if (unlikely(rc < 0 && !resetting)) {
2266 + ehi->err_mask |= AC_ERR_HSM;
2267 + ehi->action |= ATA_EH_RESET;
2268 + ata_port_freeze(ap);
2269 + }
2270 +}
2271 +
2272 +static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2273 +{
2274 + struct ata_host *host = dev_instance;
2275 + struct ahci_host_priv *hpriv;
2276 + unsigned int i, handled = 0;
2277 + void __iomem *mmio;
2278 + u32 irq_stat, irq_masked;
2279 +
2280 + VPRINTK("ENTER\n");
2281 +
2282 + hpriv = host->private_data;
2283 +#if 0
2284 + mmio = host->iomap[AHCI_PCI_BAR];
2285 +#else
2286 + mmio = (void __iomem *)host->iomap; /* [AHCI_BAR] */
2287 +#endif
2288 +
2289 + /* sigh. 0xffffffff is a valid return from h/w */
2290 + irq_stat = readl(mmio + HOST_IRQ_STAT);
2291 + if (!irq_stat)
2292 + return IRQ_NONE;
2293 +
2294 + irq_masked = irq_stat & hpriv->port_map;
2295 +
2296 + spin_lock(&host->lock);
2297 +
2298 + for (i = 0; i < host->n_ports; i++) {
2299 + struct ata_port *ap;
2300 +
2301 + if (!(irq_masked & (1 << i)))
2302 + continue;
2303 +
2304 + ap = host->ports[i];
2305 + if (ap) {
2306 + ahci_port_intr(ap);
2307 + VPRINTK("port %u\n", i);
2308 + } else {
2309 + VPRINTK("port %u (no irq)\n", i);
2310 + if (ata_ratelimit())
2311 + dev_printk(KERN_WARNING, host->dev,
2312 + "interrupt on disabled port %u\n", i);
2313 + }
2314 +
2315 + handled = 1;
2316 + }
2317 +
2318 + /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2319 + * it should be cleared after all the port events are cleared;
2320 + * otherwise, it will raise a spurious interrupt after each
2321 + * valid one. Please read section 10.6.2 of ahci 1.1 for more
2322 + * information.
2323 + *
2324 + * Also, use the unmasked value to clear interrupt as spurious
2325 + * pending event on a dummy port might cause screaming IRQ.
2326 + */
2327 + writel(irq_stat, mmio + HOST_IRQ_STAT);
2328 +
2329 + spin_unlock(&host->lock);
2330 +
2331 + VPRINTK("EXIT\n");
2332 +
2333 + return IRQ_RETVAL(handled);
2334 +}
2335 +
2336 +static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2337 +{
2338 + struct ata_port *ap = qc->ap;
2339 + void __iomem *port_mmio = ahci_port_base(ap);
2340 + struct ahci_port_priv *pp = ap->private_data;
2341 +
2342 + /* Keep track of the currently active link. It will be used
2343 + * in completion path to determine whether NCQ phase is in
2344 + * progress.
2345 + */
2346 + pp->active_link = qc->dev->link;
2347 +
2348 + if (qc->tf.protocol == ATA_PROT_NCQ)
2349 + writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2350 + writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2351 +
2352 + ahci_sw_activity(qc->dev->link);
2353 +
2354 + return 0;
2355 +}
2356 +
2357 +static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2358 +{
2359 + struct ahci_port_priv *pp = qc->ap->private_data;
2360 + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2361 +
2362 + ata_tf_from_fis(d2h_fis, &qc->result_tf);
2363 + return true;
2364 +}
2365 +
2366 +static void ahci_freeze(struct ata_port *ap)
2367 +{
2368 + void __iomem *port_mmio = ahci_port_base(ap);
2369 +
2370 + /* turn IRQ off */
2371 + writel(0, port_mmio + PORT_IRQ_MASK);
2372 +}
2373 +
2374 +static void ahci_thaw(struct ata_port *ap)
2375 +{
2376 +#if 0
2377 + void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2378 +#else
2379 + void __iomem *mmio = (void __iomem *)ap->host->iomap; /* [AHCI_BAR] */
2380 +#endif
2381 + void __iomem *port_mmio = ahci_port_base(ap);
2382 + u32 tmp;
2383 + struct ahci_port_priv *pp = ap->private_data;
2384 +
2385 + /* clear IRQ */
2386 + tmp = readl(port_mmio + PORT_IRQ_STAT);
2387 + writel(tmp, port_mmio + PORT_IRQ_STAT);
2388 + writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2389 +
2390 + /* turn IRQ back on */
2391 + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2392 +}
2393 +
2394 +static void ahci_error_handler(struct ata_port *ap)
2395 +{
2396 + if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2397 + /* restart engine */
2398 + ahci_stop_engine(ap);
2399 + ahci_start_engine(ap);
2400 + }
2401 +
2402 + sata_pmp_error_handler(ap);
2403 +}
2404 +
2405 +static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2406 +{
2407 + struct ata_port *ap = qc->ap;
2408 +
2409 + /* make DMA engine forget about the failed command */
2410 + if (qc->flags & ATA_QCFLAG_FAILED)
2411 + ahci_kick_engine(ap, 1);
2412 +}
2413 +
2414 +static void ahci_pmp_attach(struct ata_port *ap)
2415 +{
2416 + void __iomem *port_mmio = ahci_port_base(ap);
2417 + struct ahci_port_priv *pp = ap->private_data;
2418 + u32 cmd;
2419 +
2420 + cmd = readl(port_mmio + PORT_CMD);
2421 + cmd |= PORT_CMD_PMP;
2422 + writel(cmd, port_mmio + PORT_CMD);
2423 +
2424 + pp->intr_mask |= PORT_IRQ_BAD_PMP;
2425 + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2426 +}
2427 +
2428 +static void ahci_pmp_detach(struct ata_port *ap)
2429 +{
2430 + void __iomem *port_mmio = ahci_port_base(ap);
2431 + struct ahci_port_priv *pp = ap->private_data;
2432 + u32 cmd;
2433 +
2434 + cmd = readl(port_mmio + PORT_CMD);
2435 + cmd &= ~PORT_CMD_PMP;
2436 + writel(cmd, port_mmio + PORT_CMD);
2437 +
2438 + pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2439 + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2440 +}
2441 +
2442 +static int ahci_port_resume(struct ata_port *ap)
2443 +{
2444 + ahci_power_up(ap);
2445 + ahci_start_port(ap);
2446 +
2447 + if (sata_pmp_attached(ap))
2448 + ahci_pmp_attach(ap);
2449 + else
2450 + ahci_pmp_detach(ap);
2451 +
2452 + return 0;
2453 +}
2454 +
2455 +#ifdef CONFIG_PM
2456 +static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2457 +{
2458 + const char *emsg = NULL;
2459 + int rc;
2460 +
2461 + rc = ahci_deinit_port(ap, &emsg);
2462 + if (rc == 0)
2463 + ahci_power_down(ap);
2464 + else {
2465 + ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2466 + ahci_start_port(ap);
2467 + }
2468 +
2469 + return rc;
2470 +}
2471 +
2472 +static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2473 +{
2474 + struct ata_host *host = dev_get_drvdata(&pdev->dev);
2475 + struct ahci_host_priv *hpriv = host->private_data;
2476 + void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2477 + u32 ctl;
2478 +
2479 + if (mesg.event & PM_EVENT_SUSPEND &&
2480 + hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2481 + dev_printk(KERN_ERR, &pdev->dev,
2482 + "BIOS update required for suspend/resume\n");
2483 + return -EIO;
2484 + }
2485 +
2486 + if (mesg.event & PM_EVENT_SLEEP) {
2487 + /* AHCI spec rev1.1 section 8.3.3:
2488 + * Software must disable interrupts prior to requesting a
2489 + * transition of the HBA to D3 state.
2490 + */
2491 + ctl = readl(mmio + HOST_CTL);
2492 + ctl &= ~HOST_IRQ_EN;
2493 + writel(ctl, mmio + HOST_CTL);
2494 + readl(mmio + HOST_CTL); /* flush */
2495 + }
2496 +
2497 + return ata_pci_device_suspend(pdev, mesg);
2498 +}
2499 +
2500 +static int ahci_pci_device_resume(struct pci_dev *pdev)
2501 +{
2502 + struct ata_host *host = dev_get_drvdata(&pdev->dev);
2503 + int rc;
2504 +
2505 + rc = ata_pci_device_do_resume(pdev);
2506 + if (rc)
2507 + return rc;
2508 +
2509 + if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2510 + rc = ahci_reset_controller(host);
2511 + if (rc)
2512 + return rc;
2513 +
2514 + ahci_init_controller(host);
2515 + }
2516 +
2517 + ata_host_resume(host);
2518 +
2519 + return 0;
2520 +}
2521 +#endif
2522 +
2523 +static int ahci_port_start(struct ata_port *ap)
2524 +{
2525 + struct device *dev = ap->host->dev;
2526 + struct ahci_port_priv *pp;
2527 + void *mem;
2528 + dma_addr_t mem_dma;
2529 +
2530 + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2531 + if (!pp)
2532 + return -ENOMEM;
2533 +
2534 + mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2535 + GFP_KERNEL);
2536 + if (!mem)
2537 + return -ENOMEM;
2538 + memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2539 +
2540 + /*
2541 + * First item in chunk of DMA memory: 32-slot command table,
2542 + * 32 bytes each in size
2543 + */
2544 + pp->cmd_slot = mem;
2545 + pp->cmd_slot_dma = mem_dma;
2546 +
2547 + mem += AHCI_CMD_SLOT_SZ;
2548 + mem_dma += AHCI_CMD_SLOT_SZ;
2549 +
2550 + /*
2551 + * Second item: Received-FIS area
2552 + */
2553 + pp->rx_fis = mem;
2554 + pp->rx_fis_dma = mem_dma;
2555 +
2556 + mem += AHCI_RX_FIS_SZ;
2557 + mem_dma += AHCI_RX_FIS_SZ;
2558 +
2559 + /*
2560 + * Third item: data area for storing a single command
2561 + * and its scatter-gather table
2562 + */
2563 + pp->cmd_tbl = mem;
2564 + pp->cmd_tbl_dma = mem_dma;
2565 +
2566 + /*
2567 + * Save off initial list of interrupts to be enabled.
2568 + * This could be changed later
2569 + */
2570 + pp->intr_mask = DEF_PORT_IRQ;
2571 +
2572 + ap->private_data = pp;
2573 +
2574 + /* engage engines, captain */
2575 + return ahci_port_resume(ap);
2576 +}
2577 +
2578 +static void ahci_port_stop(struct ata_port *ap)
2579 +{
2580 + const char *emsg = NULL;
2581 + int rc;
2582 +
2583 + /* de-initialize port */
2584 + rc = ahci_deinit_port(ap, &emsg);
2585 + if (rc)
2586 + ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2587 +}
2588 +
2589 +#if 0
2590 +static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2591 +{
2592 + int rc;
2593 +
2594 + if (using_dac &&
2595 + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2596 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2597 + if (rc) {
2598 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2599 + if (rc) {
2600 + dev_printk(KERN_ERR, &pdev->dev,
2601 + "64-bit DMA enable failed\n");
2602 + return rc;
2603 + }
2604 + }
2605 + } else {
2606 + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2607 + if (rc) {
2608 + dev_printk(KERN_ERR, &pdev->dev,
2609 + "32-bit DMA enable failed\n");
2610 + return rc;
2611 + }
2612 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2613 + if (rc) {
2614 + dev_printk(KERN_ERR, &pdev->dev,
2615 + "32-bit consistent DMA enable failed\n");
2616 + return rc;
2617 + }
2618 + }
2619 + return 0;
2620 +}
2621 +#endif
2622 +
2623 +static void ahci_print_info(struct ata_host *host)
2624 +{
2625 + struct ahci_host_priv *hpriv = host->private_data;
2626 +#if 0
2627 + struct pci_dev *pdev = to_pci_dev(host->dev);
2628 + void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2629 +#else
2630 + struct platform_device *pdev = to_platform_device(host->dev);
2631 + void __iomem *mmio = (void __iomem *)host->iomap; /* [AHCI_BAR] */
2632 +#endif
2633 + u32 vers, cap, impl, speed;
2634 + const char *speed_s;
2635 +#if 0
2636 + u16 cc;
2637 +#endif
2638 + const char *scc_s;
2639 +
2640 + vers = readl(mmio + HOST_VERSION);
2641 + cap = hpriv->cap;
2642 + impl = hpriv->port_map;
2643 +
2644 + speed = (cap >> 20) & 0xf;
2645 + if (speed == 1)
2646 + speed_s = "1.5";
2647 + else if (speed == 2)
2648 + speed_s = "3";
2649 + else if (speed == 3)
2650 + speed_s = "6";
2651 + else
2652 + speed_s = "?";
2653 +
2654 +#if 0
2655 + pci_read_config_word(pdev, 0x0a, &cc);
2656 + if (cc == PCI_CLASS_STORAGE_IDE)
2657 + scc_s = "IDE";
2658 + else if (cc == PCI_CLASS_STORAGE_SATA)
2659 + scc_s = "SATA";
2660 + else if (cc == PCI_CLASS_STORAGE_RAID)
2661 + scc_s = "RAID";
2662 + else
2663 + scc_s = "unknown";
2664 +#else
2665 + scc_s = "SATA";
2666 +#endif
2667 +
2668 + dev_printk(KERN_INFO, &pdev->dev,
2669 + "AHCI %02x%02x.%02x%02x "
2670 + "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2671 + ,
2672 +
2673 + (vers >> 24) & 0xff,
2674 + (vers >> 16) & 0xff,
2675 + (vers >> 8) & 0xff,
2676 + vers & 0xff,
2677 +
2678 + ((cap >> 8) & 0x1f) + 1,
2679 + (cap & 0x1f) + 1,
2680 + speed_s,
2681 + impl,
2682 + scc_s);
2683 +
2684 + dev_printk(KERN_INFO, &pdev->dev,
2685 + "flags: "
2686 + "%s%s%s%s%s%s%s"
2687 + "%s%s%s%s%s%s%s"
2688 + "%s\n"
2689 + ,
2690 +
2691 + cap & (1 << 31) ? "64bit " : "",
2692 + cap & (1 << 30) ? "ncq " : "",
2693 + cap & (1 << 29) ? "sntf " : "",
2694 + cap & (1 << 28) ? "ilck " : "",
2695 + cap & (1 << 27) ? "stag " : "",
2696 + cap & (1 << 26) ? "pm " : "",
2697 + cap & (1 << 25) ? "led " : "",
2698 +
2699 + cap & (1 << 24) ? "clo " : "",
2700 + cap & (1 << 19) ? "nz " : "",
2701 + cap & (1 << 18) ? "only " : "",
2702 + cap & (1 << 17) ? "pmp " : "",
2703 + cap & (1 << 15) ? "pio " : "",
2704 + cap & (1 << 14) ? "slum " : "",
2705 + cap & (1 << 13) ? "part " : "",
2706 + cap & (1 << 6) ? "ems ": ""
2707 + );
2708 +}
2709 +
2710 +#if 0
2711 +/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2712 + * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2713 + * support PMP and the 4726 either directly exports the device
2714 + * attached to the first downstream port or acts as a hardware storage
2715 + * controller and emulate a single ATA device (can be RAID 0/1 or some
2716 + * other configuration).
2717 + *
2718 + * When there's no device attached to the first downstream port of the
2719 + * 4726, "Config Disk" appears, which is a pseudo ATA device to
2720 + * configure the 4726. However, ATA emulation of the device is very
2721 + * lame. It doesn't send signature D2H Reg FIS after the initial
2722 + * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues.
2723 + *
2724 + * The following function works around the problem by always using
2725 + * hardreset on the port and not depending on receiving signature FIS
2726 + * afterward. If signature FIS isn't received soon, ATA class is
2727 + * assumed without follow-up softreset.
2728 + */
2729 +static void ahci_p5wdh_workaround(struct ata_host *host)
2730 +{
2731 + static struct dmi_system_id sysids[] = {
2732 + {
2733 + .ident = "P5W DH Deluxe",
2734 + .matches = {
2735 + DMI_MATCH(DMI_SYS_VENDOR,
2736 + "ASUSTEK COMPUTER INC"),
2737 + DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2738 + },
2739 + },
2740 + { }
2741 + };
2742 + struct pci_dev *pdev = to_pci_dev(host->dev);
2743 +
2744 + if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2745 + dmi_check_system(sysids)) {
2746 + struct ata_port *ap = host->ports[1];
2747 +
2748 + dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2749 + "Deluxe on-board SIMG4726 workaround\n");
2750 +
2751 + ap->ops = &ahci_p5wdh_ops;
2752 + ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2753 + }
2754 +}
2755 +
2756 +/*
2757 + * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older
2758 + * BIOS. The oldest version known to be broken is 0901 and working is
2759 + * 1501 which was released on 2007-10-26. Force 32bit DMA on anything
2760 + * older than 1501. Please read bko#9412 for more info.
2761 + */
2762 +static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
2763 +{
2764 + static const struct dmi_system_id sysids[] = {
2765 + {
2766 + .ident = "ASUS M2A-VM",
2767 + .matches = {
2768 + DMI_MATCH(DMI_BOARD_VENDOR,
2769 + "ASUSTeK Computer INC."),
2770 + DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2771 + },
2772 + },
2773 + { }
2774 + };
2775 + const char *cutoff_mmdd = "10/26";
2776 + const char *date;
2777 + int year;
2778 +
2779 + if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2780 + !dmi_check_system(sysids))
2781 + return false;
2782 +
2783 + /*
2784 + * Argh.... both version and date are free form strings.
2785 + * Let's hope they're using the same date format across
2786 + * different versions.
2787 + */
2788 + date = dmi_get_system_info(DMI_BIOS_DATE);
2789 + year = dmi_get_year(DMI_BIOS_DATE);
2790 + if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
2791 + (year > 2007 ||
2792 + (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
2793 + return false;
2794 +
2795 + dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, "
2796 + "forcing 32bit DMA, update BIOS\n");
2797 +
2798 + return true;
2799 +}
2800 +
2801 +static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2802 +{
2803 + static const struct dmi_system_id broken_systems[] = {
2804 + {
2805 + .ident = "HP Compaq nx6310",
2806 + .matches = {
2807 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2808 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2809 + },
2810 + /* PCI slot number of the controller */
2811 + .driver_data = (void *)0x1FUL,
2812 + },
2813 + {
2814 + .ident = "HP Compaq 6720s",
2815 + .matches = {
2816 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2817 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2818 + },
2819 + /* PCI slot number of the controller */
2820 + .driver_data = (void *)0x1FUL,
2821 + },
2822 +
2823 + { } /* terminate list */
2824 + };
2825 + const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2826 +
2827 + if (dmi) {
2828 + unsigned long slot = (unsigned long)dmi->driver_data;
2829 + /* apply the quirk only to on-board controllers */
2830 + return slot == PCI_SLOT(pdev->devfn);
2831 + }
2832 +
2833 + return false;
2834 +}
2835 +
2836 +static bool ahci_broken_suspend(struct pci_dev *pdev)
2837 +{
2838 + static const struct dmi_system_id sysids[] = {
2839 + /*
2840 + * On HP dv[4-6] and HDX18 with earlier BIOSen, link
2841 + * to the harddisk doesn't become online after
2842 + * resuming from STR. Warn and fail suspend.
2843 + */
2844 + {
2845 + .ident = "dv4",
2846 + .matches = {
2847 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2848 + DMI_MATCH(DMI_PRODUCT_NAME,
2849 + "HP Pavilion dv4 Notebook PC"),
2850 + },
2851 + .driver_data = "F.30", /* cutoff BIOS version */
2852 + },
2853 + {
2854 + .ident = "dv5",
2855 + .matches = {
2856 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2857 + DMI_MATCH(DMI_PRODUCT_NAME,
2858 + "HP Pavilion dv5 Notebook PC"),
2859 + },
2860 + .driver_data = "F.16", /* cutoff BIOS version */
2861 + },
2862 + {
2863 + .ident = "dv6",
2864 + .matches = {
2865 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2866 + DMI_MATCH(DMI_PRODUCT_NAME,
2867 + "HP Pavilion dv6 Notebook PC"),
2868 + },
2869 + .driver_data = "F.21", /* cutoff BIOS version */
2870 + },
2871 + {
2872 + .ident = "HDX18",
2873 + .matches = {
2874 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2875 + DMI_MATCH(DMI_PRODUCT_NAME,
2876 + "HP HDX18 Notebook PC"),
2877 + },
2878 + .driver_data = "F.23", /* cutoff BIOS version */
2879 + },
2880 + { } /* terminate list */
2881 + };
2882 + const struct dmi_system_id *dmi = dmi_first_match(sysids);
2883 + const char *ver;
2884 +
2885 + if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
2886 + return false;
2887 +
2888 + ver = dmi_get_system_info(DMI_BIOS_VERSION);
2889 +
2890 + return !ver || strcmp(ver, dmi->driver_data) < 0;
2891 +}
2892 +
2893 +static bool ahci_broken_online(struct pci_dev *pdev)
2894 +{
2895 +#define ENCODE_BUSDEVFN(bus, slot, func) \
2896 + (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
2897 + static const struct dmi_system_id sysids[] = {
2898 + /*
2899 + * There are several gigabyte boards which use
2900 + * SIMG5723s configured as hardware RAID. Certain
2901 + * 5723 firmware revisions shipped there keep the link
2902 + * online but fail to answer properly to SRST or
2903 + * IDENTIFY when no device is attached downstream
2904 + * causing libata to retry quite a few times leading
2905 + * to excessive detection delay.
2906 + *
2907 + * As these firmwares respond to the second reset try
2908 + * with invalid device signature, considering unknown
2909 + * sig as offline works around the problem acceptably.
2910 + */
2911 + {
2912 + .ident = "EP45-DQ6",
2913 + .matches = {
2914 + DMI_MATCH(DMI_BOARD_VENDOR,
2915 + "Gigabyte Technology Co., Ltd."),
2916 + DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2917 + },
2918 + .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2919 + },
2920 + {
2921 + .ident = "EP45-DS5",
2922 + .matches = {
2923 + DMI_MATCH(DMI_BOARD_VENDOR,
2924 + "Gigabyte Technology Co., Ltd."),
2925 + DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2926 + },
2927 + .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2928 + },
2929 + { } /* terminate list */
2930 + };
2931 +#undef ENCODE_BUSDEVFN
2932 + const struct dmi_system_id *dmi = dmi_first_match(sysids);
2933 + unsigned int val;
2934 +
2935 + if (!dmi)
2936 + return false;
2937 +
2938 + val = (unsigned long)dmi->driver_data;
2939 +
2940 + return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2941 +}
2942 +
2943 +#endif
2944 +static int ahci_remove(struct platform_device *pdev)
2945 +{
2946 + struct device *dev = &pdev->dev;
2947 + struct ata_host *host = dev_get_drvdata(dev);
2948 +
2949 + ata_host_detach(host);
2950 + return 0;
2951 +}
2952 +
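+/*
+ * Platform-device probe: the PCI ahci_init_one() adapted to pick up the
+ * AHCI register window and IRQ from platform resources instead of PCI
+ * BARs and config space.
+ */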
2953 +#if 0
2954 +static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2955 +#else
2956 +static int __init ahci_probe(struct platform_device *pdev)
2957 +#endif
2958 +{
2959 + static int printed_version;
2960 +#if 0
2961 + unsigned int board_id = ent->driver_data;
2962 + struct ata_port_info pi = ahci_port_info[board_id];
2963 +#else
2964 + struct ata_port_info pi = ahci_port_info[board_ahci];
2965 +#endif
2966 + const struct ata_port_info *ppi[] = { &pi, NULL };
2967 + struct device *dev = &pdev->dev;
2968 + struct ahci_host_priv *hpriv;
2969 + struct ata_host *host;
2970 + int n_ports, i, rc;
2971 + struct resource *res;
2972 + u8 *base = NULL;
2973 +
2974 + VPRINTK("ENTER\n");
2975 +
2976 + WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2977 +
2978 + if (!printed_version++)
2979 + dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2980 +
2981 +#if 0
2982 + /* The AHCI driver can only drive the SATA ports, the PATA driver
2983 + can drive them all so if both drivers are selected make sure
2984 + AHCI stays out of the way */
2985 + if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2986 + return -ENODEV;
2987 +
2988 + /* acquire resources */
2989 + rc = pcim_enable_device(pdev);
2990 + if (rc)
2991 + return rc;
2992 +
2993 + /* AHCI controllers often implement SFF compatible interface.
2994 + * Grab all PCI BARs just in case.
2995 + */
2996 + rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2997 + if (rc == -EBUSY)
2998 + pcim_pin_device(pdev);
2999 + if (rc)
3000 + return rc;
3001 +
3002 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3003 + (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3004 + u8 map;
3005 +
3006 + /* ICH6s share the same PCI ID for both piix and ahci
3007 + * modes. Enabling ahci mode while MAP indicates
3008 + * combined mode is a bad idea. Yield to ata_piix.
3009 + */
3010 + pci_read_config_byte(pdev, ICH_MAP, &map);
3011 + if (map & 0x3) {
3012 + dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3013 + "combined mode, can't enable AHCI mode\n");
3014 + return -ENODEV;
3015 + }
3016 + }
3017 +#endif
3018 +
3019 + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3020 + if (!hpriv)
3021 + return -ENOMEM;
3022 + hpriv->flags |= (unsigned long)pi.private_data;
3023 +
3024 +#if 0
3025 + /* MCP65 revision A1 and A2 can't do MSI */
3026 + if (board_id == board_ahci_mcp65 &&
3027 + (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3028 + hpriv->flags |= AHCI_HFLAG_NO_MSI;
3029 +
3030 + /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3031 + if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3032 + hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3033 +
3034 + /* apply ASUS M2A_VM quirk */
3035 + if (ahci_asus_m2a_vm_32bit_only(pdev))
3036 + hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
3037 +
3038 + if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
3039 + pci_enable_msi(pdev);
3040 +#endif
3041 +
3042 + /* Cavium CNS3XXX initialization */
3043 + /* Get SATA register base address */
3044 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3045 + if (!res) {
3046 + dev_err(&pdev->dev, "no reg addr\n");
3047 + return -ENODEV;
3048 + }
3049 +
3050 + /* ioremap SATA registers */
3051 + base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
3052 +
3053 + if (!base) {
3054 + dev_err(&pdev->dev, "ioremap failed for 0x%x\n", res->start);
3055 + return -ENODEV;
3056 + }
3057 +
3058 +#if 0
3059 + /* reset PHY test chip */
3060 + printk("*** Reset PHY ***\n");
3061 + CNS3XXX_MISC_REGISTER |= 0xF;
3062 + mdelay(100);
3063 +
3064 + printk("%s %d, base:0x%x\n",__FUNCTION__,__LINE__,(u32)base);
3065 +
3066 + /* set PI first */
3067 + printk("*** Manually set PI ***\n");
3068 + writel(0x1, (void __iomem *)base + HOST_PORTS_IMPL);
3069 + printk("*** Now PI is: 0x%x ***\n",readl((void __iomem *)base + HOST_PORTS_IMPL));
3070 +#endif
3071 +
3072 +
3073 +
3074 +
3075 + /* save initial config */
3076 +#if 0
3077 + ahci_save_initial_config(pdev, hpriv);
3078 +#else
3079 + ahci_save_initial_config(pdev, hpriv, base);
3080 +#endif
3081 +
3082 + /* prepare host */
3083 + if (hpriv->cap & HOST_CAP_NCQ)
3084 + pi.flags |= ATA_FLAG_NCQ;
3085 +
3086 + if (hpriv->cap & HOST_CAP_PMP)
3087 + pi.flags |= ATA_FLAG_PMP;
3088 +
3089 + if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3090 + u8 messages;
3091 +#if 0
3092 + void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3093 +#else
3094 + void __iomem *mmio = (void __iomem *)base;
3095 +#endif
3096 + u32 em_loc = readl(mmio + HOST_EM_LOC);
3097 + u32 em_ctl = readl(mmio + HOST_EM_CTL);
3098 +
3099 + messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3100 +
3101 + /* we only support LED message type right now */
3102 + if ((messages & 0x01) && (ahci_em_messages == 1)) {
3103 + /* store em_loc */
3104 + hpriv->em_loc = ((em_loc >> 16) * 4);
3105 + pi.flags |= ATA_FLAG_EM;
3106 + if (!(em_ctl & EM_CTL_ALHD))
3107 + pi.flags |= ATA_FLAG_SW_ACTIVITY;
3108 + }
3109 + }
3110 +
3111 +#if 0
3112 + if (ahci_broken_system_poweroff(pdev)) {
3113 + pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3114 + dev_info(&pdev->dev,
3115 + "quirky BIOS, skipping spindown on poweroff\n");
3116 + }
3117 +
3118 + if (ahci_broken_suspend(pdev)) {
3119 + hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3120 + dev_printk(KERN_WARNING, &pdev->dev,
3121 + "BIOS update required for suspend/resume\n");
3122 + }
3123 +
3124 + if (ahci_broken_online(pdev)) {
3125 + hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3126 + dev_info(&pdev->dev,
3127 + "online status unreliable, applying workaround\n");
3128 + }
3129 +#endif
3130 +
3131 + /* CAP.NP sometimes indicates the index of the last enabled
3132 + * port, at other times, that of the last possible port, so
3133 + * determining the maximum port number requires looking at
3134 + * both CAP.NP and port_map.
3135 + */
3136 + n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3137 +
3138 + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3139 + if (!host)
3140 + return -ENOMEM;
3141 +#if 0
3142 + host->iomap = pcim_iomap_table(pdev);
3143 +#else
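+ /*
+  * No PCI BAR table on this platform: store the single ioremapped
+  * register base in host->iomap and cast it back wherever the PCI
+  * driver would index iomap[AHCI_PCI_BAR].
+  */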
3144 + host->iomap = (void __iomem *)base;
3145 +#endif
3146 + host->private_data = hpriv;
3147 +
3148 + if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3149 + host->flags |= ATA_HOST_PARALLEL_SCAN;
3150 + else
3151 + printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3152 +
3153 + if (pi.flags & ATA_FLAG_EM)
3154 + ahci_reset_em(host);
3155 +
3156 + for (i = 0; i < host->n_ports; i++) {
3157 + struct ata_port *ap = host->ports[i];
3158 +
3159 +#if 0
3160 + ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3161 + ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3162 + 0x100 + ap->port_no * 0x80, "port");
3163 +#else
3164 + ata_port_desc(ap, "%s %s%llu@0x%llx", "ahci bar", "m",
3165 + (long long)(res->end - res->start) + 1, (long long)res->start);
3166 + ata_port_desc(ap, "%s 0x%llx", "port",
3167 + (long long)res->start + 0x100 + ap->port_no * 0x80);
3168 +#endif
3169 +
3170 + /* set initial link pm policy */
3171 + ap->pm_policy = NOT_AVAILABLE;
3172 +
3173 + /* set enclosure management message type */
3174 + if (ap->flags & ATA_FLAG_EM)
3175 + ap->em_message_type = ahci_em_messages;
3176 +
3177 +
3178 + /* disabled/not-implemented port */
3179 + if (!(hpriv->port_map & (1 << i)))
3180 + ap->ops = &ata_dummy_port_ops;
3181 + }
3182 +
3183 +#if 0
3184 + /* apply workaround for ASUS P5W DH Deluxe mainboard */
3185 + ahci_p5wdh_workaround(host);
3186 +
3187 + /* initialize adapter */
3188 + rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3189 + if (rc)
3190 + return rc;
3191 +#endif
3192 +
3193 + rc = ahci_reset_controller(host);
3194 + if (rc)
3195 + return rc;
3196 +
3197 + ahci_init_controller(host);
3198 + ahci_print_info(host);
3199 +
3200 +#if 0
3201 + pci_set_master(pdev);
3202 +#endif
3203 +
3204 +
3205 +
3206 +#if 0
3207 + return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3208 + &ahci_sht);
3209 +#else
3210 + /* Get SATA port interrupt number */
3211 + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
3212 + if (!res) {
3213 + dev_err(&pdev->dev, "no SATA irq\n");
3214 + return -ENODEV;
3215 + }
3216 +
3217 + return ata_host_activate(host, res->start, ahci_interrupt, IRQF_SHARED,
3218 + &ahci_sht);
3219 +
3220 +
3221 +#endif
3222 +}
3223 +
3224 +
3225 +#if defined(CONFIG_CNS3XXX_SILICON) || defined(CONFIG_SILICON)
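+/*
+ * Bring the two CNS3XXX SATA PHYs out of power-down, enable the SATA
+ * block clock and de-assert its reset.  Called from ahci_init() before
+ * the platform driver is registered, so the controller is alive by the
+ * time ahci_probe() resets it.
+ */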
3226 +static void ahci_phy_init(void)
3227 +{
3228 + u32 u32tmp;
3229 +
3230 +
3231 + u32tmp = MISC_SATA_POWER_MODE;
3232 + u32tmp |= 0x1 << 16; /* keep SATA PHY 0 out of SLUMBER mode */
3233 + u32tmp |= 0x1 << 17; /* keep SATA PHY 1 out of SLUMBER mode */
3234 + MISC_SATA_POWER_MODE = u32tmp;
3235 +
3236 + /* Enable SATA PHY */
3237 + cns3xxx_pwr_power_up(0x1 << PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY0);
3238 + cns3xxx_pwr_power_up(0x1 << PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY1);
3239 +
3240 + /* Enable SATA Clock */
3241 + cns3xxx_pwr_clk_en(0x1 << PM_CLK_GATE_REG_OFFSET_SATA);
3242 +
3243 + /* De-assert SATA reset */
3244 + u32tmp = PM_SOFT_RST_REG;
3245 + u32tmp |= 0x1 << PM_SOFT_RST_REG_OFFST_SATA;
3246 + PM_SOFT_RST_REG = u32tmp;
3247 +}
3248 +#endif
3249 +
3250 +
3251 +
3252 +static int __init ahci_init(void)
3253 +{
3254 +#if 0
3255 + return pci_register_driver(&ahci_pci_driver);
3256 +#else
3257 + printk(KERN_INFO "CNS3XXX AHCI SATA low-level driver\n");
3258 +#if defined(CONFIG_CNS3XXX_SILICON) || defined(CONFIG_SILICON)
3259 + ahci_phy_init();
3260 +#endif
3261 + return platform_driver_register(&ahci_driver);
3262 +#endif
3263 +}
3264 +
3265 +static void __exit ahci_exit(void)
3266 +{
3267 +#if 0
3268 + pci_unregister_driver(&ahci_pci_driver);
3269 +#else
3270 + platform_driver_unregister(&ahci_driver);
3271 +#endif
3272 +}
3273 +
3274 +
3275 +MODULE_AUTHOR("Jeff Garzik");
3276 +MODULE_DESCRIPTION("AHCI SATA low-level driver");
3277 +MODULE_LICENSE("GPL");
3278 +#if 0
3279 +MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3280 +#endif
3281 +MODULE_VERSION(DRV_VERSION);
3282 +
3283 +module_init(ahci_init);
3284 +module_exit(ahci_exit);
3285 --- a/drivers/ata/Kconfig
3286 +++ b/drivers/ata/Kconfig
3287 @@ -47,6 +47,14 @@ config SATA_PMP
3288 This option adds support for SATA Port Multipliers
3289 (the SATA version of an ethernet hub, or SAS expander).
3290
3291 +config SATA_CNS3XXX_AHCI
3292 + tristate "Cavium CNS3XXX AHCI SATA support"
3293 + help
3294 + This option enables AHCI Serial ATA support for the Cavium CNS3XXX.
3295 +
3296 + If unsure, say N.
3297 +
3298 +
3299 config SATA_AHCI
3300 tristate "AHCI SATA support"
3301 depends on PCI
3302 --- a/drivers/ata/libata-scsi.c
3303 +++ b/drivers/ata/libata-scsi.c
3304 @@ -3096,12 +3096,22 @@ int ata_scsi_queuecmd(struct scsi_cmnd *
3305 struct scsi_device *scsidev = cmd->device;
3306 struct Scsi_Host *shost = scsidev->host;
3307 int rc = 0;
3308 -
3309 +#ifdef CONFIG_SMP
3310 + unsigned long flags;
3311 + local_save_flags(flags);
3312 +#endif
3313 ap = ata_shost_to_port(shost);
3314
3315 spin_unlock(shost->host_lock);
3316 +#ifndef CONFIG_SMP
3317 spin_lock(ap->lock);
3318 -
3319 +#else
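+ /*
+  * On SMP, don't spin on ap->lock with interrupts disabled: if the
+  * lock is contended, briefly re-enable interrupts (presumably so the
+  * interrupt path that holds ap->lock can make progress) before
+  * trying again.
+  */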
3320 + while (!spin_trylock(ap->lock)) {
3321 + if (!irqs_disabled()) continue;
3322 + local_irq_enable();
3323 + local_irq_restore(flags);
3324 + }
3325 +#endif
3326 ata_scsi_dump_cdb(ap, cmd);
3327
3328 dev = ata_scsi_find_dev(ap, scsidev);
3329 --- a/drivers/ata/libata-sff.c
3330 +++ b/drivers/ata/libata-sff.c
3331 @@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_qu
3332 do_write);
3333 }
3334
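+ /*
+  * The sector was just written into the page through the kernel
+  * mapping by PIO; flush the D-cache so user mappings of the page
+  * see the new data on ARM cores without hardware coherency for
+  * this case.
+  */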
3335 + if (!do_write)
3336 + flush_dcache_page(page);
3337 +
3338 qc->curbytes += qc->sect_size;
3339 qc->cursg_ofs += qc->sect_size;
3340
3341 --- a/drivers/ata/Makefile
3342 +++ b/drivers/ata/Makefile
3343 @@ -1,6 +1,7 @@
3344
3345 obj-$(CONFIG_ATA) += libata.o
3346
3347 +obj-$(CONFIG_SATA_CNS3XXX_AHCI) += cns3xxx_ahci.o
3348 obj-$(CONFIG_SATA_AHCI) += ahci.o
3349 obj-$(CONFIG_SATA_SVW) += sata_svw.o
3350 obj-$(CONFIG_ATA_PIIX) += ata_piix.o