[openwrt.git] / package/rt2x00/src/rt2x00pci.c
/*
        Copyright (C) 2004 - 2007 rt2x00 SourceForge Project
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00pci
        Abstract: rt2x00 generic pci device routines.
        Supported chipsets: rt2460, rt2560, rt2561, rt2561s & rt2661.
 */

/*
 * Set environment defines for rt2x00.h
 */
#define DRV_NAME "rt2x00pci"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
#include "rt2x00pci.h"

/*
 * Beacon handlers.
 */
int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
                            struct ieee80211_tx_control *control)
{
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct data_ring *ring =
                rt2x00_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON);
        struct data_entry *entry = rt2x00_get_data_entry(ring);

        /*
         * The ieee80211 stack doesn't necessarily set this,
         * but we need the queue field set for the descriptor
         * initialization.
         */
        control->queue = IEEE80211_TX_QUEUE_BEACON;

        /*
         * Update the beacon entry.
         */
        memcpy(entry->data_addr, skb->data, skb->len);
        rt2x00lib_write_tx_desc(rt2x00dev, entry, entry->priv,
                                (struct ieee80211_hdr *)skb->data,
                                skb->len, control);

        /*
         * Enable beacon generation.
         */
        rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update);

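/*
 * rt2x00pci_beacondone() is meant to be called by the chipset driver
 * (typically from its interrupt handler) once the hardware signals that the
 * current beacon has been transmitted. It fetches the next beacon from
 * mac80211 and pushes it through the driver's beacon_update handler, which
 * for PCI devices is rt2x00pci_beacon_update() above.
 */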
void rt2x00pci_beacondone(struct rt2x00_dev *rt2x00dev, const int queue)
{
        struct data_ring *ring = rt2x00_get_ring(rt2x00dev, queue);
        struct data_entry *entry = rt2x00_get_data_entry(ring);
        struct sk_buff *skb;

        skb = ieee80211_beacon_get(rt2x00dev->hw,
                rt2x00dev->interface.id, &entry->tx_status.control);
        if (!skb)
                return;

        rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb,
                &entry->tx_status.control);

        dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(rt2x00pci_beacondone);

/*
 * TX data handlers.
 */
int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
                            struct data_ring *ring, struct sk_buff *skb,
                            struct ieee80211_tx_control *control)
{
        struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
        struct data_entry *entry = rt2x00_get_data_entry(ring);
        struct data_desc *txd = entry->priv;
        u32 word;

        if (rt2x00_ring_full(ring)) {
                ieee80211_stop_queue(rt2x00dev->hw, control->queue);
                return -EINVAL;
        }

        rt2x00_desc_read(txd, 0, &word);

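        /*
         * Sanity check: the ring bookkeeping says this entry is free, so the
         * descriptor must not still be owned by the NIC or flagged valid.
         * If it is, the software and hardware view of the ring have diverged.
         */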
        if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
            rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
                ERROR(rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      control->queue, DRV_PROJECT);
                ieee80211_stop_queue(rt2x00dev->hw, control->queue);
                return -EINVAL;
        }

        entry->skb = skb;
        memcpy(&entry->tx_status.control, control, sizeof(*control));
        memcpy(entry->data_addr, skb->data, skb->len);
        rt2x00lib_write_tx_desc(rt2x00dev, entry, txd, ieee80211hdr,
                                skb->len, control);

        rt2x00_ring_index_inc(ring);

        if (rt2x00_ring_full(ring))
                ieee80211_stop_queue(rt2x00dev->hw, control->queue);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * RX data handlers.
 */
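
/*
 * rt2x00pci_rxdone() is meant to be called by the chipset driver (typically
 * from its interrupt handler). It walks the RX ring until it reaches a
 * descriptor that is still owned by the NIC, forwards each completed frame
 * to rt2x00lib, and returns ownership of the descriptor to the hardware
 * unless the radio has been disabled in the meantime.
 */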
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
        struct data_ring *ring = rt2x00dev->rx;
        struct data_entry *entry;
        struct data_desc *rxd;
        u32 desc;
        int signal;
        int rssi;
        int ofdm;
        int size;

        while (1) {
                entry = rt2x00_get_data_entry(ring);
                rxd = entry->priv;
                rt2x00_desc_read(rxd, 0, &desc);

                if (rt2x00_get_field32(desc, RXD_ENTRY_OWNER_NIC))
                        break;

                size = rt2x00dev->ops->lib->fill_rxdone(
                        entry, &signal, &rssi, &ofdm);
                if (size < 0)
                        goto skip_entry;

                /*
                 * Send the packet to upper layer.
                 */
                rt2x00lib_rxdone(entry, entry->data_addr, size,
                                 signal, rssi, ofdm);

skip_entry:
                if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) {
                        rt2x00_set_field32(&desc, RXD_ENTRY_OWNER_NIC, 1);
                        rt2x00_desc_write(rxd, 0, desc);
                }

                rt2x00_ring_index_inc(ring);
        }
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

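/*
 * Illustrative sketch (not part of the original file): the interrupt handler
 * that a chipset driver registers through rt2x00pci_initialize() below is
 * expected to read its chipset-specific interrupt status and dispatch to the
 * handlers above, roughly along these lines. The register and bit names are
 * hypothetical:
 *
 *      static irqreturn_t example_interrupt(int irq, void *dev_instance)
 *      {
 *              struct rt2x00_dev *rt2x00dev = dev_instance;
 *              u32 reg = example_read_irq_status(rt2x00dev);
 *
 *              if (!reg)
 *                      return IRQ_NONE;
 *
 *              if (reg & EXAMPLE_IRQ_RXDONE)
 *                      rt2x00pci_rxdone(rt2x00dev);
 *              if (reg & EXAMPLE_IRQ_BEACONDONE)
 *                      rt2x00pci_beacondone(rt2x00dev,
 *                                           IEEE80211_TX_QUEUE_BEACON);
 *
 *              return IRQ_HANDLED;
 *      }
 */
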
/*
 * Device initialization handlers.
 */
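
/*
 * Each ring is backed by a single coherent DMA allocation: all descriptors
 * come first (stats.limit * desc_size bytes), immediately followed by the
 * data buffers (stats.limit * data_size bytes). The macros below compute
 * the per-entry offsets into that block.
 */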
#define priv_offset(__ring, __i)                                \
({                                                              \
        (__ring)->data_addr + ((__i) * (__ring)->desc_size);    \
})

#define data_addr_offset(__ring, __i)                           \
({                                                              \
        (__ring)->data_addr                                     \
            + ((__ring)->stats.limit * (__ring)->desc_size)     \
            + ((__i) * (__ring)->data_size);                    \
})

#define data_dma_offset(__ring, __i)                            \
({                                                              \
        (__ring)->data_dma                                      \
            + ((__ring)->stats.limit * (__ring)->desc_size)     \
            + ((__i) * (__ring)->data_size);                    \
})

static int rt2x00pci_alloc_ring(struct rt2x00_dev *rt2x00dev,
                                struct data_ring *ring)
{
        unsigned int i;

        /*
         * Allocate DMA memory for descriptor and buffer.
         */
        ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev),
                rt2x00_get_ring_size(ring), &ring->data_dma);
        if (!ring->data_addr)
                return -ENOMEM;

        /*
         * Initialize all ring entries to contain valid
         * addresses.
         */
        for (i = 0; i < ring->stats.limit; i++) {
                ring->entry[i].priv = priv_offset(ring, i);
                ring->entry[i].data_addr = data_addr_offset(ring, i);
                ring->entry[i].data_dma = data_dma_offset(ring, i);
        }

        return 0;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
        struct data_ring *ring;
        int status;

        /*
         * Allocate DMA
         */
        ring_for_each(rt2x00dev, ring) {
                status = rt2x00pci_alloc_ring(rt2x00dev, ring);
                if (status)
                        goto exit;
        }

        /*
         * Register interrupt handler.
         */
        status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
                             IRQF_SHARED, pci_dev->driver->name, rt2x00dev);
        if (status) {
                ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
                      pci_dev->irq, status);
                return status;
        }

        return 0;

exit:
        rt2x00pci_uninitialize(rt2x00dev);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_ring *ring;

        /*
         * Free irq line.
         */
        free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);

        /*
         * Free DMA
         */
        ring_for_each(rt2x00dev, ring) {
                if (ring->data_addr)
                        pci_free_consistent(rt2x00dev_pci(rt2x00dev),
                                            rt2x00_get_ring_size(ring),
                                            ring->data_addr, ring->data_dma);
                ring->data_addr = NULL;
        }
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
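
/*
 * Map the chipset's control/status register window (PCI BAR 0) into kernel
 * virtual address space so the register access routines can reach it.
 */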
static int rt2x00pci_alloc_csr(struct rt2x00_dev *rt2x00dev)
{
        rt2x00dev->csr_addr = ioremap(
                pci_resource_start(rt2x00dev_pci(rt2x00dev), 0),
                pci_resource_len(rt2x00dev_pci(rt2x00dev), 0));
        if (!rt2x00dev->csr_addr) {
                ERROR(rt2x00dev, "Ioremap failed.\n");
                return -ENOMEM;
        }

        return 0;
}

static void rt2x00pci_free_csr(struct rt2x00_dev *rt2x00dev)
{
        if (rt2x00dev->csr_addr) {
                iounmap(rt2x00dev->csr_addr);
                rt2x00dev->csr_addr = NULL;
        }
}

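/*
 * Generic probe handler: claim the PCI resources, map the CSR registers,
 * allocate the ieee80211_hw structure with rt2x00_dev as driver private
 * data, and hand the remaining bus-independent initialization over to
 * rt2x00lib_probe_dev(). The chipset-specific rt2x00_ops are taken from
 * the driver_data field of the matching pci_device_id entry.
 */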
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
        struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
        struct ieee80211_hw *hw;
        struct rt2x00_dev *rt2x00dev;
        int retval;

        retval = pci_request_regions(pci_dev, pci_name(pci_dev));
        if (retval) {
                ERROR_PROBE("PCI request regions failed.\n");
                return retval;
        }

        retval = pci_enable_device(pci_dev);
        if (retval) {
                ERROR_PROBE("Enable device failed.\n");
                goto exit_release_regions;
        }

        pci_set_master(pci_dev);

        if (pci_set_mwi(pci_dev))
                ERROR_PROBE("MWI not available.\n");

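        /*
         * Prefer a 64-bit DMA mask and fall back to 32-bit; if neither can
         * be set, the device cannot perform DMA on this platform at all.
         */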
        if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
            pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
                ERROR_PROBE("PCI DMA not supported.\n");
                retval = -EIO;
                goto exit_disable_device;
        }

        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                ERROR_PROBE("Failed to allocate hardware.\n");
                retval = -ENOMEM;
                goto exit_disable_device;
        }

        pci_set_drvdata(pci_dev, hw);

        rt2x00dev = hw->priv;
        rt2x00dev->dev = pci_dev;
        rt2x00dev->ops = ops;
        rt2x00dev->hw = hw;

        retval = rt2x00pci_alloc_csr(rt2x00dev);
        if (retval)
                goto exit_free_device;

        retval = rt2x00lib_probe_dev(rt2x00dev);
        if (retval)
                goto exit_free_csr;

        return 0;

exit_free_csr:
        rt2x00pci_free_csr(rt2x00dev);

exit_free_device:
        ieee80211_free_hw(hw);

exit_disable_device:
        if (retval != -EBUSY)
                pci_disable_device(pci_dev);

exit_release_regions:
        pci_release_regions(pci_dev);

        pci_set_drvdata(pci_dev, NULL);

        return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        /*
         * Free all allocated data.
         */
        rt2x00lib_remove_dev(rt2x00dev);
        ieee80211_free_hw(hw);

        /*
         * Free the PCI device data.
         */
        pci_set_drvdata(pci_dev, NULL);
        pci_disable_device(pci_dev);
        pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

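/*
 * Power management: suspend saves the rt2x00lib state and releases the CSR
 * mapping before the PCI core puts the device to sleep; resume powers the
 * device back up and performs the same steps in reverse.
 */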
#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;
        int retval;

        retval = rt2x00lib_suspend(rt2x00dev, state);
        if (retval)
                return retval;

        rt2x00pci_free_csr(rt2x00dev);

        pci_save_state(pci_dev);
        pci_disable_device(pci_dev);
        return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;
        int retval;

        if (pci_set_power_state(pci_dev, PCI_D0) ||
            pci_enable_device(pci_dev) ||
            pci_restore_state(pci_dev)) {
                ERROR(rt2x00dev, "Failed to resume device.\n");
                return -EIO;
        }

        retval = rt2x00pci_alloc_csr(rt2x00dev);
        if (retval)
                return retval;

        return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 library");
MODULE_LICENSE("GPL");
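
/*
 * Illustrative sketch (not part of the original file): a chipset driver such
 * as rt2500pci is expected to plug the generic handlers above into its own
 * struct pci_driver. Every identifier starting with "example_" below is
 * hypothetical; real drivers define their own rt2x00_ops and PCI id tables,
 * and the vendor/device ids shown are placeholders.
 *
 *      static struct pci_device_id example_device_table[] = {
 *              { PCI_DEVICE(EXAMPLE_PCI_VENDOR, EXAMPLE_PCI_DEVICE),
 *                .driver_data = (unsigned long)&example_ops },
 *              { 0, }
 *      };
 *      MODULE_DEVICE_TABLE(pci, example_device_table);
 *
 *      static struct pci_driver example_driver = {
 *              .name           = DRV_NAME,
 *              .id_table       = example_device_table,
 *              .probe          = rt2x00pci_probe,
 *              .remove         = rt2x00pci_remove,
 *      #ifdef CONFIG_PM
 *              .suspend        = rt2x00pci_suspend,
 *              .resume         = rt2x00pci_resume,
 *      #endif
 *      };
 *
 * The driver's module_init handler then simply calls
 * pci_register_driver(&example_driver), and module_exit calls
 * pci_unregister_driver(&example_driver).
 */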