target/linux/s3c24xx/files-2.6.30/drivers/ar6000/hif/hif2.c
/*
 * hif2.c - HIF layer re-implementation for the Linux SDIO stack
 *
 * Copyright (C) 2008, 2009 by OpenMoko, Inc.
 * Written by Werner Almesberger <werner@openmoko.org>
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * Based on:
 *
 * @abstract: HIF layer reference implementation for Atheros SDIO stack
 * @notice: Copyright (c) 2004-2006 Atheros Communications Inc.
 */


#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_ids.h>

#include "athdefs.h"
#include "a_types.h"
#include "hif.h"


/* @@@ Hack - this wants cleaning up */

#ifdef CONFIG_MACH_NEO1973_GTA02

#include <mach/gta02-pm-wlan.h>

#else /* CONFIG_MACH_NEO1973_GTA02 */

#define gta02_wlan_query_rfkill_lock()	1
#define gta02_wlan_set_rfkill_cb(cb, hif) ((void) cb)
#define gta02_wlan_query_rfkill_unlock()
#define gta02_wlan_clear_rfkill_cb()

#endif /* !CONFIG_MACH_NEO1973_GTA02 */


/*
 * KNOWN BUGS:
 *
 * - HIF_DEVICE_IRQ_ASYNC_SYNC doesn't work yet (gets MMC errors)
 * - latency can reach hundreds of ms, probably because of scheduling delays
 * - packets go through about three queues before finally hitting the network
 */

/*
 * Differences from Atheros' HIFs:
 *
 * - synchronous and asynchronous requests may get reordered with respect to
 *   each other, e.g., if HIFReadWrite returns for an asynchronous request and
 *   then HIFReadWrite is called for a synchronous request, the synchronous
 *   request may be executed before the asynchronous request.
 *
 * - request queue locking seems unnecessarily complex in the Atheros HIFs.
 *
 * - Atheros mask interrupts by calling sdio_claim_irq/sdio_release_irq, which
 *   can cause quite a bit of overhead. This HIF has its own light-weight
 *   interrupt masking.
 *
 * - Atheros call deviceInsertedHandler from a thread spawned off the probe or
 *   device insertion function. The original explanation for the Atheros SDIO
 *   stack said that this is done because a delay is needed to let the chip
 *   complete initialization. There is indeed a one second delay in the thread.
 *
 *   The Atheros Linux SDIO HIF removes the delay and only retains the thread.
 *   Experimentally removing the thread didn't show any conflicts, so let's get
 *   rid of it for good.
 *
 * - The Atheros SDIO stack with Samuel's driver sets SDIO_POWER_EMPC in
 *   SDIO_CCCR_POWER. Atheros' Linux SDIO code apparently doesn't. We don't
 *   either, and this seems to work fine.
 *   @@@ Need to check this with Atheros.
 */


#define MBOXES			4

#define HIF_MBOX_BLOCK_SIZE	128
#define HIF_MBOX_BASE_ADDR	0x800
#define HIF_MBOX_WIDTH		0x800
#define HIF_MBOX_START_ADDR(mbox) \
	(HIF_MBOX_BASE_ADDR+(mbox)*HIF_MBOX_WIDTH)


struct hif_device {
	void *htc_handle;
	struct sdio_func *func;

	/*
	 * @@@ our sweet little bit of bogosity - the mechanism that lets us
	 * use the SDIO stack from softirqs. This really wants to use skbs.
	 */
	struct list_head queue;
	spinlock_t queue_lock;
	struct task_struct *io_task;
	wait_queue_head_t wait;

	/*
	 * activate_lock protects "active" and the activation/deactivation
	 * process itself.
	 *
	 * Relation to other locks: The SDIO function can be claimed while
	 * activate_lock is being held, but trying to acquire activate_lock
	 * while having ownership of the SDIO function could cause a deadlock.
	 */
	int active;
	struct mutex activate_lock;
};

struct hif_request {
	struct list_head list;
	struct sdio_func *func;
	int (*read)(struct sdio_func *func,
	    void *dst, unsigned int addr, int count);
	int (*write)(struct sdio_func *func,
	    unsigned int addr, void *src, int count);
	void *buf;
	unsigned long addr;
	int len;
	A_STATUS (*completion)(void *context, A_STATUS status);
	void *context;
};


static HTC_CALLBACKS htcCallbacks;

/*
 * shutdown_lock prevents recursion through HIFShutDownDevice
 */
static DEFINE_MUTEX(shutdown_lock);


/* ----- Request processing ------------------------------------------------ */


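/*
 * Execute a single request synchronously: claim the SDIO function, perform
 * the read or write, release the function, then run the completion callback
 * (if any) and free the request.
 */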
static A_STATUS process_request(struct hif_request *req)
{
	int ret;
	A_STATUS status;

	dev_dbg(&req->func->dev, "process_request(req %p)\n", req);
	sdio_claim_host(req->func);
	if (req->read) {
		ret = req->read(req->func, req->buf, req->addr, req->len);
	} else {
		ret = req->write(req->func, req->addr, req->buf, req->len);
	}
	sdio_release_host(req->func);
	status = ret ? A_ERROR : A_OK;
	if (req->completion)
		req->completion(req->context, status);
	kfree(req);
	return status;
}


static void enqueue_request(struct hif_device *hif, struct hif_request *req)
{
	unsigned long flags;

	dev_dbg(&req->func->dev, "enqueue_request(req %p)\n", req);
	spin_lock_irqsave(&hif->queue_lock, flags);
	list_add_tail(&req->list, &hif->queue);
	spin_unlock_irqrestore(&hif->queue_lock, flags);
	wake_up(&hif->wait);
}


static struct hif_request *dequeue_request(struct hif_device *hif)
{
	struct hif_request *req;
	unsigned long flags;

	spin_lock_irqsave(&hif->queue_lock, flags);
	if (list_empty(&hif->queue))
		req = NULL;
	else {
		req = list_first_entry(&hif->queue,
		    struct hif_request, list);
		list_del(&req->list);
	}
	spin_unlock_irqrestore(&hif->queue_lock, flags);
	return req;
}


static void wait_queue_empty(struct hif_device *hif)
{
	unsigned long flags;
	int empty;

	while (1) {
		spin_lock_irqsave(&hif->queue_lock, flags);
		empty = list_empty(&hif->queue);
		spin_unlock_irqrestore(&hif->queue_lock, flags);
		if (empty)
			break;
		else
			yield();
	}
}


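/*
 * I/O thread: sleeps on hif->wait and drains the request queue, processing
 * one request at a time. Runs as a SCHED_FIFO real-time task so queued SDIO
 * transfers are not held up behind ordinary processes.
 */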
static int io(void *data)
{
	struct hif_device *hif = data;
	struct sched_param param = { .sched_priority = 2 };
		/* one priority level slower than ksdioirqd (which is at 1) */
	DEFINE_WAIT(wait);
	struct hif_request *req;

	sched_setscheduler(current, SCHED_FIFO, &param);

	while (1) {
		while (1) {
			/*
			 * Since we never use signals here, one might think
			 * that this ought to be TASK_UNINTERRUPTIBLE. However,
			 * such a task would increase the load average and,
			 * worse, it would trigger the softlockup check.
			 */
			prepare_to_wait(&hif->wait, &wait, TASK_INTERRUPTIBLE);
			if (kthread_should_stop()) {
				finish_wait(&hif->wait, &wait);
				return 0;
			}
			req = dequeue_request(hif);
			if (req)
				break;
			schedule();
		}
		finish_wait(&hif->wait, &wait);

		(void) process_request(req);
	}
	return 0;
}


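/*
 * Entry point for the HTC layer. Builds a struct hif_request for the
 * transfer; synchronous requests are processed immediately in the caller's
 * context, asynchronous ones are queued for the I/O thread and complete
 * through htcCallbacks.rwCompletionHandler.
 */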
A_STATUS HIFReadWrite(HIF_DEVICE *hif, A_UINT32 address, A_UCHAR *buffer,
    A_UINT32 length, A_UINT32 request, void *context)
{
	struct device *dev = HIFGetOSDevice(hif);
	struct hif_request *req;

	dev_dbg(dev, "HIFReadWrite(device %p, address 0x%x, buffer %p, "
	    "length %d, request 0x%x, context %p)\n",
	    hif, address, buffer, length, request, context);

	BUG_ON(!(request & (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)));
	BUG_ON(!(request & (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)));
	BUG_ON(!(request & (HIF_READ | HIF_WRITE)));
	BUG_ON(!(request & HIF_EXTENDED_IO));

	if (address >= HIF_MBOX_START_ADDR(0) &&
	    address < HIF_MBOX_START_ADDR(MBOXES+1)) {
		BUG_ON(length > HIF_MBOX_WIDTH);
		/* Adjust the address so that the last byte falls on the EOM
		   address. */
		address += HIF_MBOX_WIDTH-length;
	}

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		if (request & HIF_ASYNCHRONOUS)
			htcCallbacks.rwCompletionHandler(context, A_ERROR);
		return A_ERROR;
	}

	req->func = hif->func;
	req->addr = address;
	req->buf = buffer;
	req->len = length;

	if (request & HIF_READ) {
		if (request & HIF_FIXED_ADDRESS)
			req->read = sdio_readsb;
		else
			req->read = sdio_memcpy_fromio;
	} else {
		if (request & HIF_FIXED_ADDRESS)
			req->write = sdio_writesb;
		else
			req->write = sdio_memcpy_toio;
	}

	if (!(request & HIF_ASYNCHRONOUS))
		return process_request(req);

	req->completion = htcCallbacks.rwCompletionHandler;
	req->context = context;
	enqueue_request(hif, req);

	return A_OK;
}


/* ----- Interrupt handling ------------------------------------------------ */

/*
 * Volatile ought to be good enough to make gcc do the right thing on S3C24xx.
 * No need to use atomic or put barriers, keeping the code more readable.
 *
 * Warning: this story changes if going SMP/SMT.
 */

static volatile int masked = 1;
static volatile int pending;
static volatile int in_interrupt;


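/*
 * Dispatch one device interrupt to the HTC layer's DSR handler. Called both
 * from the real SDIO interrupt (sdio_ar6000_irq) and from HIFUnMaskInterrupt
 * when a pending interrupt is replayed.
 */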
static void ar6000_do_irq(struct sdio_func *func)
{
	HIF_DEVICE *hif = sdio_get_drvdata(func);
	struct device *dev = HIFGetOSDevice(hif);
	A_STATUS status;

	dev_dbg(dev, "ar6000_do_irq -> %p\n", htcCallbacks.dsrHandler);

	status = htcCallbacks.dsrHandler(hif->htc_handle);
	BUG_ON(status != A_OK);
}


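/*
 * SDIO interrupt handler, called by the MMC/SDIO stack with the host claimed.
 * If interrupts are currently masked (see HIFMaskInterrupt below), the event
 * is only recorded as pending and is replayed later by HIFUnMaskInterrupt.
 */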
static void sdio_ar6000_irq(struct sdio_func *func)
{
	HIF_DEVICE *hif = sdio_get_drvdata(func);
	struct device *dev = HIFGetOSDevice(hif);

	dev_dbg(dev, "sdio_ar6000_irq\n");

	in_interrupt = 1;
	if (masked) {
		in_interrupt = 0;
		pending++;
		return;
	}
	/*
	 * @@@ This is ugly. If we don't drop the lock, we'll deadlock when
	 * the handler tries to do SDIO. So there are four choices:
	 *
	 * 1) Break the call chain by calling the callback from a workqueue.
	 *    Ugh.
	 * 2) Make process_request aware that we already have the lock.
	 * 3) Drop the lock. Which is ugly but should be safe as long as we're
	 *    making sure the device doesn't go away.
	 * 4) Change the AR6k driver such that it only issues asynchronous
	 *    requests when called from an interrupt.
	 *
	 * Solution 2) is probably the best for now. Will try it later.
	 */
	sdio_release_host(func);
	ar6000_do_irq(func);
	sdio_claim_host(func);
	in_interrupt = 0;
}


void HIFAckInterrupt(HIF_DEVICE *hif)
{
	struct device *dev = HIFGetOSDevice(hif);

	dev_dbg(dev, "HIFAckInterrupt\n");
	/* do nothing */
}


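/*
 * Unmask interrupts and replay any interrupt that was recorded as pending
 * while we were masked. The loop catches interrupts that sneak in between
 * handling the pending flag and clearing "masked".
 */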
void HIFUnMaskInterrupt(HIF_DEVICE *hif)
{
	struct device *dev = HIFGetOSDevice(hif);

	dev_dbg(dev, "HIFUnMaskInterrupt\n");
	do {
		masked = 1;
		if (pending) {
			pending = 0;
			ar6000_do_irq(hif->func);
			/* We may take an interrupt before unmasking and thus
			   get it pending. In this case, we just loop back. */
		}
		masked = 0;
	}
	while (pending);
}


void HIFMaskInterrupt(HIF_DEVICE *hif)
{
	struct device *dev = HIFGetOSDevice(hif);

	dev_dbg(dev, "HIFMaskInterrupt\n");
	/*
	 * Since sdio_ar6000_irq can also be called from a process context, we
	 * may conceivably end up racing with it. Thus, we need to wait until
	 * we can be sure that no concurrent interrupt processing is going on
	 * before we return.
	 *
	 * Note: this may be a bit on the paranoid side - the callers may
	 * actually be nice enough to disable scheduling. Check later.
	 */
	masked = 1;
	while (in_interrupt)
		yield();
}


/* ----- HIF API glue functions -------------------------------------------- */


struct device *HIFGetOSDevice(HIF_DEVICE *hif)
{
	return &hif->func->dev;
}


void HIFSetHandle(void *hif_handle, void *handle)
{
	HIF_DEVICE *hif = (HIF_DEVICE *) hif_handle;

	hif->htc_handle = handle;
}


/* ----- Device configuration (HIF side) ----------------------------------- */


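/*
 * Report static device parameters (mailbox block size, mailbox base
 * addresses, interrupt processing mode) to the HTC layer.
 */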
A_STATUS HIFConfigureDevice(HIF_DEVICE *hif,
    HIF_DEVICE_CONFIG_OPCODE opcode, void *config, A_UINT32 configLen)
{
	struct device *dev = HIFGetOSDevice(hif);
	HIF_DEVICE_IRQ_PROCESSING_MODE *ipm_cfg = config;
	A_UINT32 *mbs_cfg = config;
	int i;

	dev_dbg(dev, "HIFConfigureDevice\n");

	switch (opcode) {
	case HIF_DEVICE_GET_MBOX_BLOCK_SIZE:
		for (i = 0; i != MBOXES; i++)
			mbs_cfg[i] = HIF_MBOX_BLOCK_SIZE;
		break;
	case HIF_DEVICE_GET_MBOX_ADDR:
		for (i = 0; i != MBOXES; i++)
			mbs_cfg[i] = HIF_MBOX_START_ADDR(i);
		break;
	case HIF_DEVICE_GET_IRQ_PROC_MODE:
		*ipm_cfg = HIF_DEVICE_IRQ_SYNC_ONLY;
//		*ipm_cfg = HIF_DEVICE_IRQ_ASYNC_SYNC;
		break;
	default:
		return A_ERROR;
	}
	return A_OK;
}


/* ----- Device probe and removal (Linux side) ----------------------------- */


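/*
 * Bring the SDIO function up: enable it, set the block size, claim the SDIO
 * interrupt, start the I/O thread and finally hand the device to the HTC
 * layer via deviceInsertedHandler. Error paths unwind in reverse order.
 */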
static int ar6000_do_activate(struct hif_device *hif)
{
	struct sdio_func *func = hif->func;
	struct device *dev = &func->dev;
	int ret;

	dev_dbg(dev, "ar6000_do_activate\n");

	sdio_claim_host(func);
	sdio_enable_func(func);

	INIT_LIST_HEAD(&hif->queue);
	init_waitqueue_head(&hif->wait);
	spin_lock_init(&hif->queue_lock);

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret < 0) {
		dev_err(dev, "sdio_set_block_size returns %d\n", ret);
		goto out_enabled;
	}
	ret = sdio_claim_irq(func, sdio_ar6000_irq);
	if (ret) {
		dev_err(dev, "sdio_claim_irq returns %d\n", ret);
		goto out_enabled;
	}
	/* Set SDIO_BUS_CD_DISABLE in SDIO_CCCR_IF ? */
#if 0
	sdio_f0_writeb(func, SDIO_CCCR_CAP_E4MI, SDIO_CCCR_CAPS, &ret);
	if (ret) {
		dev_err(dev, "sdio_f0_writeb(SDIO_CCCR_CAPS) returns %d\n",
		    ret);
		goto out_got_irq;
	}
#else
	if (0) /* avoid warning */
		goto out_got_irq;
#endif

	sdio_release_host(func);

	hif->io_task = kthread_run(io, hif, "ar6000_io");
	ret = IS_ERR(hif->io_task);
	if (ret) {
		dev_err(dev, "kthread_run(ar6000_io): %d\n", ret);
		goto out_func_ready;
	}

	ret = htcCallbacks.deviceInsertedHandler(hif);
	if (ret == A_OK)
		return 0;

	dev_err(dev, "deviceInsertedHandler: %d\n", ret);

	ret = kthread_stop(hif->io_task);
	if (ret)
		dev_err(dev, "kthread_stop (ar6000_io): %d\n", ret);

out_func_ready:
	sdio_claim_host(func);

out_got_irq:
	sdio_release_irq(func);

out_enabled:
	sdio_disable_func(func);
	sdio_release_host(func);

	return ret;
}


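/*
 * Tear everything down again: notify the HTC layer (unless we got here
 * through HIFShutDownDevice, see shutdown_lock), drain the request queue,
 * stop the I/O thread and disable the SDIO function.
 */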
static void ar6000_do_deactivate(struct hif_device *hif)
{
	struct sdio_func *func = hif->func;
	struct device *dev = &func->dev;
	int ret;

	dev_dbg(dev, "ar6000_do_deactivate\n");
	if (!hif->active)
		return;

	if (mutex_trylock(&shutdown_lock)) {
		/*
		 * Funny, Atheros' HIF does this call, but this just puts us in
		 * a recursion through HTCShutDown/HIFShutDown if unloading the
		 * module.
		 *
		 * However, we need it for suspend/resume. See the comment at
		 * HIFShutDown, below.
		 */
		ret = htcCallbacks.deviceRemovedHandler(hif->htc_handle, A_OK);
		if (ret != A_OK)
			dev_err(dev, "deviceRemovedHandler: %d\n", ret);
		mutex_unlock(&shutdown_lock);
	}
	wait_queue_empty(hif);
	ret = kthread_stop(hif->io_task);
	if (ret)
		dev_err(dev, "kthread_stop (ar6000_io): %d\n", ret);
	sdio_claim_host(func);
	sdio_release_irq(func);
	sdio_disable_func(func);
	sdio_release_host(func);
}


static int ar6000_activate(struct hif_device *hif)
{
	int ret = 0;

	dev_dbg(&hif->func->dev, "ar6000_activate\n");
	mutex_lock(&hif->activate_lock);
	if (!hif->active) {
		ret = ar6000_do_activate(hif);
		if (ret) {
			printk(KERN_ERR "%s: Failed to activate %d\n",
			    __func__, ret);
			goto out;
		}
		hif->active = 1;
	}
out:
	mutex_unlock(&hif->activate_lock);
	return ret;
}


static void ar6000_deactivate(struct hif_device *hif)
{
	dev_dbg(&hif->func->dev, "ar6000_deactivate\n");
	mutex_lock(&hif->activate_lock);
	if (hif->active) {
		ar6000_do_deactivate(hif);
		hif->active = 0;
	}
	mutex_unlock(&hif->activate_lock);
}


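/*
 * rfkill callback from the GTA02 platform code: a non-zero "on" powers the
 * device up, zero shuts it down.
 */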
static int ar6000_rfkill_cb(void *data, int on)
{
	struct hif_device *hif = data;
	struct sdio_func *func = hif->func;
	struct device *dev = &func->dev;

	dev_dbg(dev, "ar6000_rfkill_cb: on %d\n", on);
	if (on)
		return ar6000_activate(hif);
	ar6000_deactivate(hif);
	return 0;
}


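/*
 * Probe is called by the SDIO stack when a matching function is found. The
 * device is activated immediately only if rfkill currently allows it;
 * otherwise activation is deferred to the rfkill callback.
 */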
static int sdio_ar6000_probe(struct sdio_func *func,
    const struct sdio_device_id *id)
{
	struct device *dev = &func->dev;
	struct hif_device *hif;
	int ret = 0;

	dev_dbg(dev, "sdio_ar6000_probe\n");
	BUG_ON(!htcCallbacks.deviceInsertedHandler);

	hif = kzalloc(sizeof(*hif), GFP_KERNEL);
	if (!hif)
		return -ENOMEM;

	sdio_set_drvdata(func, hif);
	hif->func = func;
	mutex_init(&hif->activate_lock);
	hif->active = 0;

	if (gta02_wlan_query_rfkill_lock())
		ret = ar6000_activate(hif);
	if (!ret) {
		gta02_wlan_set_rfkill_cb(ar6000_rfkill_cb, hif);
		return 0;
	}
	gta02_wlan_query_rfkill_unlock();
	sdio_set_drvdata(func, NULL);
	kfree(hif);
	return ret;
}


static void sdio_ar6000_remove(struct sdio_func *func)
{
	struct device *dev = &func->dev;
	HIF_DEVICE *hif = sdio_get_drvdata(func);

	dev_dbg(dev, "sdio_ar6000_remove\n");
	gta02_wlan_clear_rfkill_cb();
	ar6000_deactivate(hif);
	sdio_set_drvdata(func, NULL);
	kfree(hif);
}


/* ----- Device registration/unregistration (called by HIF) ---------------- */


#define ATHEROS_SDIO_DEVICE(id, offset) \
	SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_##id | (offset))

static const struct sdio_device_id sdio_ar6000_ids[] = {
	{ ATHEROS_SDIO_DEVICE(AR6002, 0) },
	{ ATHEROS_SDIO_DEVICE(AR6002, 0x1) },
	{ ATHEROS_SDIO_DEVICE(AR6001, 0x8) },
	{ ATHEROS_SDIO_DEVICE(AR6001, 0x9) },
	{ ATHEROS_SDIO_DEVICE(AR6001, 0xa) },
	{ ATHEROS_SDIO_DEVICE(AR6001, 0xb) },
	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(sdio, sdio_ar6000_ids);


static struct sdio_driver sdio_ar6000_driver = {
	.probe		= sdio_ar6000_probe,
	.remove		= sdio_ar6000_remove,
	.name		= "sdio_ar6000",
	.id_table	= sdio_ar6000_ids,
};


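/*
 * Called by the HTC layer during initialization: record the HTC callbacks
 * and register the SDIO driver, which in turn triggers sdio_ar6000_probe for
 * any already-present device.
 */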
int HIFInit(HTC_CALLBACKS *callbacks)
{
	int ret;

	BUG_ON(!callbacks);

	printk(KERN_DEBUG "HIFInit\n");
	htcCallbacks = *callbacks;

	ret = sdio_register_driver(&sdio_ar6000_driver);
	if (ret) {
		printk(KERN_ERR
		    "sdio_register_driver(sdio_ar6000_driver): %d\n", ret);
		return A_ERROR;
	}

	return 0;
}


/*
 * We have four possible call chains here:
 *
 * System shutdown/reboot:
 *
 * kernel_restart_prepare ...> device_shutdown ...> s3cmci_shutdown ->
 *   mmc_remove_host ..> sdio_bus_remove -> sdio_ar6000_remove ->
 *   ar6000_deactivate -> ar6000_do_deactivate ->
 *   deviceRemovedHandler (HTCTargetRemovedHandler) -> HIFShutDownDevice
 *
 * This is roughly the same sequence as suspend, described below.
 *
 * Module removal:
 *
 * sys_delete_module -> ar6000_cleanup_module -> HTCShutDown ->
 *   HIFShutDownDevice -> sdio_unregister_driver ...> sdio_bus_remove ->
 *   sdio_ar6000_remove -> ar6000_deactivate -> ar6000_do_deactivate
 *
 * In this case, HIFShutDownDevice must call sdio_unregister_driver to
 * notify the driver about its removal. ar6000_do_deactivate must not call
 * deviceRemovedHandler, because that would loop back into HIFShutDownDevice.
 *
 * Suspend:
 *
 * device_suspend ...> s3cmci_suspend ...> sdio_bus_remove ->
 *   sdio_ar6000_remove -> ar6000_deactivate -> ar6000_do_deactivate ->
 *   deviceRemovedHandler (HTCTargetRemovedHandler) -> HIFShutDownDevice
 *
 * We must call deviceRemovedHandler to inform the ar6k stack that the device
 * has been removed. Since HTCTargetRemovedHandler calls back into
 * HIFShutDownDevice, we must also prevent the call to
 * sdio_unregister_driver, or we'd end up recursing into the SDIO stack,
 * eventually deadlocking somewhere.
 *
 * rfkill:
 *
 * rfkill_state_store -> rfkill_toggle_radio -> gta02_wlan_toggle_radio ->
 *   ar6000_rfkill_cb -> ar6000_deactivate -> ar6000_do_deactivate ->
 *   deviceRemovedHandler (HTCTargetRemovedHandler) -> HIFShutDownDevice
 *
 * This is similar to suspend - only the entry point changes.
 */

void HIFShutDownDevice(HIF_DEVICE *hif)
{
	/* Beware, HTCShutDown calls us with hif == NULL ! */
	if (mutex_trylock(&shutdown_lock)) {
		sdio_unregister_driver(&sdio_ar6000_driver);
		mutex_unlock(&shutdown_lock);
	}
}