+/*
+ * RX data handlers.
+ */
+static void rt2x00usb_interrupt_rxdone(struct urb *urb)
+{
+ struct data_entry *entry = (struct data_entry *)urb->context;
+ struct data_ring *ring = entry->ring;
+ struct rt2x00_dev *rt2x00dev = ring->rt2x00dev;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct rxdata_entry_desc desc;
+ int header_size;
+ int frame_size;
+
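+ /*
+ * Ignore the urb when the radio has been disabled
+ * or when the entry is no longer owned by the device.
+ */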
+ if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
+ !test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags))
+ return;
+
+ /*
+ * Check if the received data is simply too small
+ * to be actually valid, or if the urb is signaling
+ * a problem.
+ */
+ if (urb->actual_length < entry->ring->desc_size || urb->status)
+ goto skip_entry;
+
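+ /*
+ * Let the driver specific code fill the RX descriptor
+ * information for this received frame.
+ */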
+ memset(&desc, 0x00, sizeof(desc));
+ rt2x00dev->ops->lib->fill_rxdone(entry, &desc);
+
+ /*
+ * Allocate a new sk buffer to replace the current one.
+ * If allocation fails, we should drop the current frame
+ * so we can recycle the existing sk buffer for the new frame.
+ * As alignment we use 2 bytes and not NET_IP_ALIGN because
+ * we must be certain to have 2 bytes of headroom available
+ * (NET_IP_ALIGN can be 0 on some architectures). These 2
+ * bytes are used for frame alignment later; we assume that
+ * header_size % 4 == 2 is more likely than
+ * header_size % 4 == 0, and thus optimize alignment by
+ * reserving the 2 bytes in advance.
+ */
+ frame_size = entry->ring->data_size + entry->ring->desc_size;
+ skb = dev_alloc_skb(frame_size + 2);
+ if (!skb)
+ goto skip_entry;
+
+ skb_reserve(skb, 2);
+ skb_put(skb, frame_size);
+
+ /*
+ * The data behind the ieee80211 header must be
+ * aligned to a 4-byte boundary. After that, trim
+ * the buffer down so it only contains the valid
+ * frame data, excluding the device descriptor.
+ */
+ hdr = (struct ieee80211_hdr *)entry->skb->data;
+ header_size =
+ ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
+
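+ /*
+ * The buffer was allocated with 2 bytes of headroom,
+ * so the payload is already 4 byte aligned when
+ * header_size % 4 == 2. When header_size is a multiple
+ * of 4, reclaim those 2 bytes and shift the frame down
+ * to restore the payload alignment.
+ */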
+ if (header_size % 4 == 0) {
+ skb_push(entry->skb, 2);
+ memmove(entry->skb->data, entry->skb->data + 2, skb->len - 2);
+ }
+ skb_trim(entry->skb, desc.size);
+
+ /*
+ * Send the frame to rt2x00lib for further processing.
+ */
+ rt2x00lib_rxdone(entry, entry->skb, &desc);
+
+ /*
+ * Replace current entry's skb with the newly allocated one,
+ * and reinitialize the urb.
+ */
+ entry->skb = skb;
+ urb->transfer_buffer = entry->skb->data;
+ urb->transfer_buffer_length = entry->skb->len;
+
+skip_entry:
+ if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) {
+ __set_bit(ENTRY_OWNER_NIC, &entry->flags);
+ usb_submit_urb(urb, GFP_ATOMIC);
+ }
+
+ rt2x00_ring_index_inc(ring);
+}
+
+/*
+ * Radio handlers
+ */
+void rt2x00usb_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ struct usb_device *usb_dev =
+ interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
+ struct data_ring *ring;
+ struct data_entry *entry;
+ unsigned int i;
+
+ /*
+ * Initialize the TX rings.
+ */
+ txringall_for_each(rt2x00dev, ring) {
+ for (i = 0; i < ring->stats.limit; i++)
+ ring->entry[i].flags = 0;
+
+ rt2x00_ring_index_clear(ring);
+ }
+
+ /*
+ * Initialize and start the RX ring.
+ */
+ rt2x00_ring_index_clear(rt2x00dev->rx);
+
+ for (i = 0; i < rt2x00dev->rx->stats.limit; i++) {
+ entry = &rt2x00dev->rx->entry[i];
+
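+ /*
+ * Attach the entry's skb to the urb for the bulk-in
+ * endpoint, then hand the entry over to the device.
+ */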
+ usb_fill_bulk_urb(entry->priv, usb_dev,
+ usb_rcvbulkpipe(usb_dev, 1),
+ entry->skb->data, entry->skb->len,
+ rt2x00usb_interrupt_rxdone, entry);
+
+ __set_bit(ENTRY_OWNER_NIC, &entry->flags);
+ usb_submit_urb(entry->priv, GFP_ATOMIC);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00usb_enable_radio);
+
+void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_ring *ring;
+ unsigned int i;
+
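+ /*
+ * Request the device to stop RX (USB_RX_CONTROL = 0)
+ * before the pending urbs are killed.
+ */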
+ rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000,
+ REGISTER_TIMEOUT);
+
+ /*
+ * Cancel all pending urbs on all rings.
+ */
+ ring_for_each(rt2x00dev, ring) {
+ for (i = 0; i < ring->stats.limit; i++)
+ usb_kill_urb(ring->entry[i].priv);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
+