@@ -3239,7 +3239,6 @@ ath_hardstart(struct sk_buff *skb, struc
 	struct ath_softc *sc = dev->priv;
 	struct ieee80211_node *ni = NULL;
 	struct ath_buf *bf = NULL;
-	struct ether_header *eh;
 	struct ath_buf *tbf, *tempbf;
@@ -3251,6 +3250,7 @@ ath_hardstart(struct sk_buff *skb, struc
+	struct ether_header *eh;
 	struct ieee80211com *ic = &sc->sc_ic;
@@ -3316,27 +3316,9 @@ ath_hardstart(struct sk_buff *skb, struc
-	/* If the skb data is shared, we will copy it so we can strip padding
-	 * without affecting any other bridge ports. */
-	if (skb_cloned(skb)) {
-		/* Remember the original SKB so we can free up our references */
-		struct sk_buff *skb_new;
-		skb_new = skb_copy(skb, GFP_ATOMIC);
-		if (skb_new == NULL) {
-			DPRINTF(sc, ATH_DEBUG_XMIT,
-				"Dropping; skb_copy failure.\n");
-			/* No free RAM, do not requeue! */
-			goto hardstart_fail;
-		}
-		ieee80211_skb_copy_noderef(skb, skb_new);
-		ieee80211_dev_kfree_skb(&skb);
-		skb = skb_new;
-	}
 	eh = (struct ether_header *)skb->data;

 	/* NB: use this lock to protect an->an_tx_ffbuf (and txq->axq_stageq)
 	 * in athff_can_aggregate() call too. */
 	ATH_TXQ_LOCK_IRQ(txq);
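The deleted block deep-copied every cloned skb at the top of ath_hardstart(), before it was even known whether the frame would be modified. skb_copy() duplicates the sk_buff header and the entire data buffer; the rewritten output path below unshares only at the point where headers are actually written. A minimal sketch of that copy-on-write idea, assuming the caller holds the only reference to the sk_buff itself; example_make_writable() is a hypothetical name, not a function from this patch:

#include <linux/skbuff.h>

/*
 * Hypothetical helper: make an skb safe to modify in place.
 * skb_cloned() means only the *data* is shared with another clone;
 * pskb_expand_head() with zero extra head and tail room then
 * reallocates a private data buffer, which is far cheaper than the
 * unconditional skb_copy() the removed code performed.
 */
static int example_make_writable(struct sk_buff *skb)
{
	if (!skb_cloned(skb))
		return 0;	/* sole owner of the data already */
	/* 0/0: keep the current head/tail room, just unshare the data */
	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}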
--- a/net80211/ieee80211_output.c
+++ b/net80211/ieee80211_output.c
@@ -283,7 +283,7 @@ ieee80211_hardstart(struct sk_buff *skb,
 	if (vap->iv_xrvap && (ni == vap->iv_bss) &&
 	    vap->iv_xrvap->iv_sta_assoc) {
-		struct sk_buff *skb1 = skb_copy(skb, GFP_ATOMIC);
+		struct sk_buff *skb1 = skb_clone(skb, GFP_ATOMIC);
 		if (skb1) {
 			memset(SKB_CB(skb1), 0, sizeof(struct ieee80211_cb));
 #ifdef IEEE80211_DEBUG_REFCNT
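Here the duplicate handed to the XR VAP becomes a clone instead of a full copy. That is only safe because ieee80211_skbhdr_adjust(), patched below, now checks skb_cloned() and unshares with pskb_expand_head() before any header is written. The general duplicate-and-transmit shape is roughly the following; example_mirror_xmit() and the dev_queue_xmit() hand-off are illustrative, not the driver's actual XR path:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Sketch of the duplicate-and-send pattern: the clone shares the
 * payload, which is correct only as long as the transmit path
 * unshares before writing to it.
 */
static void example_mirror_xmit(struct sk_buff *skb, struct net_device *other)
{
	struct sk_buff *dup = skb_clone(skb, GFP_ATOMIC);

	if (!dup)
		return;			/* no memory: skip the mirror */
	dup->dev = other;
	dev_queue_xmit(dup);		/* consumes the clone */
}

skb_clone() allocates only a new sk_buff header, so the per-packet cost drops from a full payload copy to a small slab allocation.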
@@ -566,7 +566,7 @@ ieee80211_skbhdr_adjust(struct ieee80211
 	struct ieee80211_key *key, struct sk_buff *skb, int ismulticast)
 {
 	/* XXX pre-calculate per node? */
-	int need_headroom = LLC_SNAPFRAMELEN + hdrsize + IEEE80211_ADDR_LEN;
+	int need_headroom = LLC_SNAPFRAMELEN + hdrsize;
 	int need_tailroom = 0;
 #ifdef ATH_SUPERG_FF
 	int isff = ATH_FF_MAGIC_PRESENT(skb);
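need_headroom is the worst case the output path may prepend: an LLC/SNAP header (LLC_SNAPFRAMELEN, 8 bytes) plus the 802.11 header (hdrsize, 24 bytes for a 3-address data frame, 30 with a fourth address). The extra IEEE80211_ADDR_LEN apparently double-counted the fourth address already reflected in hdrsize, so every frame over-reserved 6 bytes. The rewritten code in the next hunk also treats the value as a deficit: with hdrsize = 24, an skb that already has 48 bytes of headroom yields 32 - 48 = -16, clamped to 0, so no reallocation happens at all. A minimal sketch of that deficit-and-clamp scheme; example_reserve() is a hypothetical helper, with 8 standing in for LLC_SNAPFRAMELEN:

#include <linux/skbuff.h>

/* Hypothetical helper showing the deficit-and-clamp scheme. */
static int example_reserve(struct sk_buff *skb, int hdrsize)
{
	int need = 8 + hdrsize - (int)skb_headroom(skb);

	if (need < 0)
		need = 0;		/* enough room already */
	if (need == 0 && !skb_cloned(skb))
		return 0;		/* nothing to do */
	return pskb_expand_head(skb, need, 0, GFP_ATOMIC);
}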
@@ -608,109 +608,56 @@ ieee80211_skbhdr_adjust(struct ieee80211
 			need_tailroom += cip->ic_miclen;
-	if (skb_shared(skb)) {
-		/* Take our own reference to the node in the clone */
-		ieee80211_ref_node(SKB_CB(skb)->ni);
-		/* Unshare the node, decrementing users in the old skb */
-		skb = skb_unshare(skb, GFP_ATOMIC);
+	need_headroom -= skb_headroom(skb);
+	if (isff)
+		need_tailroom -= skb_tailroom(skb2);
+	else
+		need_tailroom -= skb_tailroom(skb);
+
+	if (need_headroom < 0)
+		need_headroom = 0;
+	if (need_tailroom < 0)
+		need_tailroom = 0;
+
+	if (skb_cloned(skb) || (need_headroom > 0) ||
+	    (!isff && (need_tailroom > 0))) {
+		if (pskb_expand_head(skb, need_headroom, need_tailroom, GFP_ATOMIC)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+				"%s: cannot expand storage (tail)\n", __func__);
-			IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
-				"%s: cannot unshare for encapsulation\n",
-				__func__);
-			vap->iv_stats.is_tx_nobuf++;
-			ieee80211_dev_kfree_skb(&skb2);
+		inter_headroom -= skb_headroom(skb2);
+		if (inter_headroom < 0)
+			inter_headroom = 0;
+		if ((skb_cloned(skb2) ||
+		    (inter_headroom > 0) || (need_tailroom > 0))) {
-		/* first skb header */
-		if (skb_headroom(skb) < need_headroom) {
-			struct sk_buff *tmp = skb;
-			skb = skb_realloc_headroom(skb, need_headroom);
-			if (skb == NULL) {
+			if (pskb_expand_head(skb2, inter_headroom,
+					need_tailroom, GFP_ATOMIC)) {
 				IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
122 - "%s: cannot expand storage (head1)\n",
124 - vap->iv_stats.is_tx_nobuf++;
125 - ieee80211_dev_kfree_skb(&skb2);
128 - ieee80211_skb_copy_noderef(tmp, skb);
129 - ieee80211_dev_kfree_skb(&tmp);
130 - /* NB: cb[] area was copied, but not next ptr. must do that
131 - * prior to return on success. */
-		/* second skb with header and tail adjustments possible */
-		if (skb_tailroom(skb2) < need_tailroom) {
-			int n = 0;
-			if (inter_headroom > skb_headroom(skb2))
-				n = inter_headroom - skb_headroom(skb2);
-			if (pskb_expand_head(skb2, n,
-					need_tailroom - skb_tailroom(skb2), GFP_ATOMIC)) {
-				ieee80211_dev_kfree_skb(&skb2);
-				IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
-					"%s: cannot expand storage (tail2)\n",
-					__func__);
-				vap->iv_stats.is_tx_nobuf++;
-				/* this shouldn't happen, but don't send first ff either */
-				ieee80211_dev_kfree_skb(&skb);
+				"%s: cannot expand storage (tail)\n", __func__);
-		} else if (skb_headroom(skb2) < inter_headroom) {
-			struct sk_buff *tmp = skb2;
-			skb2 = skb_realloc_headroom(skb2, inter_headroom);
-			if (skb2 == NULL) {
-				IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
-					"%s: cannot expand storage (head2)\n",
-					__func__);
-				vap->iv_stats.is_tx_nobuf++;
-				/* this shouldn't happen, but don't send first ff either */
-				ieee80211_dev_kfree_skb(&skb);
-			ieee80211_skb_copy_noderef(tmp, skb2);
-			ieee80211_dev_kfree_skb(&tmp);
 #endif /* ATH_SUPERG_FF */
-			IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
-				"%s: cannot unshare for encapsulation\n", __func__);
-			vap->iv_stats.is_tx_nobuf++;
-	} else if (skb_tailroom(skb) < need_tailroom) {
-		int n = 0;
-		if (need_headroom > skb_headroom(skb))
-			n = need_headroom - skb_headroom(skb);
-		if (pskb_expand_head(skb, n, need_tailroom -
-				skb_tailroom(skb), GFP_ATOMIC)) {
-			IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
-				"%s: cannot expand storage (tail)\n", __func__);
-			vap->iv_stats.is_tx_nobuf++;
-			ieee80211_dev_kfree_skb(&skb);
-	} else if (skb_headroom(skb) < need_headroom) {
-		struct sk_buff *tmp = skb;
-		skb = skb_realloc_headroom(skb, need_headroom);
-		/* Increment reference count after copy */
-		if (skb == NULL) {
-			IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
-				"%s: cannot expand storage (head)\n", __func__);
-			vap->iv_stats.is_tx_nobuf++;
-		ieee80211_skb_copy_noderef(tmp, skb);
-		ieee80211_dev_kfree_skb(&tmp);
+	vap->iv_stats.is_tx_nobuf++;
+	ieee80211_dev_kfree_skb(&skb);
+#ifdef ATH_SUPERG_FF
+	ieee80211_dev_kfree_skb(&skb2);
+#endif

 #define KEY_UNDEFINED(k) ((k).wk_cipher == &ieee80211_cipher_none)
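The net effect of the large hunk: four separate reallocation branches (head and tail for the first skb, head and tail for the fast-frame second skb), each with its own error cleanup, collapse into per-skb pskb_expand_head() calls and one shared error exit. pskb_expand_head() grows the existing buffer in place, so the sk_buff header, its cb[] control block, and the node reference stored there all survive; skb_realloc_headroom() returns a different sk_buff, which is why the old code had to call ieee80211_skb_copy_noderef() and free the original on every path. A side-by-side sketch, with hypothetical helpers and simplified error handling:

#include <linux/skbuff.h>

/* Old style: replacement buffer, references must be migrated by hand. */
static struct sk_buff *old_style(struct sk_buff *skb, int headroom)
{
	struct sk_buff *tmp = skb_realloc_headroom(skb, headroom);

	if (!tmp) {
		kfree_skb(skb);
		return NULL;
	}
	/* cb[] and any node reference now live in *two* skbs and must
	 * be reconciled (ieee80211_skb_copy_noderef() in the driver). */
	kfree_skb(skb);
	return tmp;
}

/* New style: same sk_buff, only the data area is reallocated, so
 * cb[] and the node reference are untouched. */
static struct sk_buff *new_style(struct sk_buff *skb, int head, int tail)
{
	if (pskb_expand_head(skb, head, tail, GFP_ATOMIC)) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}

Keeping the same sk_buff is also what makes the single error exit appended above sufficient.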
--- a/net80211/ieee80211_input.c
+++ b/net80211/ieee80211_input.c
@@ -204,7 +204,6 @@ ieee80211_input(struct ieee80211vap * va
 	struct ieee80211_frame *wh;
 	struct ieee80211_key *key;
 	struct ether_header *eh;
-	struct sk_buff *skb2;
@@ -244,20 +243,6 @@ ieee80211_input(struct ieee80211vap * va
 		vap->iv_stats.is_rx_tooshort++;
-	/* Clone the SKB... we assume somewhere in this driver that we 'own'
-	 * the skbuff passed into hard start and we do a lot of messing with it
-	 * but bridges under some cases will not clone for the first pass of skb
-	 * to a bridge port, but will then clone for subsequent ones. This is
-	 * odd behavior but it means that if we have trashed the skb we are given
-	 * then other ports get clones of the residual garbage.
-	 */
-	if ((skb2 = skb_copy(skb, GFP_ATOMIC)) == NULL) {
-		vap->iv_devstats.tx_dropped++;
-		goto out;
-	}
-	ieee80211_skb_copy_noderef(skb, skb2);
-	ieee80211_dev_kfree_skb(&skb);
-	skb = skb2;

 	/*
 	 * Bit of a cheat here, we use a pointer for a 3-address
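The deleted comment documents a workaround: because a bridge may hand the same skb to several ports without cloning it first, the driver deep-copied every received frame up front so that later in-place edits could not corrupt what the other ports see. Dropping the copy assumes the rest of the input path now unshares before it writes, which is the standard receive-path idiom; a sketch, with example_rx_writable() as a hypothetical name:

#include <linux/skbuff.h>

/*
 * Only take a private copy at the point where the frame is actually
 * modified, and let read-only paths share the buffer.
 */
static struct sk_buff *example_rx_writable(struct sk_buff *skb)
{
	/* If someone else holds a reference, clone; then make the
	 * data itself private before any in-place edit. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}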
@@ -738,7 +723,7 @@ ieee80211_input(struct ieee80211vap * va
 		/* ether_type must be length as FF frames are always LLC/SNAP encap'd */
 		frame_len = ntohs(eh_tmp->ether_type);
-		skb1 = skb_copy(skb, GFP_ATOMIC);
+		skb1 = skb_clone(skb, GFP_ATOMIC);
 		ieee80211_skb_copy_noderef(skb, skb1);
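For fast-frame decapsulation the two subframes can share one payload because skb->data, skb->len and skb->tail live in the sk_buff header, which every clone owns privately; only the payload bytes are shared, and splitting merely trims one header and pulls the other. Roughly, with a hypothetical helper and lengths:

#include <linux/skbuff.h>

/*
 * Hypothetical split helper: trimming one subframe and pulling the
 * other never disturbs its sibling, since both operations only touch
 * per-clone header fields.
 */
static void example_split(struct sk_buff *first, unsigned int first_len)
{
	struct sk_buff *second = skb_clone(first, GFP_ATOMIC);

	if (!second)
		return;
	skb_trim(first, first_len);	/* keep only the first subframe */
	skb_pull(second, first_len);	/* skip past it in the clone */
	/* ... both frames would be handed up here ... */
	kfree_skb(second);		/* sketch only */
}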
@@ -1137,7 +1122,7 @@ ieee80211_deliver_data(struct ieee80211_
 	if (ETHER_IS_MULTICAST(eh->ether_dhost) && !netif_queue_stopped(dev)) {
 		/* Create a SKB for the BSS to send out. */
-		skb1 = skb_copy(skb, GFP_ATOMIC);
+		skb1 = skb_clone(skb, GFP_ATOMIC);
 		if (skb1)
 			SKB_CB(skb1)->ni = ieee80211_ref_node(vap->iv_bss);
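The same per-clone property covers this multicast echo: SKB_CB() lives in skb->cb[], which skb_clone() copies into the new sk_buff, so the clone can carry its own node reference without disturbing the frame being delivered upward. A toy illustration with a scratch control-block struct (example_cb is ours, not madwifi's):

#include <linux/skbuff.h>

struct example_cb {
	void *node;		/* stand-in for the driver's node pointer */
};

static void example_tag(struct sk_buff *orig, void *node)
{
	struct sk_buff *dup = skb_clone(orig, GFP_ATOMIC);

	if (!dup)
		return;
	/* cb[] was copied at clone time; this write is private to dup */
	((struct example_cb *)dup->cb)->node = node;
	kfree_skb(dup);		/* sketch only */
}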