--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -912,6 +912,49 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
 	skb->tail += len;
 }
 
+/*
+ * CPUs often take a performance hit when accessing unaligned memory
+ * locations. The actual performance hit varies; it can be small if the
+ * hardware handles it, or large if we have to take an exception and fix it
+ * in software.
+ *
+ * Since an ethernet header is 14 bytes, network drivers often end up with
+ * the IP header at an unaligned offset. The IP header can be aligned by
+ * shifting the start of the packet by 2 bytes. Drivers should do this
+ * with:
+ *
+ * skb_reserve(NET_IP_ALIGN);
+ *
+ * The downside to this alignment of the IP header is that the DMA is now
+ * unaligned. On some architectures the cost of an unaligned DMA is high,
+ * and this cost outweighs the gains made by aligning the IP header.
+ *
+ * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
+ * to be overridden.
+ */
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN	2
+#endif
+
+/*
+ * The networking layer reserves some headroom in skb data (via
+ * dev_alloc_skb). This is used to avoid having to reallocate skb data when
+ * the header has to grow. In the default case, if the header has to grow
+ * by 16 bytes or less, we avoid the reallocation.
+ *
+ * Unfortunately this headroom changes the DMA alignment of the resulting
+ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
+ * on some architectures. An architecture can override this value,
+ * perhaps setting it to the size of a cacheline (since that will maintain
+ * cacheline alignment of the DMA). It must be a power of 2.
+ *
+ * Various parts of the networking layer expect at least 16 bytes of
+ * headroom; you should not reduce this.
+ */
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD	16
+#endif
+
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
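
To show how a driver would use NET_IP_ALIGN as the comment above suggests, here is a minimal sketch of a receive-path allocation; example_rx_alloc() and its parameters are hypothetical, not part of this patch:

/* Hypothetical RX allocation in an ethernet driver. Reserving
 * NET_IP_ALIGN bytes puts the 14-byte ethernet header at offset 2,
 * so the IP header that follows it lands on a 4-byte boundary.
 */
static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int pkt_len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	skb->dev = dev;
	return skb;	/* caller copies/DMAs pkt_len bytes into skb->data */
}

The copy (or DMA) into skb->data then starts at a misaligned address, which is exactly the trade-off the comment describes.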
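
For the NET_SKB_PAD override discussed above, an architecture where unaligned DMA is expensive could pad the headroom to a full cache line in its own headers; a sketch, assuming that architecture's L1_CACHE_BYTES is a power of 2 no smaller than 16:

/* Sketch of a per-architecture override (not part of this patch):
 * keep the DMA into skb data cacheline-aligned by reserving a whole
 * L1 cache line of headroom. Must be a power of 2 and at least 16.
 */
#define NET_SKB_PAD	L1_CACHE_BYTES

Such a definition would have to be visible before the #ifndef default above so that the generic fallback is skipped.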
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -185,22 +185,31 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
 {
 	struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) };
 	struct sk_buff *skb;
-	size_t len = count;
+	size_t len = count, align = 0;
 
 	if (!(tun->flags & TUN_NO_PI)) {
 		if ((len -= sizeof(pi)) > count)
 			return -EINVAL;
 
-		memcpy_fromiovec((void *)&pi, iv, sizeof(pi));
+		if (memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
+			return -EFAULT;
 	}
-
-	if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) {
+
+	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV)
+		align = NET_IP_ALIGN;
+
+	if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
 		tun->stats.rx_dropped++;
 		return -ENOMEM;
 	}
 
-	skb_reserve(skb, 2);
-	memcpy_fromiovec(skb_put(skb, len), iv, len);
+	if (align)
+		skb_reserve(skb, align);
+	if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
+		tun->stats.rx_dropped++;
+		kfree_skb(skb);
+		return -EFAULT;
+	}
 
 	skb->dev = tun->dev;
 	switch (tun->flags & TUN_TYPE_MASK) {
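
For context on the hunk above: the path being hardened is a plain writev() on the tun fd, where userspace prepends a struct tun_pi to each packet when TUN_NO_PI is clear. A minimal userspace sketch (send_packet() is hypothetical; device setup via TUNSETIFF is assumed):

#include <sys/uio.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

/* Hand one IP packet to the kernel through an already-configured
 * tun fd. tun_get_user() strips the tun_pi header, picks the skb
 * alignment, and now returns -EFAULT if either copy from this
 * iovec faults, instead of silently ignoring the error.
 */
static ssize_t send_packet(int tun_fd, const void *pkt, size_t len)
{
	struct tun_pi pi = { 0, htons(ETH_P_IP) };
	struct iovec iv[2] = {
		{ .iov_base = &pi,         .iov_len = sizeof(pi) },
		{ .iov_base = (void *)pkt, .iov_len = len },
	};

	return writev(tun_fd, iv, 2);
}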
@@ -271,7 +280,8 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, struct sk_buff *skb, struct iovec *iv, int len)
 			pi.flags |= TUN_PKT_STRIP;
 		}
 
-		memcpy_toiovec(iv, (void *) &pi, sizeof(pi));
+		if (memcpy_toiovec(iv, (void *) &pi, sizeof(pi)))
+			return -EFAULT;
 		total += sizeof(pi);
 	}
 
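
The read side gets the same treatment: with the return value of memcpy_toiovec() checked, a read into an unwritable buffer now fails with EFAULT rather than the error being dropped. A minimal counterpart to the sketch above, reusing its headers (recv_packet() is hypothetical):

/* Read one packet, preceded by its tun_pi header, from the tun fd.
 * After this patch, a bad destination buffer fails the read with
 * EFAULT instead of the memcpy_toiovec() error being ignored.
 */
static ssize_t recv_packet(int tun_fd, struct tun_pi *pi,
			   void *buf, size_t buflen)
{
	struct iovec iv[2] = {
		{ .iov_base = pi,  .iov_len = sizeof(*pi) },
		{ .iov_base = buf, .iov_len = buflen },
	};

	return readv(tun_fd, iv, 2);
}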