atheros: nuke 2.6.26 support, 2.6.28 is stable
[openwrt.git] / package/madwifi/patches/124-linux24_compat.patch
--- a/ath/if_athvar.h
+++ b/ath/if_athvar.h
@@ -126,6 +126,11 @@ typedef void irqreturn_t;
 #define ATH_GET_NETDEV_DEV(ndev) ((ndev)->class_dev.dev)
 #endif

+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#define NETDEV_TX_BUSY 1
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
 static inline struct net_device *_alloc_netdev(int sizeof_priv, const char *mask,
         void (*setup)(struct net_device *))
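
The NETDEV_TX_OK/NETDEV_TX_BUSY fallback above lets the transmit path use the 2.6-style return codes on 2.4 kernels that predate them. As a rough sketch of how a driver's hard_start_xmit handler returns these codes; every mydrv_* name below is hypothetical and not taken from madwifi:

static int mydrv_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mydrv_softc *sc = dev->priv;     /* 2.4-era private pointer; illustrative only */

        if (mydrv_tx_ring_full(sc)) {           /* hypothetical ring-state check */
                /* No free descriptors: stop the queue so the stack requeues the skb later. */
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        mydrv_queue_frame(sc, skb);             /* hypothetical enqueue helper */
        return NETDEV_TX_OK;                    /* skb is now owned by the driver */
}
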
--- a/ath/if_ath_radar.c
+++ b/ath/if_ath_radar.c
@@ -92,6 +92,13 @@
 #define nofloat_pct(_value, _pct) \
         ( (_value * (1000 + _pct)) / 1000 )

+#ifndef list_for_each_entry_reverse
+#define list_for_each_entry_reverse(pos, head, member) \
+        for (pos = list_entry((head)->prev, typeof(*pos), member); \
+             prefetch(pos->member.prev), &pos->member != (head); \
+             pos = list_entry(pos->member.prev, typeof(*pos), member))
+#endif
+
 struct radar_pattern_specification {
         /* The name of the rule/specification (i.e. what did we detect) */
         const char *name;
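
The list_for_each_entry_reverse fallback above mirrors the upstream <linux/list.h> definition for kernels that lack it. A small illustrative example of walking a list tail-first; the struct and field names are invented for the sketch and do not appear in if_ath_radar.c:

struct pulse_record {
        u64 tsf;
        struct list_head list;
};

static void dump_pulses_newest_first(struct list_head *pulses)
{
        struct pulse_record *p;

        /* Iterate from the most recently added entry back to the oldest one. */
        list_for_each_entry_reverse(p, pulses, list)
                printk(KERN_DEBUG "pulse tsf=%llu\n", (unsigned long long)p->tsf);
}
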
--- a/ath/if_ath.c
+++ b/ath/if_ath.c
@@ -4705,6 +4705,46 @@ ath_beacon_setup(struct ath_softc *sc, s
 #undef USE_SHPREAMBLE
 }

+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+        int ret;
+        unsigned long flags;
+
+        local_irq_save(flags);
+        ret = v->counter;
+        if (likely(ret == old))
+                v->counter = new;
+        local_irq_restore(flags);
+
+        return ret;
+}
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+#endif
+
 /*
  * Generate beacon frame and queue cab data for a VAP.
  */
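
The atomic_add_unless() fallback above is most often used as "take a reference unless the count has already dropped to zero". A minimal sketch of that idiom; the counter and function names are illustrative and not part of if_ath.c:

static atomic_t example_refcnt = ATOMIC_INIT(1);

/* Grab a reference unless the object is already being torn down (count == 0).
 * Returns non-zero on success, zero if the caller must not touch the object. */
static int example_try_get(void)
{
        return atomic_add_unless(&example_refcnt, 1, 0);
}
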
--- /dev/null
+++ b/net80211/sort.c
@@ -0,0 +1,120 @@
+/*
+ * A fast, small, non-recursive O(nlog n) sort for the Linux kernel
+ *
+ * Jan 23 2005  Matt Mackall <mpm@selenic.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+static void u32_swap(void *a, void *b, int size)
+{
+        u32 t = *(u32 *)a;
+        *(u32 *)a = *(u32 *)b;
+        *(u32 *)b = t;
+}
+
+static void generic_swap(void *a, void *b, int size)
+{
+        char t;
+
+        do {
+                t = *(char *)a;
+                *(char *)a++ = *(char *)b;
+                *(char *)b++ = t;
+        } while (--size > 0);
+}
+
+/**
+ * sort - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp: pointer to comparison function
+ * @swap: pointer to swap function or NULL
+ *
+ * This function does a heapsort on the given array. You may provide a
+ * swap function optimized to your element type.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * qsort is about 20% faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+
+static void sort(void *base, size_t num, size_t size,
+                 int (*cmp)(const void *, const void *),
+                 void (*swap)(void *, void *, int size))
+{
+        /* pre-scale counters for performance */
+        int i = (num/2 - 1) * size, n = num * size, c, r;
+
+        if (!swap)
+                swap = (size == 4 ? u32_swap : generic_swap);
+
+        /* heapify */
+        for ( ; i >= 0; i -= size) {
+                for (r = i; r * 2 + size < n; r = c) {
+                        c = r * 2 + size;
+                        if (c < n - size && cmp(base + c, base + c + size) < 0)
+                                c += size;
+                        if (cmp(base + r, base + c) >= 0)
+                                break;
+                        swap(base + r, base + c, size);
+                }
+        }
+
+        /* sort */
+        for (i = n - size; i >= 0; i -= size) {
+                swap(base, base + i, size);
+                for (r = 0; r * 2 + size < i; r = c) {
+                        c = r * 2 + size;
+                        if (c < i - size && cmp(base + c, base + c + size) < 0)
+                                c += size;
+                        if (cmp(base + r, base + c) >= 0)
+                                break;
+                        swap(base + r, base + c, size);
+                }
+        }
+}
+
+EXPORT_SYMBOL(sort);
+
+#if 0
+/* a simple boot-time regression test */
+
+int cmpint(const void *a, const void *b)
+{
+        return *(int *)a - *(int *)b;
+}
+
+static int sort_test(void)
+{
+        int *a, i, r = 1;
+
+        a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
+        BUG_ON(!a);
+
+        printk("testing sort()\n");
+
+        for (i = 0; i < 1000; i++) {
+                r = (r * 725861) % 6599;
+                a[i] = r;
+        }
+
+        sort(a, 1000, sizeof(int), cmpint, NULL);
+
+        for (i = 0; i < 999; i++)
+                if (a[i] > a[i+1]) {
+                        printk("sort() failed!\n");
+                        break;
+                }
+
+        kfree(a);
+
+        return 0;
+}
+
+module_init(sort_test);
+#endif
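
net80211/sort.c is a copy of the kernel's lib/sort.c heapsort, carried here so the code can sort on 2.4 kernels that do not provide it. Assuming the sort() prototype is visible to the caller, a hedged usage sketch with an invented element type and comparison function:

struct chan_entry {                     /* illustrative only, not a madwifi structure */
        u16 freq;
        u16 flags;
};

static int cmp_freq(const void *a, const void *b)
{
        /* Ascending frequency; u16 values fit in int, so plain subtraction is safe. */
        return ((const struct chan_entry *)a)->freq -
               ((const struct chan_entry *)b)->freq;
}

static void sort_channels(struct chan_entry *chans, size_t nchans)
{
        /* Sort chans[] in place; passing NULL selects the generic byte-wise swap. */
        sort(chans, nchans, sizeof(chans[0]), cmp_freq, NULL);
}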