cns3xxx: add hardware RAID acceleration (XOR / RAID6 syndrome & recovery) support
[openwrt.git] / target / linux / cns3xxx / patches-2.6.31 / 206-cns3xxx_raid_support.patch
1 --- a/crypto/xor.c
2 +++ b/crypto/xor.c
3 @@ -25,6 +25,26 @@
4 /* The xor routines to use. */
5 static struct xor_block_template *active_template;
6
7 +#ifdef CONFIG_CNS3XXX_RAID
8 +extern void do_cns_rdma_xorgen(unsigned int src_no, unsigned int bytes,
9 + void **bh_ptr, void *dst_ptr);
10 +/**
11 + * xor_blocks - one pass xor
12 + * @src_count: source count
13 + * @bytes: length in bytes
14 + * @dest: dest
15 + * @srcs: srcs
16 + *
17 + * Desc:
18 + * 1. dest = xor(srcs[0...src_count-1]) within one calc
19 + * 2. don't care if dest also be placed in srcs list or not.
20 + */
21 +void xor_blocks(unsigned int src_count, unsigned int bytes, void *dest,
22 + void **srcs)
23 +{
24 + do_cns_rdma_xorgen(src_count, bytes, srcs, dest);
25 +}
26 +#else
27 void
28 xor_blocks(unsigned int src_count, unsigned int bytes, void *dest, void **srcs)
29 {
30 @@ -51,6 +71,7 @@ xor_blocks(unsigned int src_count, unsig
31 p4 = (unsigned long *) srcs[3];
32 active_template->do_5(bytes, dest, p1, p2, p3, p4);
33 }
34 +#endif /* CONFIG_CNS3XXX_RAID */
35 EXPORT_SYMBOL(xor_blocks);
36
37 /* Set of all registered templates. */
38 @@ -95,7 +116,11 @@ do_xor_speed(struct xor_block_template *
39 speed / 1000, speed % 1000);
40 }
41
42 +#ifdef CONFIG_CNS3XXX_RAID
43 +int
44 +#else
45 static int __init
46 +#endif /* CONFIG_CNS3XXX_RAID */
47 calibrate_xor_blocks(void)
48 {
49 void *b1, *b2;
50 @@ -139,7 +164,10 @@ calibrate_xor_blocks(void)
51 if (f->speed > fastest->speed)
52 fastest = f;
53 }
54 -
55 +#ifdef CONFIG_CNS3XXX_RAID
56 + /* preferred */
57 + fastest = template_list;
58 +#endif /* CONFIG_CNS3XXX_RAID */
59 printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
60 fastest->name, fastest->speed / 1000, fastest->speed % 1000);
61
62 @@ -151,10 +179,20 @@ calibrate_xor_blocks(void)
63 return 0;
64 }
65
66 -static __exit void xor_exit(void) { }
67 +#ifndef CONFIG_CNS3XXX_RAID
68 +static __exit void xor_exit(void)
69 +{
70 +}
71 +#endif /* ! CONFIG_CNS3XXX_RAID */
72
73 MODULE_LICENSE("GPL");
74
75 +#ifdef CONFIG_CNS3XXX_RAID
76 +/*
77 + * Calibrate in R5 init.
78 + */
79 +#else
80 /* when built-in xor.o must initialize before drivers/md/md.o */
81 core_initcall(calibrate_xor_blocks);
82 module_exit(xor_exit);
83 +#endif /* ! CONFIG_CNS3XXX_RAID */
84 --- a/drivers/md/Makefile
85 +++ b/drivers/md/Makefile
86 @@ -17,7 +17,7 @@ raid6_pq-y += raid6algos.o raid6recov.o
87 raid6int8.o raid6int16.o raid6int32.o \
88 raid6altivec1.o raid6altivec2.o raid6altivec4.o \
89 raid6altivec8.o \
90 - raid6mmx.o raid6sse1.o raid6sse2.o
91 + raid6mmx.o raid6sse1.o raid6sse2.o raid6cns.o
92 hostprogs-y += mktables
93
94 # Note: link order is important. All raid personalities
95 --- a/drivers/md/raid5.c
96 +++ b/drivers/md/raid5.c
97 @@ -1817,11 +1817,30 @@ static void compute_block_2(struct strip
98 compute_parity6(sh, UPDATE_PARITY);
99 return;
100 } else {
101 +#ifdef CONFIG_CNS3XXX_RAID
102 + void *ptrs[disks];
103 +
104 + count = 0;
105 + i = d0_idx;
106 + do {
107 + ptrs[count++] = page_address(sh->dev[i].page);
108 + i = raid6_next_disk(i, disks);
109 + if (i != dd_idx1 && i != dd_idx2 &&
110 + !test_bit(R5_UPTODATE, &sh->dev[i].flags))
111 + printk
112 + ("compute_2 with missing block %d/%d\n",
113 + count, i);
114 + } while (i != d0_idx);
115 +
116 + raid6_dataq_recov(disks, STRIPE_SIZE, faila, ptrs);
117 +#else
118 +
119 /* We're missing D+Q; recompute D from P */
120 compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
121 dd_idx2 : dd_idx1),
122 0);
123 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
124 +#endif /* CONFIG_CNS3XXX_RAID */
125 return;
126 }
127 }
128 @@ -5412,8 +5431,21 @@ static struct mdk_personality raid4_pers
129 .quiesce = raid5_quiesce,
130 };
131
132 +#ifdef CONFIG_CNS3XXX_RAID
133 +extern int calibrate_xor_blocks(void);
134 +#endif /* CONFIG_CNS3XXX_RAID */
135 +
136 static int __init raid5_init(void)
137 {
138 +
139 +#ifdef CONFIG_CNS3XXX_RAID
140 + /* Just execute calibrate xor blocks */
141 + int e;
142 + e = calibrate_xor_blocks();
143 + if (e)
144 + return e;
145 +#endif /* CONFIG_CNS3XXX_RAID */
146 +
147 register_md_personality(&raid6_personality);
148 register_md_personality(&raid5_personality);
149 register_md_personality(&raid4_personality);
150 --- a/drivers/md/raid6algos.c
151 +++ b/drivers/md/raid6algos.c
152 @@ -49,6 +49,9 @@ extern const struct raid6_calls raid6_al
153 extern const struct raid6_calls raid6_altivec2;
154 extern const struct raid6_calls raid6_altivec4;
155 extern const struct raid6_calls raid6_altivec8;
156 +#ifdef CONFIG_CNS3XXX_RAID
157 +extern const struct raid6_calls raid6_cns_raid;
158 +#endif /* CONFIG_CNS3XXX_RAID */
159
160 const struct raid6_calls * const raid6_algos[] = {
161 &raid6_intx1,
162 @@ -78,6 +81,11 @@ const struct raid6_calls * const raid6_a
163 &raid6_altivec4,
164 &raid6_altivec8,
165 #endif
166 +#ifdef CONFIG_CNS3XXX_RAID
167 + /* CNS3000 HW RAID acceleration */
168 + &raid6_cns_raid,
169 +#endif /* CONFIG_CNS3XXX_RAID */
170 +
171 NULL
172 };
173
174 @@ -125,7 +133,9 @@ int __init raid6_select_algo(void)
175 if ( !(*algo)->valid || (*algo)->valid() ) {
176 perf = 0;
177
178 +#ifndef CONFIG_CNS3XXX_RAID
179 preempt_disable();
180 +#endif
181 j0 = jiffies;
182 while ( (j1 = jiffies) == j0 )
183 cpu_relax();
184 @@ -134,7 +144,9 @@ int __init raid6_select_algo(void)
185 (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
186 perf++;
187 }
188 +#ifndef CONFIG_CNS3XXX_RAID
189 preempt_enable();
190 +#endif
191
192 if ( (*algo)->prefer > bestprefer ||
193 ((*algo)->prefer == bestprefer &&
194 --- /dev/null
195 +++ b/drivers/md/raid6cns.c
196 @@ -0,0 +1,38 @@
197 +/*
198 + * raid6cns.c
199 + *
200 + * CNS3xxx xor & gen_syndrome functions
201 + *
202 + */
203 +
204 +#ifdef CONFIG_CNS3XXX_RAID
205 +
206 +#include <linux/raid/pq.h>
207 +
208 +extern void do_cns_rdma_gfgen(unsigned int src_no, unsigned int bytes, void **bh_ptr,
209 + void *p_dst, void *q_dst);
210 +
211 +/**
212 + * raid6_cnsraid_gen_syndrome - CNSRAID Syndrome Generate
213 + *
214 + * @disks: raid disks
215 + * @bytes: length
216 + * @ptrs: already arranged stripe ptrs,
217 + * disk0=[0], diskNNN=[disks-3],
218 + * P/Q=[z0+1] & [z0+2], or, [disks-2], [disks-1]
219 + */
220 +static void raid6_cnsraid_gen_syndrome(int disks, size_t bytes, void **ptrs)
221 +{
222 + do_cns_rdma_gfgen(disks - 2, bytes, ptrs, ptrs[disks-2], ptrs[disks-1]);
223 +}
224 +
225 +const struct raid6_calls raid6_cns_raid = {
226 + raid6_cnsraid_gen_syndrome, /* callback */
227 + NULL, /* always valid */
228 + "CNS-RAID", /* name */
229 + 1 /* preferred: revise it to "0" to compare/compete with others algos */
230 +};
231 +
232 +EXPORT_SYMBOL(raid6_cns_raid);
233 +
234 +#endif /* CONFIG_CNS3XXX_RAID */
235 --- a/drivers/md/raid6recov.c
236 +++ b/drivers/md/raid6recov.c
237 @@ -20,6 +20,136 @@
238
239 #include <linux/raid/pq.h>
240
241 +#ifdef CONFIG_CNS3XXX_RAID
242 +#define R6_RECOV_PD 1
243 +#define R6_RECOV_DD 2
244 +#define R6_RECOV_DQ 3
245 +extern void do_cns_rdma_gfgen_pd_dd_dq(unsigned int src_no, unsigned int bytes,
246 + void **bh_ptr, void *w1_dst,
247 + void *w2_dst, int pd_dd_qd,
248 + unsigned int w1_idx, unsigned int w2_idx,
249 + unsigned int *src_idx);
250 +
251 +/**
252 + * @disks: nr_disks
253 + * @bytes: len
254 + * @faila: 1st failed DD
255 + * @ptrs: ptrs by order {d0, d1, ..., da, ..., dn, P, Q}
256 + *
257 + * Desc:
258 + * new_read_ptrs = {d0, d1, ... dn, Q}
259 + * dd1 = faila
260 + * p_dst = P
261 + */
262 +void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs)
263 +{
264 + int cnt = 0;
265 + int count = 0;
266 + void *p_dst, *q;
267 + void *dd1_dst;
268 + void *new_read_ptrs[disks - 2];
269 + unsigned int read_idx[disks - 2];
270 +
271 + q = ptrs[disks - 1];
272 + p_dst = ptrs[disks - 2];
273 + dd1_dst = ptrs[faila];
274 +
275 + while (cnt < disks) {
276 + if (cnt != faila && cnt != disks - 2) {
277 + new_read_ptrs[count] = ptrs[cnt];
278 + read_idx[count] = cnt;
279 + count++;
280 + }
281 + cnt++;
282 + }
283 +
284 + do_cns_rdma_gfgen_pd_dd_dq(disks - 2, bytes,
285 + new_read_ptrs, p_dst, dd1_dst,
286 + R6_RECOV_PD, disks - 1, faila + 1, read_idx);
287 +}
288 +
289 +/**
290 + * @disks: nr_disks
291 + * @bytes: len
292 + * @faila: 1st failed DD
293 + * @failb: 2nd failed DD
294 + * @ptrs: ptrs by order {d0, d1, ..., da, ..., db, ..., dn, P, Q}
295 + *
296 + * Desc:
297 + * new_read_ptrs = {d0, d1, ... dn, P, Q}
298 + * dd1_dst = faila
299 + * dd2_dst = failb
300 + */
301 +void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
302 + void **ptrs)
303 +{
304 +
305 + int cnt = 0;
306 + int count = 0;
307 + void *p, *q;
308 + void *dd1_dst, *dd2_dst;
309 + void *new_read_ptrs[disks - 2];
310 + unsigned int read_idx[disks - 2];
311 +
312 + q = ptrs[disks - 1];
313 + p = ptrs[disks - 2];
314 + dd1_dst = ptrs[faila];
315 + dd2_dst = ptrs[failb];
316 +
317 + while (cnt < disks) {
318 + if (cnt != faila && cnt != failb) {
319 + new_read_ptrs[count] = ptrs[cnt];
320 + read_idx[count] = cnt;
321 + count++;
322 + }
323 + cnt++;
324 + }
325 +
326 + do_cns_rdma_gfgen_pd_dd_dq(disks - 2, bytes,
327 + new_read_ptrs, dd1_dst, dd2_dst,
328 + R6_RECOV_DD, faila + 1, failb + 1, read_idx);
329 +}
330 +
331 +/**
332 + * @disks: nr_disks
333 + * @bytes: len
334 + * @faila: 1st failed DD
335 + * @ptrs: ptrs by order {d0, d1, ..., da, ..., dn, P, Q}
336 + *
337 + * Desc:
338 + * new_read_ptrs = {d0, d1, ... dn, P}
339 + * dd1 = faila
340 + * q_dst = Q
341 + */
342 +void raid6_dataq_recov(int disks, size_t bytes, int faila, void **ptrs)
343 +{
344 + int cnt = 0;
345 + int count = 0;
346 + void *q_dst, *p;
347 + void *dd1_dst;
348 + void *new_read_ptrs[disks - 2];
349 + unsigned int read_idx[disks - 2];
350 +
351 + p = ptrs[disks - 2];
352 + q_dst = ptrs[disks - 1];
353 + dd1_dst = ptrs[faila];
354 +
355 + while (cnt < disks) {
356 + if (cnt != faila && cnt != disks - 1) {
357 + new_read_ptrs[count] = ptrs[cnt];
358 + read_idx[count] = cnt;
359 + count++;
360 + }
361 + cnt++;
362 + }
363 +
364 + do_cns_rdma_gfgen_pd_dd_dq(disks - 2, bytes,
365 + new_read_ptrs, dd1_dst, q_dst,
366 + R6_RECOV_DQ, faila + 1, disks, read_idx);
367 +}
368 +
369 +#else /* CONFIG_CNS3XXX_RAID */
370 +
371 /* Recover two failed data blocks. */
372 void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
373 void **ptrs)
374 @@ -96,6 +226,7 @@ void raid6_datap_recov(int disks, size_t
375 }
376 }
377 EXPORT_SYMBOL_GPL(raid6_datap_recov);
378 +#endif /* CONFIG_CNS3XXX_RAID */
379
380 #ifndef __KERNEL__
381 /* Testing only */
382 --- a/include/linux/raid/pq.h
383 +++ b/include/linux/raid/pq.h
384 @@ -100,6 +100,9 @@ void raid6_2data_recov(int disks, size_t
385 void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs);
386 void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
387 void **ptrs);
388 +#ifdef CONFIG_CNS3XXX_RAID
389 +void raid6_dataq_recov(int disks, size_t bytes, int faila, void **ptrs);
390 +#endif /* CONFIG_CNS3XXX_RAID */
391
392 /* Some definitions to allow code to be compiled for testing in userspace */
393 #ifndef __KERNEL__
394 --- a/include/linux/raid/xor.h
395 +++ b/include/linux/raid/xor.h
396 @@ -1,7 +1,11 @@
397 #ifndef _XOR_H
398 #define _XOR_H
399
400 +#ifdef CONFIG_CNS3XXX_RAID
401 +#define MAX_XOR_BLOCKS 32
402 +#else
403 #define MAX_XOR_BLOCKS 4
404 +#endif /* CONFIG_CNS3XXX_RAID */
405
406 extern void xor_blocks(unsigned int count, unsigned int bytes,
407 void *dest, void **srcs);
408 --- a/mm/mempool.c
409 +++ b/mm/mempool.c
410 @@ -250,6 +250,28 @@ repeat_alloc:
411 }
412 EXPORT_SYMBOL(mempool_alloc);
413
414 +#ifdef CONFIG_CNS3XXX_RAID
415 +/**
416 + * acs_mempool_alloc - allocate an element from a specific memory pool
417 + * @pool: pointer to the memory pool which was allocated via
418 + * mempool_create().
419 + *
420 + * this function differs from mempool_alloc by directly allocating an element
421 + * from @pool without calling @pool->alloc().
422 + */
423 +void *acs_mempool_alloc(mempool_t * pool)
424 +{
425 + unsigned long flags;
426 + void *element = NULL;
427 +
428 + spin_lock_irqsave(&pool->lock, flags);
429 + if (likely(pool->curr_nr))
430 + element = remove_element(pool);
431 + spin_unlock_irqrestore(&pool->lock, flags);
432 + return element;
433 +}
434 +#endif /* CONFIG_CNS3XXX_RAID */
435 +
436 /**
437 * mempool_free - return an element to the pool.
438 * @element: pool element pointer.
This page took 0.063958 seconds and 5 git commands to generate.