openwrt.git: target/linux/cns21xx/patches-2.6.37/001-arm-use-cache-alignment-from-asm-cahce-h.patch
From 248d9a5b63bba72bfc316b8a48c6163fce5acc22 Mon Sep 17 00:00:00 2001
From: Paulius Zaleckas <paulius.zaleckas@gmail.com>
Date: Thu, 18 Feb 2010 21:53:01 +0200
Subject: [PATCH] ARM: Use cache alignment from asm/cache.h

Make code more optimal for ARM variants with
different cache line size.

Signed-off-by: Paulius Zaleckas <paulius.zaleckas@gmail.com>
---
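For context: in ARM assembly, ".align n" pads to a 2^n-byte boundary, so the
old ".align 5" hard-codes a 32-byte cache line. The shift taken from
asm/cache.h follows the kernel configuration instead. A minimal sketch of the
relevant definitions, assuming the 2.6.37-era arch/arm/include/asm/cache.h:

    /* arch/arm/include/asm/cache.h (sketch, not part of this patch) */
    #define L1_CACHE_SHIFT  CONFIG_ARM_L1_CACHE_SHIFT  /* e.g. 5 -> 32-byte, 6 -> 64-byte lines */
    #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)

With that, ".align L1_CACHE_SHIFT" expands to whatever line size the target
CPU's configuration selects rather than always 32 bytes.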
arch/arm/boot/compressed/head.S | 11 ++++++-----
arch/arm/include/asm/dma-mapping.h | 2 +-
arch/arm/kernel/entry-armv.S | 31 ++++++++++++++++---------------
arch/arm/kernel/entry-common.S | 7 ++++---
arch/arm/kernel/head.S | 3 ++-
arch/arm/kernel/vmlinux.lds.S | 5 +++--
arch/arm/lib/copy_page.S | 2 +-
arch/arm/lib/memchr.S | 3 ++-
arch/arm/lib/memset.S | 3 ++-
arch/arm/lib/memzero.S | 3 ++-
arch/arm/lib/strchr.S | 3 ++-
arch/arm/lib/strncpy_from_user.S | 3 ++-
arch/arm/lib/strnlen_user.S | 3 ++-
arch/arm/lib/strrchr.S | 3 ++-
arch/arm/mm/abort-ev4.S | 3 ++-
arch/arm/mm/abort-nommu.S | 3 ++-
16 files changed, 51 insertions(+), 37 deletions(-)

--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
+#include <asm/cache.h>

/*
* Debugging stuff
@@ -355,7 +356,7 @@ params: ldr r0, =0x10000100 @ params_p
* This routine must preserve:
* r4, r5, r6, r7, r8
*/
- .align 5
+ .align L1_CACHE_SHIFT
cache_on: mov r3, #8 @ cache_on function
b call_cache_fn

@@ -544,7 +545,7 @@ __common_mmu_cache_on:
mcr p15, 0, r3, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c3, c0, 0 @ load domain access control
b 1f
- .align 5 @ cache line aligned
+ .align L1_CACHE_SHIFT @ cache line aligned
1: mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back to
sub pc, lr, r0, lsr #32 @ properly flush pipeline
@@ -563,7 +564,7 @@ __common_mmu_cache_on:
* r8 = atags pointer
* r9-r12,r14 = corrupted
*/
- .align 5
+ .align L1_CACHE_SHIFT
reloc_start: add r9, r5, r0
sub r9, r9, #128 @ do not copy the stack
debug_reloc_start
@@ -793,7 +794,7 @@ proc_types:
* This routine must preserve:
* r4, r6, r7
*/
- .align 5
+ .align L1_CACHE_SHIFT
cache_off: mov r3, #12 @ cache_off function
b call_cache_fn

@@ -868,7 +869,7 @@ __armv3_mmu_cache_off:
* This routine must preserve:
* r0, r4, r5, r6, r7
*/
- .align 5
+ .align L1_CACHE_SHIFT
cache_clean_flush:
mov r3, #16
b call_cache_fn
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -23,6 +23,7 @@
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
+#include <asm/cache.h>

#include "entry-header.S"

@@ -167,7 +168,7 @@ ENDPROC(__und_invalid)
stmia r5, {r0 - r4}
.endm

- .align 5
+ .align L1_CACHE_SHIFT
__dabt_svc:
svc_entry

@@ -215,7 +216,7 @@ __dabt_svc:
UNWIND(.fnend )
ENDPROC(__dabt_svc)

- .align 5
+ .align L1_CACHE_SHIFT
__irq_svc:
svc_entry

@@ -259,7 +260,7 @@ svc_preempt:
b 1b
#endif

- .align 5
+ .align L1_CACHE_SHIFT
__und_svc:
#ifdef CONFIG_KPROBES
@ If a kprobe is about to simulate a "stmdb sp..." instruction,
@@ -305,7 +306,7 @@ __und_svc:
UNWIND(.fnend )
ENDPROC(__und_svc)

- .align 5
+ .align L1_CACHE_SHIFT
__pabt_svc:
svc_entry

@@ -341,7 +342,7 @@ __pabt_svc:
UNWIND(.fnend )
ENDPROC(__pabt_svc)

- .align 5
+ .align L1_CACHE_SHIFT
.LCcralign:
.word cr_alignment
#ifdef MULTI_DABORT
@@ -414,7 +415,7 @@ ENDPROC(__pabt_svc)
#endif
.endm

- .align 5
+ .align L1_CACHE_SHIFT
__dabt_usr:
usr_entry
kuser_cmpxchg_check
@@ -446,7 +447,7 @@ __dabt_usr:
UNWIND(.fnend )
ENDPROC(__dabt_usr)

- .align 5
+ .align L1_CACHE_SHIFT
__irq_usr:
usr_entry
kuser_cmpxchg_check
@@ -475,7 +476,7 @@ ENDPROC(__irq_usr)

.ltorg

- .align 5
+ .align L1_CACHE_SHIFT
__und_usr:
usr_entry

@@ -691,7 +692,7 @@ __und_usr_unknown:
b do_undefinstr
ENDPROC(__und_usr_unknown)

- .align 5
+ .align L1_CACHE_SHIFT
__pabt_usr:
usr_entry

@@ -805,7 +806,7 @@ ENDPROC(__switch_to)
#endif
.endm

- .align 5
+ .align L1_CACHE_SHIFT
.globl __kuser_helper_start
__kuser_helper_start:

@@ -845,7 +846,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
smp_dmb
usr_ret lr

- .align 5
+ .align L1_CACHE_SHIFT

/*
* Reference prototype:
@@ -972,7 +973,7 @@ kuser_cmpxchg_fixup:

#endif

- .align 5
+ .align L1_CACHE_SHIFT

/*
* Reference prototype:
@@ -1050,7 +1051,7 @@ __kuser_helper_end:
* of which is copied into r0 for the mode specific abort handler.
*/
.macro vector_stub, name, mode, correction=0
- .align 5
+ .align L1_CACHE_SHIFT

vector_\name:
.if \correction
@@ -1181,7 +1182,7 @@ __stubs_start:
.long __und_invalid @ e
.long __und_invalid @ f

- .align 5
+ .align L1_CACHE_SHIFT

/*=============================================================================
* Undefined FIQs
@@ -1211,7 +1212,7 @@ vector_addrexcptn:
* We group all the following data together to optimise
* for CPUs with separate I & D caches.
*/
- .align 5
+ .align L1_CACHE_SHIFT

.LCvswi:
.word vector_swi
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -10,13 +10,14 @@

#include <asm/unistd.h>
#include <asm/ftrace.h>
+#include <asm/cache.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


- .align 5
+ .align L1_CACHE_SHIFT
/*
* This is the fast syscall return path. We do as little as
* possible here, and this includes saving r0 back into the SVC
@@ -260,7 +261,7 @@ ENDPROC(ftrace_stub)
#define A710(code...)
#endif

- .align 5
+ .align L1_CACHE_SHIFT
ENTRY(vector_swi)
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
@@ -404,7 +405,7 @@ __sys_trace_return:
bl syscall_trace
b ret_slow_syscall

- .align 5
+ .align L1_CACHE_SHIFT
#ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object
__cr_alignment:
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -21,6 +21,7 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>
+#include <asm/cache.h>

#ifdef CONFIG_DEBUG_LL
#include <mach/debug-macro.S>
@@ -373,7 +374,7 @@ ENDPROC(__enable_mmu)
*
* other registers depend on the function called upon completion
*/
- .align 5
+ .align L1_CACHE_SHIFT
__turn_mmu_on:
mov r0, r0
mcr p15, 0, r0, c1, c0, 0 @ write control reg
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -7,6 +7,7 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
+#include <asm/cache.h>

#define PROC_INFO \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -17,7 +17,7 @@
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))

.text
- .align 5
+ .align L1_CACHE_SHIFT
/*
* StrongARM optimised copy_page routine
* now 1.78bytes/cycle, was 1.60 bytes/cycle (50MHz bus -> 89MB/s)
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -11,9 +11,10 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>

.text
- .align 5
+ .align L1_CACHE_SHIFT
ENTRY(memchr)
1: subs r2, r2, #1
bmi 2f
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -11,9 +11,10 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>

.text
- .align 5
+ .align L1_CACHE_SHIFT
.word 0

1: subs r2, r2, #4 @ 1 do we have enough
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -9,9 +9,10 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>

.text
- .align 5
+ .align L1_CACHE_SHIFT
.word 0
/*
* Align the pointer in r0. r3 contains the number of bytes that we are
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -11,9 +11,10 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>

.text
- .align 5
+ .align L1_CACHE_SHIFT
ENTRY(strchr)
and r1, r1, #0xff
1: ldrb r2, [r0], #1
--- a/arch/arm/lib/strncpy_from_user.S
+++ b/arch/arm/lib/strncpy_from_user.S
@@ -10,9 +10,10 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
+#include <asm/cache.h>

.text
- .align 5
+ .align L1_CACHE_SHIFT

/*
* Copy a string from user space to kernel space.
--- a/arch/arm/lib/strnlen_user.S
+++ b/arch/arm/lib/strnlen_user.S
@@ -10,9 +10,10 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
+#include <asm/cache.h>

.text
- .align 5
+ .align L1_CACHE_SHIFT

/* Prototype: unsigned long __strnlen_user(const char *str, long n)
* Purpose : get length of a string in user memory
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -11,9 +11,10 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>

.text
- .align 5
+ .align L1_CACHE_SHIFT
ENTRY(strrchr)
mov r3, #0
1: ldrb r2, [r0], #1
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -1,5 +1,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Function: v4_early_abort
*
@@ -17,7 +18,7 @@
* abort here if the I-TLB and D-TLB aren't seeing the same
* picture. Unfortunately, this does happen. We live with it.
*/
- .align 5
+ .align L1_CACHE_SHIFT
ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
--- a/arch/arm/mm/abort-nommu.S
+++ b/arch/arm/mm/abort-nommu.S
@@ -1,5 +1,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Function: nommu_early_abort
*
@@ -12,7 +13,7 @@
* Note: There is no FSR/FAR on !CPU_CP15_MMU cores.
* Just fill zero into the registers.
*/
- .align 5
+ .align L1_CACHE_SHIFT
ENTRY(nommu_early_abort)
mov r0, #0 @ clear r0, r1 (no FSR/FAR)
mov r1, #0