/*
 * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
#ifndef __CF_BITOPS__
#define __CF_BITOPS__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

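/*
 * Bit numbering note: these operations address the containing byte as
 * ((volatile char *)vaddr)[(nr ^ 31) >> 3] and the bit within it as
 * (nr & 7).  On this big-endian core that maps bit 0 to the least
 * significant bit of the first 32-bit longword, matching the plain C
 * test_bit() below.  Each API macro dispatches on __builtin_constant_p(nr):
 * a constant bit number can be encoded as an immediate in the
 * bset/bclr/bchg instruction, while a variable one must live in a data
 * register.  The "test" variants append "sne %0", so they return 0 when
 * the bit was clear and a nonzero value (0xff) when it was set.  The
 * non-atomic double-underscore variants are simply aliased to the atomic
 * implementations here.
 */
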
#define test_and_set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	__constant_coldfire_test_and_set_bit(nr, vaddr) : \
	__generic_coldfire_test_and_set_bit(nr, vaddr))

static __inline__ int __constant_coldfire_test_and_set_bit(int nr,
		volatile void *vaddr)
{
	char retval;
	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
	__asm__ __volatile__ ("bset %2,(%4); sne %0"
		: "=d" (retval), "=m" (*p)
		: "di" (nr & 7), "m" (*p), "a" (p));
	return retval;
}

static __inline__ int __generic_coldfire_test_and_set_bit(int nr,
		volatile void *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
		: "d" (nr)
		: "memory");
	return retval;
}
#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)

#define set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	__constant_coldfire_set_bit(nr, vaddr) : \
	__generic_coldfire_set_bit(nr, vaddr))

static __inline__ void __constant_coldfire_set_bit(int nr,
		volatile void *vaddr)
{
	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
	__asm__ __volatile__ ("bset %1,(%3)"
		: "=m" (*p) : "di" (nr & 7), "m" (*p), "a" (p));
}

static __inline__ void __generic_coldfire_set_bit(int nr,
		volatile void *vaddr)
{
	__asm__ __volatile__ ("bset %1,%0"
		: "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
		: "d" (nr)
		: "memory");
}
#define __set_bit(nr, vaddr) set_bit(nr, vaddr)

#define test_and_clear_bit(nr, vaddr) \
	(__builtin_constant_p(nr) ? \
	__constant_coldfire_test_and_clear_bit(nr, vaddr) : \
	__generic_coldfire_test_and_clear_bit(nr, vaddr))

static __inline__ int __constant_coldfire_test_and_clear_bit(int nr,
		volatile void *vaddr)
{
	char retval;
	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];

	__asm__ __volatile__ ("bclr %2,(%4); sne %0"
		: "=d" (retval), "=m" (*p)
		: "id" (nr & 7), "m" (*p), "a" (p));

	return retval;
}

static __inline__ int __generic_coldfire_test_and_clear_bit(int nr,
		volatile void *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
		: "d" (nr & 7)
		: "memory");

	return retval;
}
#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

#define clear_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	__constant_coldfire_clear_bit(nr, vaddr) : \
	__generic_coldfire_clear_bit(nr, vaddr))

static __inline__ void __constant_coldfire_clear_bit(int nr,
		volatile void *vaddr)
{
	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
	__asm__ __volatile__ ("bclr %1,(%3)"
		: "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p));
}

static __inline__ void __generic_coldfire_clear_bit(int nr,
		volatile void *vaddr)
{
	__asm__ __volatile__ ("bclr %1,%0"
		: "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
		: "d" (nr)
		: "memory");
}
#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)

#define test_and_change_bit(nr, vaddr) \
	(__builtin_constant_p(nr) ? \
	__constant_coldfire_test_and_change_bit(nr, vaddr) : \
	__generic_coldfire_test_and_change_bit(nr, vaddr))

static __inline__ int __constant_coldfire_test_and_change_bit(int nr,
		volatile void *vaddr)
{
	char retval;
	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];

	__asm__ __volatile__ ("bchg %2,(%4); sne %0"
		: "=d" (retval), "=m" (*p)
		: "id" (nr & 7), "m" (*p), "a" (p));

	return retval;
}

static __inline__ int __generic_coldfire_test_and_change_bit(int nr,
		volatile void *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
		: "id" (nr)
		: "memory");

	return retval;
}
#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)

#define change_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	__constant_coldfire_change_bit(nr, vaddr) : \
	__generic_coldfire_change_bit(nr, vaddr))

static __inline__ void __constant_coldfire_change_bit(int nr,
		volatile void *vaddr)
{
	volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
	__asm__ __volatile__ ("bchg %1,(%3)"
		: "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p));
}

static __inline__ void __generic_coldfire_change_bit(int nr,
		volatile void *vaddr)
{
	__asm__ __volatile__ ("bchg %1,%0"
		: "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
		: "d" (nr)
		: "memory");
}

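/*
 * Plain C read: the (nr >> 5) longword / (1UL << (nr & 31)) mask view is
 * equivalent, on this big-endian layout, to the byte addressing used by
 * the assembly helpers above.
 */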
static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}

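/*
 * ffz(): index of the first (least significant) zero bit.  For an
 * all-ones 32-bit word the loop runs off the end and returns 32;
 * find_next_zero_bit() below then yields a result >= size, which
 * callers treat as "not found".
 */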
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}

/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'.  This is largely based
 * on Linus's ALPHA routines.
 */
static __inline__ unsigned long find_next_zero_bit(void *addr,
		unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;	/* set the out-of-range high bits so ffz() skips them */
found_middle:
	return result + ffz(tmp);
}
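
/*
 * Minimal usage sketch (hypothetical values): find the first free slot
 * in a 64-bit allocation map.
 *
 *	unsigned long map[2] = { ~0UL, 0x0000ffffUL };
 *	unsigned long bit = find_first_zero_bit(map, 64);
 *
 * Word 0 is full and word 1 has bits 0-15 set, so 'bit' is 48; a return
 * value >= 64 would have meant "no free slot".
 */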

#define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \
		(size), 0)

/* Ported from include/linux/bitops.h */
static __inline__ int ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
#define __ffs(x) (ffs(x) - 1)
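
/*
 * Note the differing conventions: ffs() is 1-based with ffs(0) == 0,
 * while __ffs() is 0-based and must not be called with x == 0 (it
 * would evaluate to -1 here).
 */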

/* find_next_bit - find the next set bit in a memory region
 * (from asm-ppc/bitops.h)
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		tmp = *p++;
		if (tmp != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
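
/*
 * Usage sketch: the canonical bitmap-iteration loop built from these
 * primitives ('map', 'nbits' and 'handle' are hypothetical placeholders).
 *
 *	unsigned long bit;
 *	for (bit = find_first_bit(map, nbits); bit < nbits;
 *	     bit = find_next_bit(map, nbits, bit + 1))
 *		handle(bit);	-- visits each set bit in ascending order
 */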

#ifdef __KERNEL__

/* Ported from include/linux/bitops.h */
static __inline__ int fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

static inline int __fls(int x)
{
	return fls(x) - 1;
}

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#define minix_find_first_zero_bit(addr, size) find_next_zero_bit((addr), \
		(size), 0)
#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr), \
		(unsigned long *)(addr))
#define minix_set_bit(nr, addr) set_bit((nr), \
		(unsigned long *)(addr))
#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr), \
		(unsigned long *)(addr))

static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr)
{
	int *a = (int *)vaddr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, \
		(unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, \
		(unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, \
		(unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, \
		(unsigned long *)(addr))
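
/*
 * The '^ 24' converts an ext2 (little-endian byte bitmap) bit number to
 * this core's native numbering: it flips the byte position within the
 * 32-bit longword while leaving the bit-within-byte untouched.  For
 * example, ext2 bit 0 becomes native bit 24, which the helpers above
 * address as byte (24 ^ 31) >> 3 == 0, bit 0 -- the first byte of the
 * bitmap, as ext2 expects.
 */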

static inline int ext2_test_bit(int nr, const void *vaddr)
{
	const unsigned char *p = vaddr;
	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
}

static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res;

	if (!size)
		return 0;

	size = (size >> 5) + ((size & 31) > 0);
	while (*p++ == ~0UL) {
		if (--size == 0)
			return (p - addr) << 5;
	}

	--p;
	for (res = 0; res < 32; res++)
		if (!ext2_test_bit(res, p))
			break;
	return (p - addr) * 32 + res;
}

static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
		unsigned offset)
{
	const unsigned long *addr = vaddr;
	const unsigned long *p = addr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* Look for zero in first longword */
		for (res = bit; res < 32; res++)
			if (!ext2_test_bit(res, p))
				return (p - addr) * 32 + res;
		p++;
	}
	/* No zero yet, search remaining full longwords for a zero */
	res = ext2_find_first_zero_bit(p, size - 32 * (p - addr));
	return (p - addr) * 32 + res;
}

#endif /* __KERNEL__ */

#endif /* __CF_BITOPS__ */