/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <linux/config.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

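/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * releasing a hypothetical lock bit so the clear is ordered after the
 * critical section.  'lock_word' and the use of bit 0 are assumptions made
 * only for this example.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &lock_word);
 */
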
/*
 * Only disable interrupts for kernel mode code; usermode code that dares
 * to use kernel include files is kept alive by the empty definitions below.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			__cli()
#define __bi_save_flags(x)		__save_flags(x)
#define __bi_save_and_cli(x)		__save_and_cli(x)
#define __bi_restore_flags(x)		__restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt- and SMP-safe and
 * interrupt friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

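/*
 * Usage sketch (editorial illustration, not from the original source):
 * marking descriptor 37 as busy in a driver-private bitmap.  The name
 * 'busy_map' and its size are assumptions made only for this example.
 *
 *	static unsigned long busy_map[4];	(tracks bits 0..127)
 *
 *	set_bit(37, busy_map);
 *	...
 *	clear_bit(37, busy_map);
 */
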
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}

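/*
 * Editorial note: all of these helpers decompose @nr into a word index
 * (nr >> 5) and a bit position within that 32-bit word (nr & 31).  For
 * example, nr = 37 addresses word 1 and uses the mask 1UL << 5 = 0x20.
 */
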
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

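/*
 * Usage sketch (editorial illustration, not from the original source):
 * test_and_set_bit() is commonly used as a simple claim/ownership flag.
 * 'DEV_FLAG_RUNNING', 'dev_flags' and start_device() are assumptions made
 * only for this example.
 *
 *	if (test_and_set_bit(DEV_FLAG_RUNNING, &dev_flags))
 *		return;		(someone else already started it)
 *	start_device();
 */
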
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}

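/*
 * Usage sketch (editorial illustration, reusing the hypothetical 'busy_map'
 * from the set_bit() example above):
 *
 *	if (test_bit(37, busy_map))
 *		printf("descriptor 37 is still busy\n");
 */
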
#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in first byte
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full bytes for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

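/*
 * Usage sketch (editorial illustration, not from the original source):
 * scanning a 128-bit allocation map for the first free slot.  'alloc_map'
 * is a hypothetical bitmap; 128 is its size in bits.
 *
 *	int slot = find_first_zero_bit(alloc_map, 128);
 *	if (slot < 128)
 *		set_bit(slot, alloc_map);	(claim the free slot)
 */
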
#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int __res;
	unsigned int mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}

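/*
 * Editorial note: ffz() returns the index of the least significant zero
 * bit, e.g. ffz(0x0000ffff) == 16 and ffz(0) == 0.  Callers must make sure
 * the argument is not ~0UL, for which the result is undefined.
 */
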
#ifdef __KERNEL__

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

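/*
 * Editorial note: the hamming-weight helpers simply count set bits, e.g.
 * hweight32(0xf0f0f0f0) == 16 and hweight8(0x0f) == 4.
 */
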
#endif /* __KERNEL__ */

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

775/* Linus sez that gcc can optimize the following correctly, we'll see if this
776 * holds on the Sparc as it does for the ALPHA.
777 */
778
779#if 0 /* Fool kernel-doc since it doesn't do macros yet */
780/*
781 * find_first_zero_bit - find the first zero bit in a memory region
782 * @addr: The address to start the search at
783 * @size: The maximum size to search
784 *
785 * Returns the bit-number of the first zero bit, not the number of the byte
786 * containing a bit.
787 */
Shinya Kuribayashi47f6a362009-05-16 09:12:09 +0900788static int find_first_zero_bit (void *addr, unsigned size);
wdenk6069ff22003-02-28 00:49:47 +0000789#endif
790
791#define find_first_zero_bit(addr, size) \
wdenk8bde7f72003-06-27 21:31:46 +0000792 find_next_zero_bit((addr), (size), 0)
wdenk6069ff22003-02-28 00:49:47 +0000793
794#endif /* (__MIPSEB__) */
795
/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int mask;
	const unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

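/*
 * Editorial note: the ext2 helpers use byte-granular, little-endian bit
 * numbering so that on-disk bitmaps look the same regardless of CPU
 * endianness; e.g. ext2_set_bit(10, buf) sets bit 2 of byte 1 of the
 * buffer, independent of the native word order.
 */
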
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
	find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This severely limits the Minix filesystem's usefulness for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */