/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory");						\
	})

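/*
 * A note on the magic numbers used by these macros: in the ARM CPSR,
 * bit 7 (0x80, the "#128" immediate) is the I bit, which masks IRQs
 * when set, and bit 6 (0x40, the "#64" immediate) is the F bit, which
 * masks FIQs when set.  Writing "msr cpsr_c" updates only the control
 * field of the CPSR, so the condition flags are left untouched.
 */
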
/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Enable FIQs
 */
#define __stf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable FIQs
 */
#define __clf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags\n"	\
	: "=r" (x)						\
	:							\
	: "memory");						\
	})

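/*
 * Illustrative sketch: the saved value is simply the CPSR, so the
 * I bit can be tested directly (the variable names here are made up
 * for the example):
 *
 *	unsigned long flags;
 *	int irqs_masked;
 *
 *	local_save_flags(flags);
 *	irqs_masked = (flags & 128) != 0;
 */
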
/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory")

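/*
 * Typical usage of the save/restore pair around a critical section
 * (illustrative sketch):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not be interrupted ...
 *	local_irq_restore(flags);
 *
 * Since local_irq_restore() writes the whole control field back, it
 * also restores the FIQ mask state captured by local_irq_save().
 */
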
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	/* Work-around (1) from above: emulate the swap with IRQs off */
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	/* Use the atomic swap instruction directly */
	case 1:	__asm__ __volatile__ ("swpb	%0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
		break;
	case 4:	__asm__ __volatile__ ("swp	%0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
		break;
#endif
	default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
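
/*
 * Callers normally reach __xchg() through an xchg() wrapper macro
 * rather than invoking it directly.  A minimal sketch of the
 * conventional wrapper, as Linux's <asm-arm/system.h> defines it
 * (shown here only for illustration):
 *
 *	#define xchg(ptr, x)					\
 *		((__typeof__(*(ptr)))__xchg((unsigned long)(x),	\
 *					    (ptr), sizeof(*(ptr))))
 */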

#endif