/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)                                       \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr        @ local_irq_save\n"             \
"       orr     %1, %0, #128\n"                                 \
"       msr     cpsr_c, %1"                                     \
        : "=r" (x), "=r" (temp)                                 \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Enable IRQs
 */
#define local_irq_enable()                                      \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr        @ local_irq_enable\n"           \
"       bic     %0, %0, #128\n"                                 \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Disable IRQs
 */
#define local_irq_disable()                                     \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr        @ local_irq_disable\n"          \
"       orr     %0, %0, #128\n"                                 \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Enable FIQs
 */
#define __stf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr        @ stf\n"                        \
"       bic     %0, %0, #64\n"                                  \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Disable FIQs
 */
#define __clf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr        @ clf\n"                        \
"       orr     %0, %0, #64\n"                                  \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)                                     \
        ({                                                      \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr        @ local_save_flags\n"           \
        : "=r" (x)                                              \
        :                                                       \
        : "memory");                                            \
        })

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)                                    \
        __asm__ __volatile__(                                   \
        "msr    cpsr_c, %0      @ local_irq_restore\n"          \
        :                                                       \
        : "r" (x)                                               \
        : "memory")

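/*
 * Usage sketch (illustrative addition, not part of the original
 * header): a critical section normally pairs local_irq_save() with
 * local_irq_restore(), so the caller's previous I/F mask state is
 * put back instead of IRQs being unconditionally re-enabled.
 */
#if 0   /* example only, not compiled */
static inline void example_critical_section(volatile int *counter)
{
        unsigned long flags;

        local_irq_save(flags);          /* save CPSR, mask IRQs (I bit, 0x80) */
        (*counter)++;                   /* read-modify-write is now atomic
                                           with respect to IRQ handlers */
        local_irq_restore(flags);       /* restore the saved mask state */
}
#endif
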
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif

        switch (size) {
#ifdef swp_is_buggy
        case 1:
                local_irq_save(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                local_irq_restore(flags);
                break;

        case 4:
                local_irq_save(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                local_irq_restore(flags);
                break;
#else
        case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
                                        : "=&r" (ret)
                                        : "r" (x), "r" (ptr)
                                        : "memory");
                break;
        case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
                                        : "=&r" (ret)
                                        : "r" (x), "r" (ptr)
                                        : "memory");
                break;
#endif
        default: __bad_xchg(ptr, size), ret = 0;
        }

        return ret;
}

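/*
 * Usage sketch (illustrative addition, not part of the original
 * header): callers usually reach __xchg() through an xchg() wrapper
 * that derives the size argument from the pointed-to type, in the
 * conventional Linux style shown below.
 */
#if 0   /* example only, not compiled */
#define xchg(ptr, x)                                            \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),  \
                                    sizeof(*(ptr))))

static inline int example_trylock(volatile unsigned long *lock)
{
        /* atomically store 1 and fetch the old value;
         * an old value of 0 means the lock was free */
        return __xchg(1, lock, sizeof(*lock)) == 0;
}
#endif
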
#endif