/*
 * U-Boot - system.h
 *
 * Copyright (c) 2005-2007 Analog Devices Inc.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#ifndef _BLACKFIN_SYSTEM_H
#define _BLACKFIN_SYSTEM_H

#include <linux/config.h>	/* get configuration macros */
#include <asm/linkage.h>
#include <asm/blackfin.h>
#include <asm/segment.h>
#include <asm/entry.h>

#define prepare_to_switch()	do { } while (0)

/*
 * switch_to(prev, next, last) switches to the task pointed to by
 * next, first checking that next isn't the current task, in which
 * case it does nothing.  This also clears the TS-flag if the task we
 * switched to last used the math co-processor.
 *
 * 05/25/01 - Tony Kou (tonyko@lineo.ca)
 *
 * Adapted for Blackfin (ADI) by Ted Ma, Metrowerks, and Motorola GSG
 * Copyright (c) 2002 Arcturus Networks Inc. (www.arcturusnetworks.com)
 * Copyright (c) 2003 Metrowerks (www.metrowerks.com)
 */

asmlinkage void resume(void);

#define switch_to(prev, next, last) { \
	void *_last; \
	__asm__ __volatile__( \
		"r0 = %1;\n\t" \
		"r1 = %2;\n\t" \
		"call resume;\n\t" \
		"%0 = r0;\n\t" \
		: "=d" (_last) \
		: "d" (prev), \
		  "d" (next) \
		: "CC", "R0", "R1", "R2", "R3", "R4", "R5", "P0", "P1"); \
	(last) = _last; \
}

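/*
 * Illustrative usage (a sketch, not part of the original header): a
 * scheduler core would invoke switch_to() roughly as below.  The
 * prev/next pointers and the pick_next_task() helper are hypothetical
 * names, not defined by this header.
 *
 *	struct task_struct *prev = current, *next = pick_next_task();
 *	struct task_struct *last;
 *
 *	if (prev != next)
 *		switch_to(prev, next, last);
 */
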
/* Force kernel switch to user mode -- Steven Chen */
#define switch_to_user_mode() { \
	__asm__ __volatile__( \
		"call kernel_to_user_mode;\n\t" \
		: : \
		: "CC", "R0", "R1", "R2", "R3", "R4", "R5", "P0", "P1"); \
}

/*
 * Interrupt configuring macros.
 */

extern int irq_flags;

#define __sti() { \
	__asm__ __volatile__ ( \
		"r3 = %0;" \
		"sti r3;" \
		: : "m" (irq_flags) : "R3"); \
}

#define __cli() { \
	__asm__ __volatile__ ( \
		"cli r3;" \
		: : : "R3"); \
}

/* The asm writes the saved flags into x, so x must be an output
 * ("=m") operand rather than an input. */
#define __save_flags(x) { \
	__asm__ __volatile__ ( \
		"cli r3;" \
		"%0 = r3;" \
		"sti r3;" \
		: "=m" (x) : : "R3"); \
}

#define __save_and_cli(x) { \
	__asm__ __volatile__ ( \
		"cli r3;" \
		"%0 = r3;" \
		: "=m" (x) : : "R3"); \
}

#define __restore_flags(x) { \
	__asm__ __volatile__ ( \
		"r3 = %0;" \
		"sti r3;" \
		: : "m" (x) : "R3"); \
}

/* For spinlocks etc */
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)

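/*
 * Illustrative usage (a sketch, not part of the original header): a
 * short critical section protects shared state from interrupts.  The
 * shared_counter variable is a hypothetical example.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	shared_counter++;
 *	local_irq_restore(flags);
 */
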
/*
 * Force strict CPU ordering.
 */
#define nop()			asm volatile ("nop;\n\t" : : )
#define mb()			asm volatile ("" : : : "memory")
#define rmb()			asm volatile ("" : : : "memory")
#define wmb()			asm volatile ("" : : : "memory")
#define set_rmb(var, value)	do { xchg(&var, value); } while (0)
#define set_mb(var, value)	set_rmb(var, value)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

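/*
 * Illustrative pairing (a sketch, not part of the original header):
 * a producer publishes data before raising a flag, and the consumer
 * orders its reads to match.  The data/ready variables and compute()
 * and use() helpers are hypothetical names.
 *
 *	data = compute();		producer side
 *	wmb();
 *	ready = 1;
 *
 *	while (!ready)			consumer side
 *		;
 *	rmb();
 *	use(data);
 */
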
#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr), 1))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp;
	unsigned long flags = 0;

	save_and_cli(flags);

	/* All sizes use the same 32-bit move sequence.  The memory
	 * operand is both read and written by the asm, so it is a
	 * "+m" operand rather than a plain input. */
	switch (size) {
	case 1:
	case 2:
	case 4:
		__asm__ __volatile__(
			"%0 = %1;\n\t"
			"%1 = %2;\n\t"
			: "=&d" (tmp), "+m" (*__xg(ptr))
			: "d" (x)
			: "memory");
		break;
	}
	restore_flags(flags);
	return tmp;
}

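/*
 * Illustrative usage (a sketch, not part of the original header):
 * tas() can back a crude test-and-set lock.  The lock variable and
 * its busy-wait loop are hypothetical.
 *
 *	static volatile unsigned long lock;
 *
 *	while (tas(&lock))		spin until we set 0 -> 1
 *		;
 *	... critical section ...
 *	lock = 0;			release
 */
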
/* Depends on whether Blackfin has a hard reset function */
/* YES it does, but it is tricky to implement - FIXME later ...MaTed--- */
#define HARD_RESET_NOW() ({})

#endif /* _BLACKFIN_SYSTEM_H */