1 | /* |
---|
2 | This file is part of MutekP. |
---|
3 | |
---|
4 | MutekP is free software; you can redistribute it and/or modify it |
---|
5 | under the terms of the GNU General Public License as published by |
---|
6 | the Free Software Foundation; either version 2 of the License, or |
---|
7 | (at your option) any later version. |
---|
8 | |
---|
9 | MutekP is distributed in the hope that it will be useful, but |
---|
10 | WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
12 | General Public License for more details. |
---|
13 | |
---|
14 | You should have received a copy of the GNU General Public License |
---|
15 | along with MutekP; if not, write to the Free Software Foundation, |
---|
16 | Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
---|
17 | |
---|
18 | UPMC / LIP6 / SOC (c) 2009 |
---|
19 | Copyright Ghassan Almaless <ghassan.almaless@gmail.com> |
---|
20 | */ |
---|
21 | |
---|
22 | #ifndef _CPU_SYSCALL_H_ |
---|
23 | #define _CPU_SYSCALL_H_ |
---|
24 | |
---|
25 | #include <sys/types.h> |
---|
26 | |
---|
27 | |
---|
/* Install the thread-local storage base pointer into register $27 (k1).
 * NOTE(review): $27 is conventionally a kernel-reserved register on MIPS;
 * this port appears to dedicate it to the TLS base — confirm against the
 * kernel's register-usage convention. */
static inline void cpu_set_tls(void *ptr)
{
  asm volatile ("or $27, $0, %0" :: "r" ((unsigned long)ptr));
}
---|
32 | |
---|
/* Read back the thread-local storage base pointer stored in $27 (k1)
 * by cpu_set_tls(). */
static inline void* cpu_get_tls(void)
{
  register unsigned long ptr;
  asm volatile ("or %0, $0, $27" :"=&r" (ptr));
  return (void*) ptr;
}
---|
39 | |
---|
/* Invalidate the data-cache line containing *ptr, then drain the write
 * buffer.  0x11 is the MIPS CACHE operation code "Hit Invalidate (D)";
 * it is passed as an immediate ("i" constraint) as the instruction
 * requires. */
static inline void cpu_invalid_dcache_line(void *ptr)
{
  __asm__ volatile
    ("cache %0, (%1)                    \n"
     "sync                              \n"
     : : "i" (0x11) , "r" (ptr)
     );
}
---|
48 | |
---|
/* Compare-and-swap: atomically set *ptr to `new` iff *ptr == old.
 * Returns non-zero when the swap succeeded, 0 otherwise.  A failed
 * store-conditional is NOT retried here — the caller is expected to
 * loop (see cpu_spinlock_lock()).
 *
 * An ordinary lw pre-check filters out the common "value already
 * changed" case before entering the ll/sc sequence.  The block runs
 * under .set noreorder, so each li after a bne is an explicit delay
 * slot (it executes whether or not the branch is taken; on the
 * fall-through path %0 is overwritten afterwards).  Note that the
 * early-exit label 1: sits after the trailing sync, so failed attempts
 * skip that barrier. */
static inline bool_t cpu_atomic_cas(void *ptr, sint_t old, sint_t new)
{
  bool_t isAtomic;

  __asm__ volatile
    (".set noreorder                    \n"
     "sync                              \n"
     "or    $8,   $0,   %3              \n"   /* $8 <- new value                */
     "lw    $3,   (%1)                  \n"   /* cheap non-locked pre-check     */
     "bne   $3,   %2,   1f              \n"   /* *ptr != old: fail fast         */
     "li    %0,   0                     \n"   /* delay slot: assume failure     */
     "ll    $3,   (%1)                  \n"   /* load-linked                    */
     "bne   $3,   %2,   1f              \n"   /* raced: value changed under us  */
     "li    %0,   0                     \n"   /* delay slot: assume failure     */
     "sc    $8,   (%1)                  \n"   /* store-conditional              */
     "or    %0,   $8,   $0              \n"   /* result: 1 = stored, 0 = lost   */
     "sync                              \n"
     ".set reorder                      \n"
     "1:                                \n"
     : "=&r" (isAtomic): "r" (ptr), "r" (old) , "r" (new) : "$3", "$8"
     );

  return isAtomic;
}
---|
73 | |
---|
/* Atomically add `val` to *ptr and return the value *ptr held BEFORE
 * the addition (fetch-and-add).  Retries the ll/sc pair until the
 * store-conditional succeeds, then drains the write buffer and
 * invalidates the local copy of the cache line (CACHE op 0x11 =
 * Hit Invalidate D) so later reads refetch from memory. */
static inline sint_t cpu_atomic_add(void *ptr, sint_t val)
{
  sint_t current;

  __asm__ volatile
    ("1:                                \n"
     "ll    %0,   (%1)                  \n"   /* current = *ptr (linked)    */
     "addu  $3,   %0,   %2              \n"
     "sc    $3,   (%1)                  \n"
     "beq   $3,   $0,   1b              \n"   /* sc failed -> retry         */
     "sync                              \n"
     "cache 0x11, (%1)                  \n"
     :"=&r"(current) : "r" (ptr), "r" (val) : "$3"
     );

  return current;
}
---|
91 | |
---|
92 | static inline bool_t cpu_spinlock_trylock(void *lock) |
---|
93 | { |
---|
94 | register uint_t retval; |
---|
95 | |
---|
96 | retval = false; |
---|
97 | |
---|
98 | //if(*((volatile uint_t *)lock) == 0) |
---|
99 | retval = cpu_atomic_cas(lock, 0, 1); |
---|
100 | |
---|
101 | if(retval == false) return true; |
---|
102 | |
---|
103 | cpu_invalid_dcache_line(lock); |
---|
104 | return false; |
---|
105 | } |
---|
106 | |
---|
107 | static inline void cpu_spinlock_lock(void *lock) |
---|
108 | { |
---|
109 | register bool_t retval; |
---|
110 | |
---|
111 | while(1) |
---|
112 | { |
---|
113 | if(*((volatile uint_t *)lock) == 0) |
---|
114 | { |
---|
115 | retval = cpu_atomic_cas(lock, 0, 1); |
---|
116 | if(retval == true) break; |
---|
117 | } |
---|
118 | } |
---|
119 | |
---|
120 | cpu_invalid_dcache_line(lock); |
---|
121 | } |
---|
122 | |
---|
/* Load one 32-bit word from *ptr through an explicit asm access,
 * forcing a real memory read (the compiler cannot cache or elide it). */
static inline uint_t cpu_load_word(void *ptr)
{
  register uint_t val;

  __asm__ volatile
    ("lw %0, 0(%1)                      \n"
     : "=&r" (val) : "r"(ptr));

  return val;
}
---|
133 | |
---|
134 | static inline void cpu_active_wait(uint_t val) |
---|
135 | { |
---|
136 | __asm__ volatile |
---|
137 | ("1: \n" |
---|
138 | "addiu $2, %0, -1 \n" |
---|
139 | "bne $2, $0, 1b \n" |
---|
140 | "nop \n" |
---|
141 | :: "r"(val) : "$2"); |
---|
142 | } |
---|
143 | |
---|
/* Drain the CPU write buffer (full memory barrier via MIPS sync). */
static inline void cpu_wbflush(void)
{
  __asm__ volatile
    ("sync                              \n"::);
}
---|
149 | |
---|
#if 0
/* DEAD CODE: legacy ll/sc implementation of cpu_spinlock_trylock,
 * superseded by the cpu_atomic_cas-based version defined earlier in
 * this header.  Kept disabled for reference only — consider deleting. */
/* Try to take a spinlock */
static inline bool_t cpu_spinlock_trylock (void *lock)
{
  register bool_t state = 0;

  __asm__ volatile
    ("or    $2,   $0,   %1              \n"
     "1:                                \n"
     "ll    $3,   ($2)                  \n"
     ".set noreorder                    \n"
     "beq   $3,   $0,   2f              \n"
     "or    %0,   $0,   $3              \n"
     "j     3f                          \n"
     "2:                                \n"
     "ori   $3,   $0,   1               \n"
     "sc    $3,   ($2)                  \n"
     "nop                               \n"
     "beq   $3,   $0,   1b              \n"
     "sync                              \n"
     "3:                                \n"
     ".set reorder                      \n"
     : "=&r" (state) : "r" (lock) : "$2","$3"
     );

  return state;
}
#endif
---|
178 | |
---|
/* Unlock a spinlock: barrier, clear the lock word with a plain store,
 * barrier again so the release is globally visible before any
 * subsequent memory access from this CPU. */
static inline void cpu_spinlock_unlock (void *lock)
{
  __asm__ volatile
    ("or    $2,   $0,   %0              \n"
     "sync                              \n"
     "sw    $0,   0($2)                 \n"
     "sync                              \n": : "r" (lock) : "$2"
     );
}
---|
189 | |
---|
190 | |
---|
/* System call trampoline: passes up to four opaque arguments plus the
 * service number to the kernel and returns the service's result.
 * NOTE(review): defined elsewhere (presumably the architecture's
 * assembly support file) — confirm the calling convention there. */
void* cpu_syscall(void *arg0, void *arg1, void *arg2, void *arg3, int service_nr);

/* Load the global-pointer register ($28/gp) with `val`. */
#define CPU_SET_GP(val)	\
	asm volatile ("add $28, $0, %0": :"r" ((val)))
---|
196 | |
---|
197 | |
---|
198 | #endif /* _CPU_SYSCALL_H_ */ |
---|