/* Copyright (c) 2007-2009, Stanford University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Stanford University nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
|---|

/**
 * define __x86_64__ for x86-64
 * define CPU_V9 for sparc
 */
|---|
#ifndef ATOMIC_H
#define ATOMIC_H

#include <stdint.h>
#include <cpu-syscall.h>
|---|

/* Busy-wait for approximately n iterations, flushing the write buffer
 * between decrements. */
static inline void spin_wait(int n)
{
  volatile int tmp = n;

  while (tmp > 0)
  {
    tmp--;
    cpu_wbflush();
  }
}
|---|

/* Store y into x: flush the write buffer before the store and the cache
 * line holding x after it, so other CPUs observe the new value. */
#define set_and_flush(x,y)              \
  do {                                  \
    void*       p;                      \
    p = &(x);                           \
    cpu_wbflush();                      \
    (x) = (y);                          \
    flush(p);                           \
  } while (0)
|---|

/* a bunch of atomic ops follow... */

|---|
/* Apply a MIPS "cache" operation (op 0x11, D-cache) to the line containing
 * addr, then issue a sync memory barrier. */
static inline void flush(void* addr)
{
  __asm__ volatile
    ("cache    %0,     (%1)              \n"
     "sync                               \n"
     : : "i" (0x11) , "r" (addr)
    );
}
|---|

static inline uintptr_t atomic_read(void* addr) { return cpu_load_word(addr); }
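
/*
 * Illustrative usage sketch (not part of the original header): one CPU
 * publishes a word with set_and_flush() while another polls it with
 * atomic_read(), backing off briefly between reads.
 */
static inline void example_publish_flag(uintptr_t* flag)
{
  set_and_flush(*flag, 1);            /* store 1 and flush its cache line */
}

static inline void example_wait_for_flag(uintptr_t* flag)
{
  while (atomic_read(flag) == 0)      /* re-read the word from memory */
    spin_wait(100);                   /* short delay between polls */
}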
|---|

/* test_and_set (below) returns zero if already set, nonzero if it was not set */
/* we don't use bts because it is super-slow */

|---|
/* adds 1 to the value pointed to by 'n', returns the old value */
static inline unsigned int fetch_and_inc(unsigned int* n)
{
  return (unsigned) cpu_atomic_add(n, 1);
}
|---|

/* if *cmper == matcher, atomically store v into *cmper; returns true on swap */
static inline int cmp_and_swp(uintptr_t v, uintptr_t* cmper, uintptr_t matcher)
{
  return cpu_atomic_cas(cmper, matcher, v);
}
|---|

/* atomically change *n from 0 to 1; returns nonzero if we made the change */
static inline int test_and_set(uintptr_t* n)
{
  return cmp_and_swp(1, n, 0);
}
|---|
/**
 * @n - value to store
 * @v - location to store into
 * @return previous value in v
 */
static inline uintptr_t atomic_xchg(uintptr_t n, uintptr_t* v)
{
  uintptr_t old;
  bool_t isAtomic;

  do
  {
    old = cpu_load_word(v);
    /* retry if *v changed between the load and the CAS */
    isAtomic = cpu_atomic_cas(v, old, n);
  } while (!isAtomic);

  return old;
}
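
/*
 * Illustrative sketch (not part of the original header): a minimal spinlock
 * built from the primitives above, assuming a lock word of 0 = free and
 * 1 = held.
 */
static inline void example_spin_lock(uintptr_t* lock)
{
  while (!test_and_set(lock))     /* nonzero return means we took the lock */
    spin_wait(100);               /* back off briefly before retrying */
}

static inline void example_spin_unlock(uintptr_t* lock)
{
  atomic_xchg(0, lock);           /* atomically write 0 to release the lock */
}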
|---|

#endif /* ATOMIC_H */
// vim: ts=4
|---|