Significantly improve Atomic.h by pulling in code from libatomic_ops by HP. This is a little outdated, but reasonably complete.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@71973 91177308-0d34-0410-b5e6-96231b3b80d8
resistor committed May 17, 2009
1 parent de52f86 commit 6962815
Showing 2 changed files with 159 additions and 40 deletions.
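For orientation (not part of the commit itself): the header keeps the same two entry points, llvm::sys::MemoryFence() and llvm::sys::CompareAndSwap(ptr, new_value, old_value), now backed by per-architecture inline assembly rather than compiler builtins. A minimal, hypothetical caller might look like this sketch:

#include "llvm/System/Atomic.h"

// Hypothetical caller, not part of this commit: publish a payload, then
// flip a flag another thread polls. cas_flag's underlying type varies by
// platform (uint32_t, unsigned long, DWORD), so always use the typedef.
static llvm::sys::cas_flag Ready = 0;
static int Payload = 0;

void Publish(int Value) {
  Payload = Value;
  llvm::sys::MemoryFence();                 // order the Payload store before the flag
  llvm::sys::CompareAndSwap(&Ready, 1, 0);  // set Ready to 1 only if it is still 0
}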
1 change: 1 addition & 0 deletions LICENSE.TXT
@@ -67,3 +67,4 @@ Autoconf llvm/autoconf
llvm/projects/sample/autoconf
CellSPU backend llvm/lib/Target/CellSPU/README.txt
Google Test llvm/utils/unittest/googletest
Atomics Library llvm/include/llvm/System/Atomic.h
198 changes: 158 additions & 40 deletions include/llvm/System/Atomic.h
@@ -9,28 +9,63 @@
//
// This file declares the llvm::sys atomic operations.
//
// Portions of this file use code from libatomic_ops, for which the following
// license applies:
//
// Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SYSTEM_ATOMIC_H
#define LLVM_SYSTEM_ATOMIC_H

#include <stdint.h>

#if defined(_HPUX_SOURCE) && defined(__ia64)
#include <machine/sys/inline.h>
#elif defined(_MSC_VER)
#include <windows.h>
#endif // defined(_HPUX_SOURCE) && defined(__ia64)


namespace llvm {
namespace sys {

inline void CompilerFence() {
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
__asm__ __volatile__("" : : : "memory");
#elif defined(_MSC_VER)
__asm { };
#elif defined(__INTEL_COMPILER)
__memory_barrier(); /* Too strong? IA64-only? */
#else
/* We conjecture that the following usually gives us the right */
/* semantics or an error. */
asm("");
#endif // defined(__GNUC__) && !defined(__INTEL_COMPILER)
}

#if !defined(ENABLE_THREADS) || ENABLE_THREADS == 0
inline void MemoryFence() {
CompilerFence();
}

typedef uint32_t cas_flag;
@@ -41,47 +76,130 @@ namespace llvm {
return result;
}

#elif defined(__GNUC__)

inline void MemoryFence() {
# if defined(__i386__) || defined(__x86_64__)
# if defined(__SSE2__)
__asm__ __volatile__("mfence" : : : "memory");
# else
unsigned char dummy = 0;
volatile unsigned char* addr = &dummy;
unsigned char oldval;
__asm__ __volatile__("xchgb %0, %1" : "=q"(oldval), "=m"(*addr)
: "0"(0xff), "m"(*addr) : "memory");
# endif // defined(__SSE2__)
# elif defined(__ia64__)
__asm__ __volatile__("mf" : : : "memory");
# elif defined(__alpha__)
__asm__ __volatile__("mb" : : : "memory");
# elif defined(__sparc__)
__asm__ __volatile__("membar #StoreStore | #LoadStore | #LoadLoad | #StoreLoad");
# elif defined(__powerpc__) || defined(__ppc__)
__asm__ __volatile__("sync" : : : "memory");
# elif defined(__arm__)
__asm__ __volatile__ ("mcr p15, 0, r0, c7, c10, 5 @ dmb");
# endif // defined(__i386__) || defined(__x86_64__)
}

typedef unsigned long cas_flag;
inline cas_flag CompareAndSwap(cas_flag* ptr,
cas_flag new_value,
cas_flag old_value) {
cas_flag prev;
# if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
# elif defined(__ia64__)
MemoryFence();
# if defined(_ILP32)
__asm__("zxt4 %1=%1": "=r"(prev) : "0"(prev));
__asm__ __volatile__("addp4 %1=0,%1;;\n"
"mov ar.ccv=%[old] ;; cmpxchg 4"
".acq %0=[%1],%[new_val],ar.ccv"
: "=r"(prev) "1"(addr),
: "=r"(addr), [new_value]"r"(new_value), [old_value]"r"(old_value)
: "memory");
# else
__asm__ __volatile__(
"mov ar.ccv=%[old] ;; cmpxchg 8"
".acq %0=[%1],%[new_val],ar.ccv"
: "=r"(prev)
: "r"(ptr), [new_value]"r"(new_value),
[old_value]"r"(old_value)
: "memory");
# endif // defined(_ILP32)
# elif defined(__alpha__)
cas_flag was_equal;
__asm__ __volatile__(
"1: ldq_l %0,%1\n"
" cmpeq %0,%4,%2\n"
" mov %3,%0\n"
" beq %2,2f\n"
" stq_c %0,%1\n"
" beq %0,1b\n"
"2:\n"
:"=&r" (prev), "=m" (*ptr), "=&r" (was_equal)
: "r" (new_value), "Ir" (old_value)
:"memory");
#elif defined(__sparc__)
#error No CAS implementation for SPARC yet.
#elif defined(__powerpc__) || defined(__ppc__)
int result = 0;
__asm__ __volatile__(
"1:lwarx %0,0,%2\n" /* load and reserve */
"cmpw %0, %4\n" /* if load is not equal to */
"bne 2f\n" /* old, fail */
"stwcx. %3,0,%2\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"li %1,1\n" /* result = 1; */
"2:\n"
: "=&r"(prev), "=&r"(result)
: "r"(ptr), "r"(new_value), "r"(old_value), "1"(result)
: "memory", "cc");
#elif defined(__arm__)
int result;
__asm__ __volatile__ (
"\n"
"0:\t"
"ldr %1,[%2] \n\t"
"mov %0,#0 \n\t"
"cmp %1,%4 \n\t"
"bne 1f \n\t"
"swp %0,%3,[%2] \n\t"
"cmp %1,%0 \n\t"
"swpne %1,%0,[%2] \n\t"
"bne 0b \n\t"
"mov %0,#1 \n"
"1:\n\t"
""
: "=&r"(result), "=&r"(prev)
: "r" ptr), "r" (new_value), "r" (old_value)
: "cc", "memory");
#endif // defined(__i386__)
return prev;
}

#elif defined(_MSC_VER) && _M_IX86 > 400
inline void MemoryFence() {
LONG dummy = 0;
InterlockedExchange((LONG volatile *)&dummy, (LONG)0);
}

typedef DWORD cas_flag;
inline cas_flag CompareAndSwap(cas_flag* ptr,
cas_flag new_value,
cas_flag old_value) {
/* FIXME - This is nearly useless on win64. */
/* Use InterlockedCompareExchange64 for win64? */
return InterlockedCompareExchange((DWORD volatile *)ptr,
(DWORD)new_value, (DWORD)old_value);
}
#else
#error No atomics implementation found for your platform.
#endif // !defined(ENABLE_THREADS) || ENABLE_THREADS == 0

}
}
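
A usage note on the CompareAndSwap contract shown above (an illustration, not part of the commit): each implementation returns the value previously stored at *ptr, which is exactly what a caller needs to build a retry loop. A sketch of an atomic increment on top of it, with a hypothetical helper name:

#include "llvm/System/Atomic.h"

// Hypothetical helper, not in this commit: the classic CAS retry loop.
static llvm::sys::cas_flag AtomicIncrement(llvm::sys::cas_flag *Addr) {
  for (;;) {
    llvm::sys::cas_flag Old = *Addr;
    // Succeeds only if *Addr still holds Old; returns the prior value either way.
    if (llvm::sys::CompareAndSwap(Addr, Old + 1, Old) == Old)
      return Old + 1;
    // Another thread raced ahead; reload and try again.
  }
}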
