diff --git a/inc/base/atomicops.h b/inc/base/atomicops.h
index 6947ef02..7ce8c1c2 100644
--- a/inc/base/atomicops.h
+++ b/inc/base/atomicops.h
@@ -136,6 +136,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
 #include "base/atomicops_internals_x86_msvc.h"
 #elif defined(OS_MACOSX)
 #include "base/atomicops_internals_mac.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
+#include "base/atomicops_internals_arm64_gcc.h"
 #elif (defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)) || \
     defined(OS_NACL)
 #include "base/atomicops_internals_gcc.h"
diff --git a/inc/base/atomicops_internals_arm64_gcc.h b/inc/base/atomicops_internals_arm64_gcc.h
new file mode 100644
index 00000000..a2b0abc1
--- /dev/null
+++ b/inc/base/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,360 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+// TODO(rmcilroy): Investigate whether we can use __sync__ intrinsics instead of
+//                 the hand coded assembly without introducing perf regressions.
+// TODO(rmcilroy): Investigate whether we can use acquire / release versions of
+//                 exclusive load / store assembly instructions and do away with
+//                 the barriers.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+#if defined(OS_QNX)
+#include <sys/cpuinline.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"  // Data memory barrier.
+    ::: "memory"
+  );  // NOLINT
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    "clrex                                 \n\t"  // In case we didn't swap.
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                         \n\t"
+    "ldxr %w[result], %[ptr]                    \n\t"  // Load the previous value.
+    "add %w[result], %w[result], %w[increment]  \n\t"
+    "stxr %w[temp], %w[result], %[ptr]          \n\t"  // Try to store the result.
+    "cbnz %w[temp], 0b                          \n\t"  // Retry on failure.
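+    // The "=&r" constraints below mark early-clobber outputs (written before
+    // all inputs are consumed), and "+Q" keeps *ptr in a base-register-only
+    // memory operand, which is the addressing form LDXR/STXR require.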
+ : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [increment]"r" (increment) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + MemoryBarrier(); + Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + + return result; +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. + "cmp %w[prev], %w[old_value] \n\t" + "bne 1f \n\t" + "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. + "cbnz %w[temp], 0b \n\t" // Retry if it did not work. + "dmb ish \n\t" // Data memory barrier. + "1: \n\t" + // If the compare failed the 'dmb' is unnecessary, but we still need a + // 'clrex'. + "clrex \n\t" + : [prev]"=&r" (prev), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [old_value]"r" (old_value), + [new_value]"r" (new_value) + : "memory", "cc" + ); // NOLINT + + return prev; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + int32_t temp; + + MemoryBarrier(); + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. + "cmp %w[prev], %w[old_value] \n\t" + "bne 1f \n\t" + "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. + "cbnz %w[temp], 0b \n\t" // Retry if it did not work. + "1: \n\t" + // If the compare failed the we still need a 'clrex'. + "clrex \n\t" + : [prev]"=&r" (prev), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [old_value]"r" (old_value), + [new_value]"r" (new_value) + : "memory", "cc" + ); // NOLINT + + return prev; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +// 64-bit versions of the operations. +// See the 32-bit versions for comments. 
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[result], %[ptr]                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                      \n\t"
+    "ldxr %[result], %[ptr]                  \n\t"
+    "add %[result], %[result], %[increment]  \n\t"
+    "stxr %w[temp], %[result], %[ptr]        \n\t"
+    "cbnz %w[temp], 0b                       \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"r" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "dmb ish                               \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  MemoryBarrier();
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
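The first TODO in the new header asks whether compiler intrinsics could
replace the hand-coded assembly. For comparison only (not part of the patch),
the relaxed 32-bit CAS maps onto GCC's __atomic builtins roughly as below;
the wrapper name is hypothetical and the performance question the TODO raises
is left open:

    inline Atomic32 NoBarrier_CompareAndSwap_Builtin(volatile Atomic32* ptr,
                                                     Atomic32 old_value,
                                                     Atomic32 new_value) {
      // On failure __atomic_compare_exchange_n writes the value observed in
      // memory back into old_value, so old_value ends up holding the previous
      // contents of *ptr in both outcomes, matching the assembly contract.
      __atomic_compare_exchange_n(ptr, &old_value, new_value,
                                  false /* strong, not weak */,
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED);
      return old_value;
    }
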
diff --git a/inc/base/third_party/nspr/prcpucfg_linux.h b/inc/base/third_party/nspr/prcpucfg_linux.h
index 140167a7..2a1d16c6 100644
--- a/inc/base/third_party/nspr/prcpucfg_linux.h
+++ b/inc/base/third_party/nspr/prcpucfg_linux.h
@@ -509,6 +509,59 @@
 #define PR_BYTES_PER_WORD_LOG2  2
 #define PR_BYTES_PER_DWORD_LOG2 3
 
+#elif defined(__aarch64__)
+
+#ifdef __AARCH64EB__
+#undef  IS_LITTLE_ENDIAN
+#define IS_BIG_ENDIAN 1
+#elif defined(__AARCH64EL__)
+#define IS_LITTLE_ENDIAN 1
+#undef  IS_BIG_ENDIAN
+#else
+#error "Unknown Aarch64 endianness."
+#endif
+#define IS_64
+
+#define PR_BYTES_PER_BYTE   1
+#define PR_BYTES_PER_SHORT  2
+#define PR_BYTES_PER_INT    4
+#define PR_BYTES_PER_INT64  8
+#define PR_BYTES_PER_LONG   8
+#define PR_BYTES_PER_FLOAT  4
+#define PR_BYTES_PER_DOUBLE 8
+#define PR_BYTES_PER_WORD   8
+#define PR_BYTES_PER_DWORD  8
+
+#define PR_BITS_PER_BYTE    8
+#define PR_BITS_PER_SHORT   16
+#define PR_BITS_PER_INT     32
+#define PR_BITS_PER_INT64   64
+#define PR_BITS_PER_LONG    64
+#define PR_BITS_PER_FLOAT   32
+#define PR_BITS_PER_DOUBLE  64
+#define PR_BITS_PER_WORD    64
+
+#define PR_BITS_PER_BYTE_LOG2   3
+#define PR_BITS_PER_SHORT_LOG2  4
+#define PR_BITS_PER_INT_LOG2    5
+#define PR_BITS_PER_INT64_LOG2  6
+#define PR_BITS_PER_LONG_LOG2   6
+#define PR_BITS_PER_FLOAT_LOG2  5
+#define PR_BITS_PER_DOUBLE_LOG2 6
+#define PR_BITS_PER_WORD_LOG2   6
+
+#define PR_ALIGN_OF_SHORT   2
+#define PR_ALIGN_OF_INT     4
+#define PR_ALIGN_OF_LONG    8
+#define PR_ALIGN_OF_INT64   8
+#define PR_ALIGN_OF_FLOAT   4
+#define PR_ALIGN_OF_DOUBLE  8
+#define PR_ALIGN_OF_POINTER 8
+#define PR_ALIGN_OF_WORD    8
+
+#define PR_BYTES_PER_WORD_LOG2  3
+#define PR_BYTES_PER_DWORD_LOG2 3
+
 #elif defined(__hppa__)
 
 #undef  IS_LITTLE_ENDIAN
diff --git a/inc/build/build_config.h b/inc/build/build_config.h
index e5d7bc55..f7c81f5e 100644
--- a/inc/build/build_config.h
+++ b/inc/build/build_config.h
@@ -112,6 +112,11 @@
 #define ARCH_CPU_ARMEL 1
 #define ARCH_CPU_32_BITS 1
 #define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__aarch64__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARM64 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
 #elif defined(__pnacl__)
 #define ARCH_CPU_32_BITS 1
 #elif defined(__MIPSEL__)
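
The second TODO asks whether the acquire/release forms of the exclusive
instructions could replace the explicit 'dmb ish' barriers. A minimal,
untested sketch (not part of the patch) of what Acquire_CompareAndSwap might
look like with LDAXR, the load-acquire exclusive form, in place of the
trailing barrier:

    inline Atomic32 Acquire_CompareAndSwap_Ldaxr(volatile Atomic32* ptr,
                                                 Atomic32 old_value,
                                                 Atomic32 new_value) {
      Atomic32 prev;
      int32_t temp;
      __asm__ __volatile__ (
        "0:                                    \n\t"
        "ldaxr %w[prev], %[ptr]                \n\t"  // Load-acquire exclusive.
        "cmp %w[prev], %w[old_value]           \n\t"
        "bne 1f                                \n\t"
        "stxr %w[temp], %w[new_value], %[ptr]  \n\t"
        "cbnz %w[temp], 0b                     \n\t"
        "1:                                    \n\t"
        "clrex                                 \n\t"  // In case we didn't swap.
        : [prev]"=&r" (prev),
          [temp]"=&r" (temp),
          [ptr]"+Q" (*ptr)
        : [old_value]"r" (old_value),
          [new_value]"r" (new_value)
        : "memory", "cc"
      );
      return prev;
    }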