// Copyright 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_

#include "base/basictypes.h"

#if defined(_WIN64) || defined(__x86_64__) || defined(__LP64__)
#define HAS_64_BIT
#endif
#ifndef WIN32
#define __w64
#endif

namespace base {
namespace subtle {

// Bug 1308991. We need this for /Wp64, to mark it safe for AtomicWord casting.
typedef __w64 int32 Atomic32;
#ifdef HAS_64_BIT
typedef int64 Atomic64;
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
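//
// Example (a sketch; AtomicStoreMax is a hypothetical helper, not part of
// this interface): the usual compare-and-swap retry loop, here raising
// *ptr to at least |new_value|:
//
//   void AtomicStoreMax(volatile Atomic32* ptr, Atomic32 new_value) {
//     Atomic32 old_value = NoBarrier_Load(ptr);
//     while (old_value < new_value) {
//       Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
//       if (prev == old_value)
//         break;           // Our swap took effect.
//       old_value = prev;  // Another thread raced us; retry with its value.
//     }
//   }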

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
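//
// Example (a sketch; g_event_count and RecordEvent are hypothetical): an
// event counter where atomicity matters but memory ordering does not:
//
//   Atomic32 g_event_count = 0;
//
//   void RecordEvent() {
//     NoBarrier_AtomicIncrement(&g_event_count, 1);
//   }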

// Same as NoBarrier_AtomicIncrement, but with "Barrier" (acquire-and-release)
// semantics, as defined below.
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
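//
// Example (a sketch; the SpinLock* names are hypothetical): a minimal
// spinlock built from these primitives. Acquire semantics on lock keep the
// critical section from being reordered above the lock; release semantics
// on unlock keep it from being reordered below the unlock.
//
//   void SpinLockAcquire(volatile Atomic32* lock) {
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // Spin until we observe the lock free (0) and swap in 1.
//     }
//   }
//
//   void SpinLockRelease(volatile Atomic32* lock) {
//     Release_Store(lock, 0);
//   }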

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
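//
// Example (a sketch; g_payload and g_ready are hypothetical): publishing a
// value to another thread. A reader that observes g_ready == 1 via
// Acquire_Load is guaranteed to also observe the write to g_payload made
// before the Release_Store.
//
//   int g_payload = 0;
//   Atomic32 g_ready = 0;
//
//   void Publisher() {
//     g_payload = 42;
//     Release_Store(&g_ready, 1);
//   }
//
//   void Consumer() {
//     while (Acquire_Load(&g_ready) == 0) {
//       // Spin until the publisher sets the flag.
//     }
//     // g_payload is guaranteed to read 42 here.
//   }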

// 64-bit atomic operations (only available on 64-bit processors).
#ifdef HAS_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // HAS_64_BIT

}  // namespace subtle
}  // namespace base

// Include our platform specific implementation.
#if defined(_MSC_VER) && defined(_M_IX86)
#include "base/atomicops_internals_x86_msvc.h"
#elif defined(__MACH__) && defined(__APPLE__) && defined(__i386__)
#include "base/atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && (defined(__i386) || defined(ARCH_K8))
#include "base/atomicops_internals_x86_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // BASE_ATOMICOPS_H_