#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_H
#define GRPC_CORE_LIB_GPRPP_ATOMIC_H

#include <grpc/support/port_platform.h>

#include <atomic>

#include <grpc/support/atm.h>

namespace grpc_core {

// Strongly-typed wrapper around std::memory_order; each enumerator maps to
// its std counterpart so callers can spell orderings as MemoryOrder::ACQUIRE.
enum class MemoryOrder {
  RELAXED = std::memory_order_relaxed,
  CONSUME = std::memory_order_consume,
  ACQUIRE = std::memory_order_acquire,
  RELEASE = std::memory_order_release,
  ACQ_REL = std::memory_order_acq_rel,
  SEQ_CST = std::memory_order_seq_cst
};

template <typename T>
class Atomic {
 public:
  explicit Atomic(T val = T()) : storage_(val) {}

  T Load(MemoryOrder order) const {
    return storage_.load(static_cast<std::memory_order>(order));
  }

  void Store(T val, MemoryOrder order) {
    storage_.store(val, static_cast<std::memory_order>(order));
  }

  T Exchange(T desired, MemoryOrder order) {
    return storage_.exchange(desired, static_cast<std::memory_order>(order));
  }

  // Weak compare-and-swap: may fail spuriously, so call it in a retry loop.
  // On failure, *expected is updated with the value actually stored.
  // (GPR_ATM_INC_CAS_THEN / GPR_ATM_INC_ADD_THEN come from
  // <grpc/support/atm.h> and expand to just the wrapped operation unless
  // atomic-op counting is enabled.)
  bool CompareExchangeWeak(T* expected, T desired, MemoryOrder success,
                           MemoryOrder failure) {
    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_weak(
        *expected, desired, static_cast<std::memory_order>(success),
        static_cast<std::memory_order>(failure)));
  }

  // Strong compare-and-swap: fails only if the stored value != *expected.
  bool CompareExchangeStrong(T* expected, T desired, MemoryOrder success,
                             MemoryOrder failure) {
    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_strong(
        *expected, desired, static_cast<std::memory_order>(success),
        static_cast<std::memory_order>(failure)));
  }

  template <typename Arg>
  T FetchAdd(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
    return GPR_ATM_INC_ADD_THEN(storage_.fetch_add(
        static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
  }

  template <typename Arg>
  T FetchSub(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
    return GPR_ATM_INC_ADD_THEN(storage_.fetch_sub(
        static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
  }

  // Atomically increment the counter only if it is non-zero. Returns true if
  // the increment took place; false if the counter was zero.
  bool IncrementIfNonzero(MemoryOrder load_order = MemoryOrder::ACQUIRE) {
    T count = storage_.load(static_cast<std::memory_order>(load_order));
    do {
      // If zero, we are done (without an increment). If not, we must do a CAS
      // to maintain the contract: do not increment the counter if it is zero.
      if (count == 0) {
        return false;
      }
    } while (!CompareExchangeWeak(&count, count + 1, MemoryOrder::ACQ_REL,
                                  load_order));
    return true;
  }

 private:
  std::atomic<T> storage_;
};

}  // namespace grpc_core

#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_H */
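// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the header). It exercises the
// API defined above: a relaxed counter, a release/acquire handoff, and a
// CompareExchangeWeak retry loop. Everything here except grpc_core::Atomic,
// MemoryOrder, and their members is hypothetical example code.

#include "src/core/lib/gprpp/atomic.h"

void UsageSketch() {
  // Plain statistics counter: no ordering requirements, RELAXED suffices.
  grpc_core::Atomic<int> hits(0);
  hits.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);

  // Release/acquire handoff: a writer that Stores with RELEASE guarantees a
  // reader that Loads true with ACQUIRE also sees the writer's earlier writes.
  grpc_core::Atomic<bool> ready(false);
  ready.Store(true, grpc_core::MemoryOrder::RELEASE);
  bool seen = ready.Load(grpc_core::MemoryOrder::ACQUIRE);
  (void)seen;

  // CAS retry loop: CompareExchangeWeak may fail spuriously, so it belongs in
  // a loop; on failure it refreshes `cur` with the value actually stored.
  grpc_core::Atomic<int> max_seen(0);
  int candidate = 42;
  int cur = max_seen.Load(grpc_core::MemoryOrder::RELAXED);
  while (cur < candidate &&
         !max_seen.CompareExchangeWeak(&cur, candidate,
                                       grpc_core::MemoryOrder::ACQ_REL,
                                       grpc_core::MemoryOrder::RELAXED)) {
  }
}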
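// ---------------------------------------------------------------------------
// Sketch of the pattern IncrementIfNonzero supports, using a hypothetical
// RefCounted-style type (only Atomic<T> and MemoryOrder from the header
// above are real). A weak-reference holder may take a strong reference only
// while the count is still non-zero; once it reaches zero the object is
// being destroyed and must not be revived.

class RefCountedExample {
 public:
  // Returns true and takes a strong ref only if the object is still alive.
  bool TryRef() { return refs_.IncrementIfNonzero(); }

  void Ref() { refs_.FetchAdd(1, grpc_core::MemoryOrder::RELAXED); }

  void Unref() {
    // ACQ_REL so the thread that frees the object observes all prior writes.
    if (refs_.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
      delete this;  // last reference released
    }
  }

 private:
  ~RefCountedExample() = default;  // destroyed only through Unref()
  grpc_core::Atomic<int> refs_{1};
};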