libstdc++ internal header documentation — bits/atomic_futex.h
(Doxygen-generated page header; the annotated file source follows.)
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2015-2025 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_futex.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly.
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_FUTEX_H
31 #define _GLIBCXX_ATOMIC_FUTEX_H 1
32 
33 #ifdef _GLIBCXX_SYSHDR
34 #pragma GCC system_header
35 #endif
36 
37 #include <atomic>
38 #if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
39 #include <mutex>
40 #include <condition_variable>
41 #endif
42 #include <bits/chrono.h>
43 
44 #ifndef _GLIBCXX_ALWAYS_INLINE
45 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
46 #endif
47 
48 namespace std _GLIBCXX_VISIBILITY(default)
49 {
50 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 
52 #ifdef _GLIBCXX_HAS_GTHREADS
53 #if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
54  struct __atomic_futex_unsigned_base
55  {
56  // __s and __ns are measured against CLOCK_REALTIME. Returns false
57  // iff a timeout occurred.
58  bool
59  _M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
61 
62  // __s and __ns are measured against CLOCK_MONOTONIC. Returns
63  // false iff a timeout occurred.
64  bool
65  _M_futex_wait_until_steady(unsigned *__addr, unsigned __val,
66  bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns);
67 
68  // This can be executed after the object has been destroyed.
69  static void _M_futex_notify_all(unsigned* __addr);
70  };
71 
72  template <unsigned _Waiter_bit = 0x80000000>
73  class __atomic_futex_unsigned : __atomic_futex_unsigned_base
74  {
75  typedef chrono::steady_clock __clock_t;
76 
77  // This must be lock-free and at offset 0.
78  atomic<unsigned> _M_data;
79 
80  public:
81  explicit
82  __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
83  { }
84 
85  _GLIBCXX_ALWAYS_INLINE unsigned
86  _M_load(memory_order __mo)
87  {
88  return _M_data.load(__mo) & ~_Waiter_bit;
89  }
90 
91  private:
92  // If a timeout occurs, returns a current value after the timeout;
93  // otherwise, returns the operand's value if equal is true or a different
94  // value if equal is false.
95  // The assumed value is the caller's assumption about the current value
96  // when making the call.
97  // __s and __ns are measured against CLOCK_REALTIME.
98  unsigned
99  _M_load_and_test_until(unsigned __assumed, unsigned __operand,
100  bool __equal, memory_order __mo, bool __has_timeout,
102  {
103  for (;;)
104  {
105  // Don't bother checking the value again because we expect the caller
106  // to have done it recently.
107  // memory_order_relaxed is sufficient because we can rely on just the
108  // modification order (store_notify uses an atomic RMW operation too),
109  // and the futex syscalls synchronize between themselves.
110  _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
111  bool __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
112  __assumed | _Waiter_bit,
113  __has_timeout, __s, __ns);
114  // Fetch the current value after waiting (clears _Waiter_bit).
115  __assumed = _M_load(__mo);
116  if (!__ret || ((__operand == __assumed) == __equal))
117  return __assumed;
118  // TODO adapt wait time
119  }
120  }
121 
122  // If a timeout occurs, returns a current value after the timeout;
123  // otherwise, returns the operand's value if equal is true or a different
124  // value if equal is false.
125  // The assumed value is the caller's assumption about the current value
126  // when making the call.
127  // __s and __ns are measured against CLOCK_MONOTONIC.
128  unsigned
129  _M_load_and_test_until_steady(unsigned __assumed, unsigned __operand,
130  bool __equal, memory_order __mo, bool __has_timeout,
132  {
133  for (;;)
134  {
135  // Don't bother checking the value again because we expect the caller
136  // to have done it recently.
137  // memory_order_relaxed is sufficient because we can rely on just the
138  // modification order (store_notify uses an atomic RMW operation too),
139  // and the futex syscalls synchronize between themselves.
140  _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
141  bool __ret = _M_futex_wait_until_steady((unsigned*)(void*)&_M_data,
142  __assumed | _Waiter_bit,
143  __has_timeout, __s, __ns);
144  // Fetch the current value after waiting (clears _Waiter_bit).
145  __assumed = _M_load(__mo);
146  if (!__ret || ((__operand == __assumed) == __equal))
147  return __assumed;
148  // TODO adapt wait time
149  }
150  }
151 
152  // Returns the operand's value if equal is true or a different value if
153  // equal is false.
154  // The assumed value is the caller's assumption about the current value
155  // when making the call.
156  unsigned
157  _M_load_and_test(unsigned __assumed, unsigned __operand,
158  bool __equal, memory_order __mo)
159  {
160  return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
161  false, {}, {});
162  }
163 
164  // If a timeout occurs, returns a current value after the timeout;
165  // otherwise, returns the operand's value if equal is true or a different
166  // value if equal is false.
167  // The assumed value is the caller's assumption about the current value
168  // when making the call.
169  template<typename _Dur>
170  unsigned
171  _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
172  bool __equal, memory_order __mo,
173  const chrono::time_point<std::chrono::system_clock, _Dur>& __atime)
174  {
175  auto __d = __atime.time_since_epoch();
176  if (__d < __d.zero()) [[__unlikely__]]
177  return false;
178  auto __s = chrono::duration_cast<chrono::seconds>(__d);
179  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__d - __s);
180  return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
181  true, __s, __ns);
182  }
183 
184  template<typename _Dur>
185  unsigned
186  _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
187  bool __equal, memory_order __mo,
188  const chrono::time_point<std::chrono::steady_clock, _Dur>& __atime)
189  {
190  auto __d = __atime.time_since_epoch();
191  if (__d < __d.zero()) [[__unlikely__]]
192  return false;
193  auto __s = chrono::duration_cast<chrono::seconds>(__d);
194  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__d - __s);
195  return _M_load_and_test_until_steady(__assumed, __operand, __equal, __mo,
196  true, __s, __ns);
197  }
198 
199  public:
200 
201  _GLIBCXX_ALWAYS_INLINE unsigned
202  _M_load_when_not_equal(unsigned __val, memory_order __mo)
203  {
204  unsigned __i = _M_load(__mo);
205  if ((__i & ~_Waiter_bit) != __val)
206  return (__i & ~_Waiter_bit);
207  // TODO Spin-wait first.
208  return _M_load_and_test(__i, __val, false, __mo);
209  }
210 
211  _GLIBCXX_ALWAYS_INLINE void
212  _M_load_when_equal(unsigned __val, memory_order __mo)
213  {
214  unsigned __i = _M_load(__mo);
215  if ((__i & ~_Waiter_bit) == __val)
216  return;
217  // TODO Spin-wait first.
218  _M_load_and_test(__i, __val, true, __mo);
219  }
220 
221  // Returns false iff a timeout occurred.
222  template<typename _Rep, typename _Period>
223  _GLIBCXX_ALWAYS_INLINE bool
224  _M_load_when_equal_for(unsigned __val, memory_order __mo,
225  const chrono::duration<_Rep, _Period>& __rtime)
226  {
227  using __dur = typename __clock_t::duration;
228  return _M_load_when_equal_until(__val, __mo,
229  __clock_t::now() + chrono::__detail::ceil<__dur>(__rtime));
230  }
231 
232  // Returns false iff a timeout occurred.
233  template<typename _Clock, typename _Duration>
234  _GLIBCXX_ALWAYS_INLINE bool
235  _M_load_when_equal_until(unsigned __val, memory_order __mo,
236  const chrono::time_point<_Clock, _Duration>& __atime)
237  {
238  typename _Clock::time_point __c_entry = _Clock::now();
239  do {
240  const __clock_t::time_point __s_entry = __clock_t::now();
241  const auto __delta = __atime - __c_entry;
242  const auto __s_atime = __s_entry +
243  chrono::__detail::ceil<__clock_t::duration>(__delta);
244  if (_M_load_when_equal_until(__val, __mo, __s_atime))
245  return true;
246  __c_entry = _Clock::now();
247  } while (__c_entry < __atime);
248  return false;
249  }
250 
251  // Returns false iff a timeout occurred.
252  template<typename _Duration>
253  _GLIBCXX_ALWAYS_INLINE bool
254  _M_load_when_equal_until(unsigned __val, memory_order __mo,
255  const chrono::time_point<std::chrono::system_clock, _Duration>& __atime)
256  {
257  unsigned __i = _M_load(__mo);
258  if ((__i & ~_Waiter_bit) == __val)
259  return true;
260  // TODO Spin-wait first. Ignore effect on timeout.
261  __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
262  return (__i & ~_Waiter_bit) == __val;
263  }
264 
265  // Returns false iff a timeout occurred.
266  template<typename _Duration>
267  _GLIBCXX_ALWAYS_INLINE bool
268  _M_load_when_equal_until(unsigned __val, memory_order __mo,
269  const chrono::time_point<std::chrono::steady_clock, _Duration>& __atime)
270  {
271  unsigned __i = _M_load(__mo);
272  if ((__i & ~_Waiter_bit) == __val)
273  return true;
274  // TODO Spin-wait first. Ignore effect on timeout.
275  __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
276  return (__i & ~_Waiter_bit) == __val;
277  }
278 
279  _GLIBCXX_ALWAYS_INLINE void
280  _M_store_notify_all(unsigned __val, memory_order __mo)
281  {
282  unsigned* __futex = (unsigned *)(void *)&_M_data;
283  if (_M_data.exchange(__val, __mo) & _Waiter_bit)
284  _M_futex_notify_all(__futex);
285  }
286  };
287 
288 #else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)
289 
290  // If futexes are not available, use a mutex and a condvar to wait.
291  // Because we access the data only within critical sections, all accesses
292  // are sequentially consistent; thus, we satisfy any provided memory_order.
293  template <unsigned _Waiter_bit = 0x80000000>
294  class __atomic_futex_unsigned
295  {
296  typedef chrono::system_clock __clock_t;
297 
298  unsigned _M_data;
299  mutex _M_mutex;
300  condition_variable _M_condvar;
301 
302  public:
303  explicit
304  __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
305  { }
306 
307  _GLIBCXX_ALWAYS_INLINE unsigned
308  _M_load(memory_order)
309  {
310  unique_lock<mutex> __lock(_M_mutex);
311  return _M_data;
312  }
313 
314  _GLIBCXX_ALWAYS_INLINE unsigned
315  _M_load_when_not_equal(unsigned __val, memory_order)
316  {
317  unique_lock<mutex> __lock(_M_mutex);
318  while (_M_data == __val)
319  _M_condvar.wait(__lock);
320  return _M_data;
321  }
322 
323  _GLIBCXX_ALWAYS_INLINE void
324  _M_load_when_equal(unsigned __val, memory_order)
325  {
326  unique_lock<mutex> __lock(_M_mutex);
327  while (_M_data != __val)
328  _M_condvar.wait(__lock);
329  }
330 
331  template<typename _Rep, typename _Period>
332  _GLIBCXX_ALWAYS_INLINE bool
333  _M_load_when_equal_for(unsigned __val, memory_order,
334  const chrono::duration<_Rep, _Period>& __rtime)
335  {
336  unique_lock<mutex> __lock(_M_mutex);
337  return _M_condvar.wait_for(__lock, __rtime,
338  [&] { return _M_data == __val;});
339  }
340 
341  template<typename _Clock, typename _Duration>
342  _GLIBCXX_ALWAYS_INLINE bool
343  _M_load_when_equal_until(unsigned __val, memory_order,
344  const chrono::time_point<_Clock, _Duration>& __atime)
345  {
346  unique_lock<mutex> __lock(_M_mutex);
347  return _M_condvar.wait_until(__lock, __atime,
348  [&] { return _M_data == __val;});
349  }
350 
351  _GLIBCXX_ALWAYS_INLINE void
352  _M_store_notify_all(unsigned __val, memory_order)
353  {
354  unique_lock<mutex> __lock(_M_mutex);
355  _M_data = __val;
356  _M_condvar.notify_all();
357  }
358  };
359 
360 #endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
361 #endif // _GLIBCXX_HAS_GTHREADS
362 
363 _GLIBCXX_END_NAMESPACE_VERSION
364 } // namespace std
365 
366 #endif
Cross-references (Doxygen symbol index):
- std::chrono::seconds — duration<int64_t>; defined in chrono.h, line 901.
- std::chrono::nanoseconds — duration<int64_t, nano>; defined in chrono.h, line 892.
- std::memory_order — enumeration for memory-ordering constraints; defined in atomic_base.h, line 66.
- std — the toplevel namespace for ISO C++ entities.