// -*- C++ -*- header.

// Copyright (C) 2015-2024 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_futex.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_FUTEX_H
#define _GLIBCXX_ATOMIC_FUTEX_H 1

#pragma GCC system_header

#include <atomic>
#if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
#include <mutex>
#include <condition_variable>
#endif
#include <bits/chrono.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

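// __atomic_futex_unsigned is an unsigned value supporting futex-style
// blocking waits and wake-ups.  Two implementations follow: a lock-free one
// used when Linux futexes are available and atomic<unsigned> is lock-free,
// and a fallback that guards the value with a mutex and a condition_variable.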
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

#ifdef _GLIBCXX_HAS_GTHREADS
#if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
  struct __atomic_futex_unsigned_base
  {
    // __s and __ns are measured against CLOCK_REALTIME. Returns false
    // iff a timeout occurred.
    bool
    _M_futex_wait_until(unsigned* __addr, unsigned __val, bool __has_timeout,
        chrono::seconds __s, chrono::nanoseconds __ns);

    // __s and __ns are measured against CLOCK_MONOTONIC. Returns
    // false iff a timeout occurred.
    bool
    _M_futex_wait_until_steady(unsigned* __addr, unsigned __val,
        bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns);

    // This can be executed after the object has been destroyed.
    static void _M_futex_notify_all(unsigned* __addr);
  };
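  // The members above are defined out of line in the library (in
  // src/c++11/futex.cc).  As a rough, illustrative sketch only (not the
  // actual implementation), a wait maps onto the Linux futex(2) syscall:
  //
  //   // Blocks only while *__addr still equals __val.
  //   syscall(SYS_futex, __addr, FUTEX_WAIT, __val, /* timeout or */ nullptr);
  //
  // and a notification onto:
  //
  //   syscall(SYS_futex, __addr, FUTEX_WAKE, INT_MAX); // wake all waiters
  //
  // The real definitions additionally select the clock for absolute
  // timeouts and handle error returns from the syscall.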

  template <unsigned _Waiter_bit = 0x80000000>
    class __atomic_futex_unsigned : __atomic_futex_unsigned_base
    {
      typedef chrono::steady_clock __clock_t;

      // This must be lock-free and at offset 0.
      atomic<unsigned> _M_data;

    public:
      explicit
      __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
      { }

      _GLIBCXX_ALWAYS_INLINE unsigned
      _M_load(memory_order __mo)
      {
        return _M_data.load(__mo) & ~_Waiter_bit;
      }

    private:
      // If a timeout occurs, returns a current value after the timeout;
      // otherwise, returns the operand's value if equal is true or a different
      // value if equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      // __s and __ns are measured against CLOCK_REALTIME.
      unsigned
      _M_load_and_test_until(unsigned __assumed, unsigned __operand,
          bool __equal, memory_order __mo, bool __has_timeout,
          chrono::seconds __s, chrono::nanoseconds __ns)
      {
        for (;;)
          {
            // Don't bother checking the value again because we expect the caller
            // to have done it recently.
            // memory_order_relaxed is sufficient because we can rely on just the
            // modification order (store_notify uses an atomic RMW operation too),
            // and the futex syscalls synchronize between themselves.
            _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
            bool __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
                                             __assumed | _Waiter_bit,
                                             __has_timeout, __s, __ns);
            // Fetch the current value after waiting (clears _Waiter_bit).
            __assumed = _M_load(__mo);
            if (!__ret || ((__operand == __assumed) == __equal))
              return __assumed;
            // TODO adapt wait time
          }
      }

      // If a timeout occurs, returns a current value after the timeout;
      // otherwise, returns the operand's value if equal is true or a different
      // value if equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      // __s and __ns are measured against CLOCK_MONOTONIC.
      unsigned
      _M_load_and_test_until_steady(unsigned __assumed, unsigned __operand,
          bool __equal, memory_order __mo, bool __has_timeout,
          chrono::seconds __s, chrono::nanoseconds __ns)
      {
        for (;;)
          {
            // Don't bother checking the value again because we expect the caller
            // to have done it recently.
            // memory_order_relaxed is sufficient because we can rely on just the
            // modification order (store_notify uses an atomic RMW operation too),
            // and the futex syscalls synchronize between themselves.
            _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
            bool __ret = _M_futex_wait_until_steady((unsigned*)(void*)&_M_data,
                                                    __assumed | _Waiter_bit,
                                                    __has_timeout, __s, __ns);
            // Fetch the current value after waiting (clears _Waiter_bit).
            __assumed = _M_load(__mo);
            if (!__ret || ((__operand == __assumed) == __equal))
              return __assumed;
            // TODO adapt wait time
          }
      }
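
      // Note on the _Waiter_bit protocol used above: a waiter sets the bit
      // with a relaxed fetch_or before blocking, _M_load masks it out of
      // every value it returns, and _M_store_notify_all only makes the wake
      // syscall when the value it replaced had the bit set.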

      // Returns the operand's value if equal is true or a different value if
      // equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      unsigned
      _M_load_and_test(unsigned __assumed, unsigned __operand,
          bool __equal, memory_order __mo)
      {
        return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
            false, {}, {});
      }

      // If a timeout occurs, returns a current value after the timeout;
      // otherwise, returns the operand's value if equal is true or a different
      // value if equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      template<typename _Dur>
        unsigned
        _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
            bool __equal, memory_order __mo,
            const chrono::time_point<std::chrono::system_clock, _Dur>& __atime)
        {
          auto __d = __atime.time_since_epoch();
          if (__d < __d.zero()) [[__unlikely__]]
            return false;
          auto __s = chrono::duration_cast<chrono::seconds>(__d);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__d - __s);
          return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
              true, __s, __ns);
        }

      template<typename _Dur>
        unsigned
        _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
            bool __equal, memory_order __mo,
            const chrono::time_point<std::chrono::steady_clock, _Dur>& __atime)
        {
          auto __d = __atime.time_since_epoch();
          if (__d < __d.zero()) [[__unlikely__]]
            return false;
          auto __s = chrono::duration_cast<chrono::seconds>(__d);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__d - __s);
          return _M_load_and_test_until_steady(__assumed, __operand, __equal,
              __mo, true, __s, __ns);
        }

    public:

      _GLIBCXX_ALWAYS_INLINE unsigned
      _M_load_when_not_equal(unsigned __val, memory_order __mo)
      {
        unsigned __i = _M_load(__mo);
        if ((__i & ~_Waiter_bit) != __val)
          return (__i & ~_Waiter_bit);
        // TODO Spin-wait first.
        return _M_load_and_test(__i, __val, false, __mo);
      }

      _GLIBCXX_ALWAYS_INLINE void
      _M_load_when_equal(unsigned __val, memory_order __mo)
      {
        unsigned __i = _M_load(__mo);
        if ((__i & ~_Waiter_bit) == __val)
          return;
        // TODO Spin-wait first.
        _M_load_and_test(__i, __val, true, __mo);
      }

      // Returns false iff a timeout occurred.
      template<typename _Rep, typename _Period>
        _GLIBCXX_ALWAYS_INLINE bool
        _M_load_when_equal_for(unsigned __val, memory_order __mo,
            const chrono::duration<_Rep, _Period>& __rtime)
        {
          using __dur = typename __clock_t::duration;
          return _M_load_when_equal_until(__val, __mo,
              __clock_t::now() + chrono::__detail::ceil<__dur>(__rtime));
        }

      // Returns false iff a timeout occurred.
      template<typename _Clock, typename _Duration>
        _GLIBCXX_ALWAYS_INLINE bool
        _M_load_when_equal_until(unsigned __val, memory_order __mo,
            const chrono::time_point<_Clock, _Duration>& __atime)
        {
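          // _Clock is a user-supplied clock with no direct futex support, so
          // convert the deadline to __clock_t (steady_clock) for each wait,
          // then re-check _Clock afterwards in case the two clocks drift
          // apart or the converted wait ends early.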
          typename _Clock::time_point __c_entry = _Clock::now();
          do {
            const __clock_t::time_point __s_entry = __clock_t::now();
            const auto __delta = __atime - __c_entry;
            const auto __s_atime = __s_entry
              + chrono::__detail::ceil<__clock_t::duration>(__delta);
            if (_M_load_when_equal_until(__val, __mo, __s_atime))
              return true;
            __c_entry = _Clock::now();
          } while (__c_entry < __atime);
          return false;
        }

      // Returns false iff a timeout occurred.
      template<typename _Duration>
        _GLIBCXX_ALWAYS_INLINE bool
        _M_load_when_equal_until(unsigned __val, memory_order __mo,
            const chrono::time_point<std::chrono::system_clock, _Duration>& __atime)
        {
          unsigned __i = _M_load(__mo);
          if ((__i & ~_Waiter_bit) == __val)
            return true;
          // TODO Spin-wait first. Ignore effect on timeout.
          __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
          return (__i & ~_Waiter_bit) == __val;
        }

      // Returns false iff a timeout occurred.
      template<typename _Duration>
        _GLIBCXX_ALWAYS_INLINE bool
        _M_load_when_equal_until(unsigned __val, memory_order __mo,
            const chrono::time_point<std::chrono::steady_clock, _Duration>& __atime)
        {
          unsigned __i = _M_load(__mo);
          if ((__i & ~_Waiter_bit) == __val)
            return true;
          // TODO Spin-wait first. Ignore effect on timeout.
          __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
          return (__i & ~_Waiter_bit) == __val;
        }

      _GLIBCXX_ALWAYS_INLINE void
      _M_store_notify_all(unsigned __val, memory_order __mo)
      {
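        // The exchange both stores the new value and clears _Waiter_bit, so
        // the comparatively expensive futex syscall is made only when the
        // replaced value shows that a thread may be blocked waiting.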
        unsigned* __futex = (unsigned *)(void *)&_M_data;
        if (_M_data.exchange(__val, __mo) & _Waiter_bit)
          _M_futex_notify_all(__futex);
      }
    };

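  // Illustrative use only (loosely mirroring how the shared state in
  // <future> signals readiness; the names below are invented for the
  // example, not taken from the library):
  //
  //   __atomic_futex_unsigned<> __ready{0};
  //   // waiting thread:
  //   __ready._M_load_when_equal(1, memory_order_acquire);
  //   // signalling thread:
  //   __ready._M_store_notify_all(1, memory_order_release);
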
#else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)

  // If futexes are not available, use a mutex and a condvar to wait.
  // Because we access the data only within critical sections, all accesses
  // are sequentially consistent; thus, we satisfy any provided memory_order.
  template <unsigned _Waiter_bit = 0x80000000>
    class __atomic_futex_unsigned
    {
      typedef chrono::system_clock __clock_t;

      unsigned _M_data;
      mutex _M_mutex;
      condition_variable _M_condvar;

    public:
      explicit
      __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
      { }

      _GLIBCXX_ALWAYS_INLINE unsigned
      _M_load(memory_order __mo)
      {
        unique_lock<mutex> __lock(_M_mutex);
        return _M_data;
      }

      _GLIBCXX_ALWAYS_INLINE unsigned
      _M_load_when_not_equal(unsigned __val, memory_order __mo)
      {
        unique_lock<mutex> __lock(_M_mutex);
        while (_M_data == __val)
          _M_condvar.wait(__lock);
        return _M_data;
      }

      _GLIBCXX_ALWAYS_INLINE void
      _M_load_when_equal(unsigned __val, memory_order __mo)
      {
        unique_lock<mutex> __lock(_M_mutex);
        while (_M_data != __val)
          _M_condvar.wait(__lock);
      }

      template<typename _Rep, typename _Period>
        _GLIBCXX_ALWAYS_INLINE bool
        _M_load_when_equal_for(unsigned __val, memory_order __mo,
            const chrono::duration<_Rep, _Period>& __rtime)
        {
          unique_lock<mutex> __lock(_M_mutex);
          return _M_condvar.wait_for(__lock, __rtime,
                                     [&] { return _M_data == __val; });
        }

      template<typename _Clock, typename _Duration>
        _GLIBCXX_ALWAYS_INLINE bool
        _M_load_when_equal_until(unsigned __val, memory_order __mo,
            const chrono::time_point<_Clock, _Duration>& __atime)
        {
          unique_lock<mutex> __lock(_M_mutex);
          return _M_condvar.wait_until(__lock, __atime,
                                       [&] { return _M_data == __val; });
        }

      _GLIBCXX_ALWAYS_INLINE void
      _M_store_notify_all(unsigned __val, memory_order __mo)
      {
        unique_lock<mutex> __lock(_M_mutex);
        _M_data = __val;
        _M_condvar.notify_all();
      }
    };

#endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
#endif // _GLIBCXX_HAS_GTHREADS

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_FUTEX_H