Squashed 'third_party/boostorg/atomic/' content from commit 19eecf8
Change-Id: I4723a39ab79969b4c0d7b7e67a4143c4e02992ed
git-subtree-dir: third_party/boostorg/atomic
git-subtree-split: 19eecf893c665410de63ab6ebb8549f405703e80
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..3e84d7c
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,96 @@
+* text=auto !eol svneol=native#text/plain
+*.gitattributes text svneol=native#text/plain
+
+# Scriptish formats
+*.bat text svneol=native#text/plain
+*.bsh text svneol=native#text/x-beanshell
+*.cgi text svneol=native#text/plain
+*.cmd text svneol=native#text/plain
+*.js text svneol=native#text/javascript
+*.php text svneol=native#text/x-php
+*.pl text svneol=native#text/x-perl
+*.pm text svneol=native#text/x-perl
+*.py text svneol=native#text/x-python
+*.sh eol=lf svneol=LF#text/x-sh
+configure eol=lf svneol=LF#text/x-sh
+
+# Image formats
+*.bmp binary svneol=unset#image/bmp
+*.gif binary svneol=unset#image/gif
+*.ico binary svneol=unset#image/ico
+*.jpeg binary svneol=unset#image/jpeg
+*.jpg binary svneol=unset#image/jpeg
+*.png binary svneol=unset#image/png
+*.tif binary svneol=unset#image/tiff
+*.tiff binary svneol=unset#image/tiff
+*.svg text svneol=native#image/svg%2Bxml
+
+# Data formats
+*.pdf binary svneol=unset#application/pdf
+*.avi binary svneol=unset#video/avi
+*.doc binary svneol=unset#application/msword
+*.dsp text svneol=crlf#text/plain
+*.dsw text svneol=crlf#text/plain
+*.eps binary svneol=unset#application/postscript
+*.gz binary svneol=unset#application/gzip
+*.mov binary svneol=unset#video/quicktime
+*.mp3 binary svneol=unset#audio/mpeg
+*.ppt binary svneol=unset#application/vnd.ms-powerpoint
+*.ps binary svneol=unset#application/postscript
+*.psd binary svneol=unset#application/photoshop
+*.rdf binary svneol=unset#text/rdf
+*.rss text svneol=unset#text/xml
+*.rtf binary svneol=unset#text/rtf
+*.sln text svneol=native#text/plain
+*.swf binary svneol=unset#application/x-shockwave-flash
+*.tgz binary svneol=unset#application/gzip
+*.vcproj text svneol=native#text/xml
+*.vcxproj text svneol=native#text/xml
+*.vsprops text svneol=native#text/xml
+*.wav binary svneol=unset#audio/wav
+*.xls binary svneol=unset#application/vnd.ms-excel
+*.zip binary svneol=unset#application/zip
+
+# Text formats
+.htaccess text svneol=native#text/plain
+*.bbk text svneol=native#text/xml
+*.cmake text svneol=native#text/plain
+*.css text svneol=native#text/css
+*.dtd text svneol=native#text/xml
+*.htm text svneol=native#text/html
+*.html text svneol=native#text/html
+*.ini text svneol=native#text/plain
+*.log text svneol=native#text/plain
+*.mak text svneol=native#text/plain
+*.qbk text svneol=native#text/plain
+*.rst text svneol=native#text/plain
+*.sql text svneol=native#text/x-sql
+*.txt text svneol=native#text/plain
+*.xhtml text svneol=native#text/xhtml%2Bxml
+*.xml text svneol=native#text/xml
+*.xsd text svneol=native#text/xml
+*.xsl text svneol=native#text/xml
+*.xslt text svneol=native#text/xml
+*.xul text svneol=native#text/xul
+*.yml text svneol=native#text/plain
+boost-no-inspect text svneol=native#text/plain
+CHANGES text svneol=native#text/plain
+COPYING text svneol=native#text/plain
+INSTALL text svneol=native#text/plain
+Jamfile text svneol=native#text/plain
+Jamroot text svneol=native#text/plain
+Jamfile.v2 text svneol=native#text/plain
+Jamrules text svneol=native#text/plain
+Makefile* text svneol=native#text/plain
+README text svneol=native#text/plain
+TODO text svneol=native#text/plain
+
+# Code formats
+*.c text svneol=native#text/plain
+*.cpp text svneol=native#text/plain
+*.h text svneol=native#text/plain
+*.hpp text svneol=native#text/plain
+*.ipp text svneol=native#text/plain
+*.tpp text svneol=native#text/plain
+*.jam text svneol=native#text/plain
+*.java text svneol=native#text/plain
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..3d81714
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,715 @@
+# Copyright 2016, 2017 Peter Dimov
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
+
+language: cpp
+
+sudo: false
+
+python: "2.7"
+
+os:
+ - linux
+ - osx
+
+branches:
+ only:
+ - master
+ - develop
+
+env:
+ matrix:
+ - BOGUS_JOB=true
+
+matrix:
+
+ exclude:
+ - env: BOGUS_JOB=true
+
+ include:
+# gcc, Linux, 64-bit
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++ CXXSTD=c++03 ADDRESS_MODEL=64
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.7 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-4.7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.7 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-4.7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.8 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-4.8
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.8 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-4.8
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.9 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-4.9
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.9 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-4.9
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-5
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-5
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-5
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-6
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-6
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-6
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++1z ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-6
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++1z ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++1z ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+# clang, Linux, 64-bit
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.5 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.5
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.5
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.5 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.5
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.5
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.6 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.6
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.6
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.6 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.6
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.6
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.7 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.7
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.7
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.7 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.7
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.7
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.8
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.8
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.8
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.8
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.8
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.8
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++1z ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.8
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.8
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.9
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-3.9
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.9
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-3.9
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.9
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-3.9
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++1z ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-3.9
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-3.9
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++1z ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++03 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-5.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-5.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++11 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-5.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-5.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++14 ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-5.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-5.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++1z ADDRESS_MODEL=64
+ addons:
+ apt:
+ packages:
+ - clang-5.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-5.0
+
+# Travis CI capacity of OS X testers is insufficient; tests are disabled until Travis CI capacity is increased
+# clang, OS X, 64-bit
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
+# osx_image: xcode8.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
+# osx_image: xcode8.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
+# osx_image: xcode8.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
+# osx_image: xcode8.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
+# osx_image: xcode8.2
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
+# osx_image: xcode8.2
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
+# osx_image: xcode8.2
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
+# osx_image: xcode8.1
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
+# osx_image: xcode8.1
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
+# osx_image: xcode8.1
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
+# osx_image: xcode8.1
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
+# osx_image: xcode8.1
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
+# osx_image: xcode8.0
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
+# osx_image: xcode8.0
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
+# osx_image: xcode8.0
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
+# osx_image: xcode8.0
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
+# osx_image: xcode7.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
+# osx_image: xcode7.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
+# osx_image: xcode7.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
+# osx_image: xcode7.3
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++03 ADDRESS_MODEL=64
+# osx_image: xcode6.4
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++11 ADDRESS_MODEL=64
+# osx_image: xcode6.4
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 ADDRESS_MODEL=64
+# osx_image: xcode6.4
+#
+# - os: osx
+# env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++1z ADDRESS_MODEL=64
+# osx_image: xcode6.4
+
+# gcc, Linux, 32-bit
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.7 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - g++-4.7-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.8 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-4.9 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - g++-4.9-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - g++-5-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - g++-6-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - g++-7-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+
+# clang, Linux, 32-bit
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.5 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - clang-3.5
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.5
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.6 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - clang-3.6
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.6
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.7 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - clang-3.7
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.7
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.8 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - clang-3.8
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.8
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-3.9 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - clang-3.9
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-3.9
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ env: TOOLSET=clang COMPILER=clang++-5.0 CXXSTD=c++11 ADDRESS_MODEL=32
+ addons:
+ apt:
+ packages:
+ - clang-5.0
+ - g++-4.8-multilib
+ - linux-libc-dev:i386
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-5.0
+
+
+install:
+ - cd ..
+ - git clone -b $TRAVIS_BRANCH --depth 1 https://github.com/boostorg/boost.git boost-root
+ - cd boost-root
+ - git submodule update --init tools/boostdep
+ - git submodule update --init tools/build
+ - git submodule update --init tools/inspect
+ - git submodule update --init libs/config
+ - cp -r $TRAVIS_BUILD_DIR/* libs/atomic
+ - python tools/boostdep/depinst/depinst.py atomic
+ - ./bootstrap.sh
+ - ./b2 headers
+
+script:
+ - |-
+ echo "using $TOOLSET : : $COMPILER : <cxxflags>-std=$CXXSTD ;" > ~/user-config.jam
+ - ./b2 -j3 libs/atomic/test toolset=$TOOLSET address-model=$ADDRESS_MODEL
+
+notifications:
+ email:
+ on_success: always
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3eb8875
--- /dev/null
+++ b/README.md
@@ -0,0 +1,26 @@
+# Boost.Atomic
+
+Boost.Atomic, part of the collection of the [Boost C++ Libraries](http://github.com/boostorg), implements atomic operations for various CPU architectures, reflecting and extending the standard interface defined in C++11.
+
+### Directories
+
+* **build** - Boost.Atomic build scripts
+* **doc** - QuickBook documentation sources
+* **include** - Interface headers of Boost.Atomic
+* **src** - Compilable source code of Boost.Atomic
+* **test** - Boost.Atomic unit tests
+
+### More information
+
+* [Documentation](http://boost.org/libs/atomic)
+* [Report bugs](https://svn.boost.org/trac/boost/newticket?component=atomic;version=Boost%20Release%20Branch). Be sure to mention the Boost version, platform and compiler you're using. A small compilable code sample that reproduces the problem is always helpful.
+* Submit your patches as pull requests against the **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
+
+### Build status
+
+Master: [AppVeyor](https://ci.appveyor.com/project/Lastique/atomic/branch/master) [Travis CI](https://travis-ci.org/boostorg/atomic)
+Develop: [AppVeyor](https://ci.appveyor.com/project/Lastique/atomic/branch/develop) [Travis CI](https://travis-ci.org/boostorg/atomic)
+
+### License
+
+Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..072410f
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,144 @@
+# Copyright 2016, 2017 Peter Dimov
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
+
+version: 1.0.{build}-{branch}
+
+shallow_clone: true
+
+branches:
+ only:
+ - master
+ - develop
+
+environment:
+ matrix:
+# AppVeyor doesn't provide 64-bit compilers for these MSVC versions
+# - TOOLSET: msvc-9.0
+# ADDRESS_MODEL: 64
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# - TOOLSET: msvc-10.0
+# ADDRESS_MODEL: 64
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# - TOOLSET: msvc-11.0
+# ADDRESS_MODEL: 64
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-12.0
+ ADDRESS_MODEL: 64
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-14.0
+ ADDRESS_MODEL: 64
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-14.1
+ ADDRESS_MODEL: 64
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+# Boost.Thread does not compile for Cygwin
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 64
+# B2_ARGS: cxxflags=-std=c++03
+# ADDPATH: C:\cygwin64\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 64
+# B2_ARGS: cxxflags=-std=c++11
+# ADDPATH: C:\cygwin64\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: gcc
+ ADDRESS_MODEL: 64
+ B2_ARGS: cxxflags=-std=c++03
+ ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: gcc
+ ADDRESS_MODEL: 64
+ B2_ARGS: cxxflags=-std=c++11
+ ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: gcc
+ ADDRESS_MODEL: 64
+ B2_ARGS: cxxflags=-std=gnu++03
+ ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: gcc
+ ADDRESS_MODEL: 64
+ B2_ARGS: cxxflags=-std=gnu++11
+ ADDPATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+
+ - TOOLSET: msvc-9.0
+ ADDRESS_MODEL: 32
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-10.0
+ ADDRESS_MODEL: 32
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-11.0
+ ADDRESS_MODEL: 32
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-12.0
+ ADDRESS_MODEL: 32
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-14.0
+ ADDRESS_MODEL: 32
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: msvc-14.1
+ ADDRESS_MODEL: 32
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+# Boost.Thread does not compile for Cygwin
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 32
+# B2_ARGS: cxxflags=-std=c++03
+# ADDPATH: C:\cygwin\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 32
+# B2_ARGS: cxxflags=-std=c++11
+# ADDPATH: C:\cygwin\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: gcc
+ ADDRESS_MODEL: 32
+ B2_ARGS: cxxflags=-std=c++03
+ ADDPATH: C:\mingw\bin
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ - TOOLSET: gcc
+ ADDRESS_MODEL: 32
+ B2_ARGS: cxxflags=-std=c++11
+ ADDPATH: C:\mingw\bin
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# AppVeyor doesn't provide 32-bit compilers for MinGW-w64
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 32
+# B2_ARGS: cxxflags=-std=c++03
+# ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 32
+# B2_ARGS: cxxflags=-std=c++11
+# ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 32
+# B2_ARGS: cxxflags=-std=gnu++03
+# ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+# - TOOLSET: gcc
+# ADDRESS_MODEL: 32
+# B2_ARGS: cxxflags=-std=gnu++11
+# ADDPATH: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\bin
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+
+install:
+ - cd ..
+ - git clone -b %APPVEYOR_REPO_BRANCH% https://github.com/boostorg/boost.git boost-root
+ - cd boost-root
+ - git submodule update --init tools/boostdep
+ - git submodule update --init tools/build
+ - git submodule update --init tools/inspect
+ - xcopy /s /e /q %APPVEYOR_BUILD_FOLDER% libs\atomic
+ - python tools/boostdep/depinst/depinst.py atomic
+ - cmd /c bootstrap
+ - b2 headers
+
+build: off
+
+test_script:
+ - PATH=%ADDPATH%;%PATH%
+ - b2 libs/atomic/test variant=release toolset=%TOOLSET% address-model=%ADDRESS_MODEL% %B2_ARGS%
diff --git a/build/Jamfile.v2 b/build/Jamfile.v2
new file mode 100644
index 0000000..70e7720
--- /dev/null
+++ b/build/Jamfile.v2
@@ -0,0 +1,37 @@
+# Boost.Atomic Library Jamfile
+#
+# Copyright Helge Bahmann 2011.
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import common ;
+
+project boost/atomic
+ : requirements
+ <threading>multi
+ <link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
+ <link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
+ <define>BOOST_ATOMIC_SOURCE
+ <target-os>windows:<define>BOOST_USE_WINDOWS_H
+ <target-os>windows:<define>_WIN32_WINNT=0x0500
+ <toolset>gcc,<target-os>windows:<linkflags>"-lkernel32"
+ : usage-requirements
+ <link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
+ <link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
+ : source-location ../src
+ ;
+
+alias atomic_sources
+ : lockpool.cpp
+ ;
+
+explicit atomic_sources ;
+
+
+lib boost_atomic
+ : atomic_sources
+ ;
+
+
+boost-install boost_atomic ;
diff --git a/doc/Jamfile.v2 b/doc/Jamfile.v2
new file mode 100644
index 0000000..16bd022
--- /dev/null
+++ b/doc/Jamfile.v2
@@ -0,0 +1,36 @@
+# Boost.Atomic library documentation Jamfile
+#
+# Copyright Helge Bahmann 2011.
+# Copyright Tim Blechmann 2012.
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import quickbook ;
+import boostbook : boostbook ;
+
+xml atomic : atomic.qbk ;
+
+boostbook standalone
+ : atomic
+ : <xsl:param>boost.root=../../../..
+ <xsl:param>boost.libraries=../../../libraries.htm
+ <format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/atomic/doc/html
+ ;
+
+install css : [ glob $(BOOST_ROOT)/doc/src/*.css ]
+ : <location>html ;
+install images : [ glob $(BOOST_ROOT)/doc/src/images/*.png ]
+ : <location>html/images ;
+explicit css ;
+explicit images ;
+
+###############################################################################
+alias boostdoc
+ : atomic
+ :
+ :
+ : ;
+explicit boostdoc ;
+alias boostrelease ;
+explicit boostrelease ;
diff --git a/doc/atomic.qbk b/doc/atomic.qbk
new file mode 100644
index 0000000..f48265b
--- /dev/null
+++ b/doc/atomic.qbk
@@ -0,0 +1,1279 @@
+[/
+ / Copyright (c) 2009 Helge Bahmann
+ / Copyright (c) 2014, 2017, 2018 Andrey Semashev
+ /
+ / Distributed under the Boost Software License, Version 1.0. (See accompanying
+ / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ /]
+
+[library Boost.Atomic
+ [quickbook 1.4]
+ [authors [Bahmann, Helge][Semashev, Andrey]]
+ [copyright 2011 Helge Bahmann]
+ [copyright 2012 Tim Blechmann]
+ [copyright 2013, 2017, 2018 Andrey Semashev]
+ [id atomic]
+ [dirname atomic]
+ [purpose Atomic operations]
+ [license
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ [@http://www.boost.org/LICENSE_1_0.txt])
+ ]
+]
+
+[section:introduction Introduction]
+
+[section:introduction_presenting Presenting Boost.Atomic]
+
+[*Boost.Atomic] is a library that provides [^atomic]
+data types and operations on these data types, as well as memory
+ordering constraints required for coordinating multiple threads through
+atomic variables. It implements the interface as defined by the C++11
+standard, but makes this functionality available on platforms lacking
+system/compiler support for this particular C++11 feature.
+
+Users of this library should already be familiar with concurrency
+in general, as well as elementary concepts such as "mutual exclusion".
+
+The implementation makes use of processor-specific instructions where
+possible (via inline assembler, platform libraries or compiler
+intrinsics), and falls back to "emulating" atomic operations through
+locking.
+
+[endsect]
+
+[section:introduction_purpose Purpose]
+
+Operations on "ordinary" variables are not guaranteed to be atomic.
+This means that with [^int n=0] initially, two threads concurrently
+executing
+
+[c++]
+
+ void function()
+ {
+ n ++;
+ }
+
+might result in [^n==1] instead of 2: Each thread will read the
+old value into a processor register, increment it and write the result
+back. Both threads may therefore write [^1], unaware that the other thread
+is doing likewise.
+
+If [^atomic<int> n=0] is declared instead, the same operation on
+this variable will always result in [^n==2], as each operation on this
+variable is ['atomic]: each operation behaves as if it
+were strictly sequentialized with respect to the other.
+
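+For illustration, the atomic counterpart of the example above can be
+sketched as follows (the operators use the default [^seq_cst] ordering,
+which is discussed in later sections):
+
+[c++]
+
+    atomic<int> n(0);
+
+    void function()
+    {
+        n ++; // now an atomic read-modify-write; no increments are lost
+    }
+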
+Atomic variables are useful for two purposes:
+
+* as a means for coordinating multiple threads via custom
+ coordination protocols
+* as faster alternatives to "locked" access to simple variables
+
+Take a look at the [link atomic.usage_examples examples] section
+for common patterns.
+
+[endsect]
+
+[endsect]
+
+[section:thread_coordination Thread coordination using Boost.Atomic]
+
+The most common use of [*Boost.Atomic] is to realize custom
+thread synchronization protocols: the goal is to coordinate
+accesses of threads to shared variables in order to avoid
+"conflicts". The programmer must be aware that
+compilers, CPUs and cache hierarchies may generally
+reorder memory references at will.
+As a consequence, a program such as:
+
+[c++]
+
+    int x = 0, y = 0;
+
+ thread1:
+ x = 1;
+ y = 1;
+
+    thread2:
+ if (y == 1) {
+ assert(x == 1);
+ }
+
+might indeed fail as there is no guarantee that the read of `x`
+by thread2 "sees" the write by thread1.
+
+[*Boost.Atomic] uses a synchronization concept based on the
+['happens-before] relation to describe the guarantees under
+which situations such as the above one cannot occur.
+
+The remainder of this section will discuss ['happens-before] in
+a "hands-on" way instead of giving a fully formalized definition.
+The reader is encouraged to additionally have a
+look at the discussion of the correctness of a few of the
+[link atomic.usage_examples examples] afterwards.
+
+[section:mutex Enforcing ['happens-before] through mutual exclusion]
+
+As an introductory example to understand how arguing using
+['happens-before] works, consider two threads synchronizing
+using a common mutex:
+
+[c++]
+
+ mutex m;
+
+ thread1:
+ m.lock();
+ ... /* A */
+ m.unlock();
+
+ thread2:
+ m.lock();
+ ... /* B */
+ m.unlock();
+
+The "lockset-based intuition" would be to argue that A and B
+cannot be executed concurrently as the code paths require a
+common lock to be held.
+
+One can however also arrive at the same conclusion using
+['happens-before]: Either thread1 or thread2 will succeed first
+at [^m.lock()]. If this is thread1, then thread2 cannot succeed
+at [^m.lock()] before thread1 has executed [^m.unlock()];
+consequently, A ['happens-before] B in this case.
+By symmetry, if thread2 succeeds at [^m.lock()] first, we can
+conclude B ['happens-before] A.
+
+Since this already exhausts all options, we can conclude that
+either A ['happens-before] B or B ['happens-before] A must
+always hold. Obviously, we cannot state ['which] of the two relationships
+holds, but either one is sufficient to conclude that A and B
+cannot conflict.
+
+Compare the [link boost_atomic.usage_examples.example_spinlock spinlock]
+implementation to see how the mutual exclusion concept can be
+mapped to [*Boost.Atomic].
+
+[endsect]
+
+[section:release_acquire ['happens-before] through [^release] and [^acquire]]
+
+The most basic pattern for coordinating threads via [*Boost.Atomic]
+uses [^release] and [^acquire] on an atomic variable for coordination: If ...
+
+* ... thread1 performs an operation A,
+* ... thread1 subsequently writes (or atomically
+ modifies) an atomic variable with [^release] semantic,
+* ... thread2 reads (or atomically reads-and-modifies)
+  this value from the same atomic variable with
+ [^acquire] semantic and
+* ... thread2 subsequently performs an operation B,
+
+... then A ['happens-before] B.
+
+Consider the following example:
+
+[c++]
+
+ atomic<int> a(0);
+
+ thread1:
+ ... /* A */
+ a.fetch_add(1, memory_order_release);
+
+ thread2:
+ int tmp = a.load(memory_order_acquire);
+ if (tmp == 1) {
+ ... /* B */
+ } else {
+ ... /* C */
+ }
+
+In this example, two avenues for execution are possible:
+
+* The [^store] operation by thread1 precedes the [^load] by thread2:
+ In this case thread2 will execute B and "A ['happens-before] B"
+ holds as all of the criteria above are satisfied.
+* The [^load] operation by thread2 precedes the [^store] by thread1:
+ In this case, thread2 will execute C, but "A ['happens-before] C"
+ does ['not] hold: thread2 does not read the value written by
+ thread1 through [^a].
+
+Therefore, A and B cannot conflict, but A and C ['can] conflict.
+
+[endsect]
+
+[section:fences Fences]
+
+Ordering constraints are generally specified together with an access to
+an atomic variable. It is, however, also possible to issue "fence"
+operations in isolation; in this case the fence operates in
+conjunction with preceding atomic operations (for `acquire`, `consume` or
+`seq_cst` fences) or succeeding atomic operations (for `release` or
+`seq_cst` fences).
+
+The example from the previous section could also be written in
+the following way:
+
+[c++]
+
+ atomic<int> a(0);
+
+ thread1:
+ ... /* A */
+ atomic_thread_fence(memory_order_release);
+ a.fetch_add(1, memory_order_relaxed);
+
+ thread2:
+ int tmp = a.load(memory_order_relaxed);
+ if (tmp == 1) {
+ atomic_thread_fence(memory_order_acquire);
+ ... /* B */
+ } else {
+ ... /* C */
+ }
+
+This provides the same ordering guarantees as previously, but
+elides a (possibly expensive) memory ordering operation in
+the case that C is executed.
+
+[endsect]
+
+[section:release_consume ['happens-before] through [^release] and [^consume]]
+
+The second pattern for coordinating threads via [*Boost.Atomic]
+uses [^release] and [^consume] on an atomic variable for coordination: If ...
+
+* ... thread1 performs an operation A,
+* ... thread1 subsequently writes (or atomically modifies) an
+ atomic variable with [^release] semantic,
+* ... thread2 reads (or atomically reads-and-modifies)
+  this value from the same atomic variable with [^consume] semantic and
+* ... thread2 subsequently performs an operation B that is ['computationally
+ dependent on the value of the atomic variable],
+
+... then A ['happens-before] B.
+
+Consider the following example:
+
+[c++]
+
+ atomic<int> a(0);
+ complex_data_structure data[2];
+
+ thread1:
+ data[1] = ...; /* A */
+ a.store(1, memory_order_release);
+
+ thread2:
+ int index = a.load(memory_order_consume);
+ complex_data_structure tmp = data[index]; /* B */
+
+In this example, two avenues for execution are possible:
+
+* The [^store] operation by thread1 precedes the [^load] by thread2:
+ In this case thread2 will read [^data\[1\]] and "A ['happens-before] B"
+ holds as all of the criteria above are satisfied.
+* The [^load] operation by thread2 precedes the [^store] by thread1:
+ In this case thread2 will read [^data\[0\]] and "A ['happens-before] B"
+ does ['not] hold: thread2 does not read the value written by
+ thread1 through [^a].
+
+Here, the ['happens-before] relationship helps ensure that any
+accesses (presumably writes) to [^data\[1\]] by thread1 happen
+before the accesses (presumably reads) to [^data\[1\]] by thread2:
+Lacking this relationship, thread2 might see stale/inconsistent
+data.
+
+Note that in this example it is essential that operation B is computationally
+dependent on the value of the atomic variable; therefore the following program,
+which replaces this data dependency with a conditional branch, would
+be erroneous:
+
+[c++]
+
+ atomic<int> a(0);
+ complex_data_structure data[2];
+
+ thread1:
+ data[1] = ...; /* A */
+ a.store(1, memory_order_release);
+
+ thread2:
+ int index = a.load(memory_order_consume);
+ complex_data_structure tmp;
+ if (index == 0)
+ tmp = data[0];
+ else
+ tmp = data[1];
+
+[^consume] is most commonly (and most safely! see
+[link atomic.limitations limitations]) used with
+pointers, compare for example the
+[link boost_atomic.usage_examples.singleton singleton with double-checked locking].
+
+[endsect]
+
+[section:seq_cst Sequential consistency]
+
+The third pattern for coordinating threads via [*Boost.Atomic]
+uses [^seq_cst] for coordination: If ...
+
+* ... thread1 performs an operation A,
+* ... thread1 subsequently performs any operation with [^seq_cst],
+* ... thread1 subsequently performs an operation B,
+* ... thread2 performs an operation C,
+* ... thread2 subsequently performs any operation with [^seq_cst],
+* ... thread2 subsequently performs an operation D,
+
+then either "A ['happens-before] D" or "C ['happens-before] B" holds.
+
+In this case it does not matter whether thread1 and thread2 operate
+on the same or different atomic variables, or use a "stand-alone"
+[^atomic_thread_fence] operation.
+
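+As an illustration (a sketch, not taken from the library's own examples),
+consider the classic "store buffering" pattern, where each thread stores to
+its own flag and then reads the other thread's flag:
+
+[c++]
+
+    atomic<int> x(0), y(0);
+
+    thread1:
+    x.store(1, memory_order_seq_cst); /* A */
+    int r1 = y.load(memory_order_seq_cst); /* B */
+
+    thread2:
+    y.store(1, memory_order_seq_cst); /* C */
+    int r2 = x.load(memory_order_seq_cst); /* D */
+
+Here the [^seq_cst] store in each thread serves both as the operation A (or C)
+and as the sequentially consistent operation required by the rule above: either
+A ['happens-before] D or C ['happens-before] B, so at least one of [^r1] and
+[^r2] must read 1. The outcome where both [^r1] and [^r2] are 0 is impossible;
+with weaker orderings both loads could observe 0.
+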
+[endsect]
+
+[endsect]
+
+[section:interface Programming interfaces]
+
+[section:configuration Configuration and building]
+
+The library consists of a header-only part and a compiled part. It is
+header-only for lock-free cases but requires a separate binary to
+implement the lock-based emulation. Users can detect whether
+linking to the compiled part is required by checking the
+[link atomic.interface.feature_macros feature macros].
+
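+For example (a minimal sketch; see the feature macros section for the complete
+list of macros), code that only requires a lock-free [^atomic<int>] can be
+guarded like this:
+
+    #include <boost/atomic.hpp>
+
+    #if BOOST_ATOMIC_INT_LOCK_FREE == 2
+    // boost::atomic<int> is always lock-free on this target; the lock-based
+    // emulation in the compiled library is not needed for this type
+    #endif
+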
+The following macros affect library behavior:
+
+[table
+ [[Macro] [Description]]
+ [[`BOOST_ATOMIC_NO_CMPXCHG8B`] [Affects 32-bit x86 Oracle Studio builds. When defined,
+ the library assumes the target CPU does not support `cmpxchg8b` instruction used
+ to support 64-bit atomic operations. This is the case with very old CPUs (pre-Pentium).
+ The library does not perform runtime detection of this instruction, so running the code
+ that uses 64-bit atomics on such CPUs will result in crashes, unless this macro is defined.
+ Note that the macro does not affect MSVC, GCC and compatible compilers because the library infers
+ this information from the compiler-defined macros.]]
+ [[`BOOST_ATOMIC_NO_CMPXCHG16B`] [Affects 64-bit x86 MSVC and Oracle Studio builds. When defined,
+ the library assumes the target CPU does not support `cmpxchg16b` instruction used
+ to support 128-bit atomic operations. This is the case with some early 64-bit AMD CPUs;
+ all Intel CPUs and current AMD CPUs support this instruction. The library does not
+ perform runtime detection of this instruction, so running the code that uses 128-bit
+ atomics on such CPUs will result in crashes, unless this macro is defined. Note that
+ the macro does not affect GCC and compatible compilers because the library infers
+ this information from the compiler-defined macros.]]
+ [[`BOOST_ATOMIC_NO_MFENCE`] [Affects 32-bit x86 Oracle Studio builds. When defined,
+ the library assumes the target CPU does not support `mfence` instruction used
+ to implement thread fences. This instruction was added with SSE2 instruction set extension,
+ which was available in CPUs since Intel Pentium 4. The library does not perform runtime detection
+ of this instruction, so running the library code on older CPUs will result in crashes, unless
+ this macro is defined. Note that the macro does not affect MSVC, GCC and compatible compilers
+ because the library infers this information from the compiler-defined macros.]]
+ [[`BOOST_ATOMIC_NO_FLOATING_POINT`] [When defined, support for floating point operations is disabled.
+ Floating point types shall be treated similar to trivially copyable structs and no capability macros
+ will be defined.]]
+ [[`BOOST_ATOMIC_FORCE_FALLBACK`] [When defined, all operations are implemented with locks.
+ This is mostly used for testing and should not be used in real world projects.]]
+ [[`BOOST_ATOMIC_DYN_LINK` and `BOOST_ALL_DYN_LINK`] [Control library linking. If defined,
+ the library assumes dynamic linking, otherwise static. The latter macro affects all Boost
+ libraries, not just [*Boost.Atomic].]]
+ [[`BOOST_ATOMIC_NO_LIB` and `BOOST_ALL_NO_LIB`] [Control library auto-linking on Windows.
+ When defined, disables auto-linking. The latter macro affects all Boost libraries,
+ not just [*Boost.Atomic].]]
+]
+
+Besides macros, it is important to specify the correct compiler options for the target CPU.
+With GCC and compatible compilers this affects whether particular atomic operations are
+lock-free or not.
+
+The Boost building process is described in the [@http://www.boost.org/doc/libs/release/more/getting_started/ Getting Started guide].
+For example, you can build [*Boost.Atomic] with the following command line:
+
+[pre
+ bjam --with-atomic variant=release instruction-set=core2 stage
+]
+
+[endsect]
+
+[section:interface_memory_order Memory order]
+
+ #include <boost/memory_order.hpp>
+
+The enumeration [^boost::memory_order] defines the following
+values to represent memory ordering constraints:
+
+[table
+ [[Constant] [Description]]
+ [[`memory_order_relaxed`] [No ordering constraint.
+ Informally speaking, following operations may be reordered before the
+ atomic operation, and preceding operations may be reordered after
+ it. This constraint is suitable only when
+ either a) further operations do not depend on the outcome
+ of the atomic operation or b) ordering is enforced through
+ stand-alone `atomic_thread_fence` operations. The operation on
+ the atomic value itself is still atomic though.
+ ]]
+ [[`memory_order_release`] [
+ Perform `release` operation. Informally speaking,
+ prevents all preceding memory operations from being reordered
+ past this point.
+ ]]
+ [[`memory_order_acquire`] [
+ Perform `acquire` operation. Informally speaking,
+ prevents succeeding memory operations from being reordered
+ before this point.
+ ]]
+ [[`memory_order_consume`] [
+ Perform `consume` operation. More relaxed (and
+ on some architectures more efficient) than `memory_order_acquire`
+ as it only affects succeeding operations that are
+ computationally-dependent on the value retrieved from
+ an atomic variable.
+ ]]
+ [[`memory_order_acq_rel`] [Perform both `release` and `acquire` operations]]
+ [[`memory_order_seq_cst`] [
+ Enforce sequential consistency. Implies `memory_order_acq_rel`, but
+ additionally enforces a total order on all such qualified operations.
+ ]]
+]
+
+For compilers that support C++11 scoped enums, the library also defines scoped synonyms
+that are preferred in modern programs:
+
+[table
+ [[Pre-C++11 constant] [C++11 equivalent]]
+ [[`memory_order_relaxed`] [`memory_order::relaxed`]]
+ [[`memory_order_release`] [`memory_order::release`]]
+ [[`memory_order_acquire`] [`memory_order::acquire`]]
+ [[`memory_order_consume`] [`memory_order::consume`]]
+ [[`memory_order_acq_rel`] [`memory_order::acq_rel`]]
+ [[`memory_order_seq_cst`] [`memory_order::seq_cst`]]
+]
+
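+For example, assuming [^a] is a [^boost::atomic<int>], the following two calls
+are equivalent on compilers that support scoped enums:
+
+    a.store(1, boost::memory_order_release); // pre-C++11 constant
+    a.store(1, boost::memory_order::release); // scoped synonym
+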
+See section [link atomic.thread_coordination ['happens-before]] for an explanation
+of the various ordering constraints.
+
+[endsect]
+
+[section:interface_atomic_flag Atomic flags]
+
+ #include <boost/atomic/atomic_flag.hpp>
+
+The `boost::atomic_flag` type provides the most basic set of atomic operations
+suitable for implementing mutually exclusive access to thread-shared data. The flag
+can have one of the two possible states: set and clear. The class implements the
+following operations:
+
+[table
+ [[Syntax] [Description]]
+ [
+ [`atomic_flag()`]
+ [Initialize to the clear state. See the discussion below.]
+ ]
+ [
+ [`bool test_and_set(memory_order order)`]
+ [Sets the atomic flag to the set state; returns `true` if the flag had been set prior to the operation]
+ ]
+ [
+ [`void clear(memory_order order)`]
+ [Sets the atomic flag to the clear state]
+ ]
+]
+
+`order` always has `memory_order_seq_cst` as the default value.
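+
+For illustration, these two operations are sufficient to build a simple spinlock
+(a minimal sketch, not the library's own version; see the
+[link boost_atomic.usage_examples.example_spinlock spinlock] example for the
+complete implementation):
+
+    boost::atomic_flag lock_flag;
+
+    void lock()
+    {
+        // spin until the flag was previously clear, i.e. the lock is acquired
+        while (lock_flag.test_and_set(boost::memory_order_acquire))
+        {
+        }
+    }
+
+    void unlock()
+    {
+        lock_flag.clear(boost::memory_order_release);
+    }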
+
+Note that the default constructor `atomic_flag()` is unlike `std::atomic_flag`, which
+leaves the default-constructed object uninitialized. This potentially requires dynamic
+initialization during the program startup to perform the object initialization, which
+makes it unsafe to create global `boost::atomic_flag` objects that can be used before
+entering `main()`. Some compilers though (especially those supporting C++11 `constexpr`)
+may be smart enough to perform flag initialization statically (which is, in C++11 terms,
+a constant initialization).
+
+This difference is deliberate and is done to support C++03 compilers. C++11 defines the
+`ATOMIC_FLAG_INIT` macro which can be used to statically initialize `std::atomic_flag`
+to a clear state like this:
+
+ std::atomic_flag flag = ATOMIC_FLAG_INIT; // constant initialization
+
+This macro cannot be implemented in C++03 because it would require `atomic_flag` to be
+an aggregate type, which it cannot be, since it has to prohibit copying and consequently
+define a default constructor. Thus the closest equivalent C++03 code using [*Boost.Atomic]
+would be:
+
+ boost::atomic_flag flag; // possibly, dynamic initialization in C++03;
+ // constant initialization in C++11
+
+The same code is also valid in C++11, so this code can be used universally. However, for
+interface parity with `std::atomic_flag`, if possible, the library also defines the
+`BOOST_ATOMIC_FLAG_INIT` macro, which is equivalent to `ATOMIC_FLAG_INIT`:
+
+ boost::atomic_flag flag = BOOST_ATOMIC_FLAG_INIT; // constant initialization
+
+This macro will only be implemented on a C++11 compiler. When this macro is not available,
+the library defines `BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT`.
+
+[endsect]
+
+[section:interface_atomic_object Atomic objects]
+
+ #include <boost/atomic/atomic.hpp>
+
+[^boost::atomic<['T]>] provides methods for atomically accessing
+variables of a suitable type [^['T]]. The type is suitable if
+it is /trivially copyable/ (3.9/9 \[basic.types\]). The following are
+examples of types satisfying this requirement:
+
+* a scalar type (e.g. integer, boolean, enum or pointer type)
+* a [^class] or [^struct] that has no non-trivial copy or move
+ constructors or assignment operators, has a trivial destructor,
+ and that is comparable via [^memcmp].
+
+Note that classes with virtual functions or virtual base classes
+do not satisfy the requirements. Also be warned
+that structures with "padding" between data members may compare
+non-equal via [^memcmp] even though all members are equal. This may also be
+the case with some floating point types, which include padding bits themselves.
+
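+For example (a hypothetical sketch, not part of the library), a small structure
+without padding satisfies these requirements:
+
+    struct rgba_color
+    {
+        unsigned char r, g, b, a; // trivially copyable, comparable via memcmp
+    };
+
+    boost::atomic< rgba_color > color; // 4 bytes; usually lock-free
+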
+[section:interface_atomic_generic [^boost::atomic<['T]>] template class]
+
+All atomic objects support the following operations and properties:
+
+[table
+ [[Syntax] [Description]]
+ [
+ [`atomic()`]
+ [Initialize to an unspecified value]
+ ]
+ [
+ [`atomic(T initial_value)`]
+ [Initialize to [^initial_value]]
+ ]
+ [
+ [`bool is_lock_free()`]
+ [Checks if the atomic object is lock-free; the returned value is consistent with the `is_always_lock_free` static constant, see below]
+ ]
+ [
+ [`T load(memory_order order)`]
+ [Return current value]
+ ]
+ [
+ [`void store(T value, memory_order order)`]
+ [Write new value to atomic variable]
+ ]
+ [
+ [`T exchange(T new_value, memory_order order)`]
+ [Exchange current value with `new_value`, returning current value]
+ ]
+ [
+ [`bool compare_exchange_weak(T & expected, T desired, memory_order order)`]
+ [Compare current value with `expected`, change it to `desired` if matches.
+ Returns `true` if an exchange has been performed, and always writes the
+ previous value back in `expected`. May fail spuriously, so must generally be
+ retried in a loop.]
+ ]
+ [
+ [`bool compare_exchange_weak(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
+ [Compare current value with `expected`, change it to `desired` if matches.
+ Returns `true` if an exchange has been performed, and always writes the
+ previous value back in `expected`. May fail spuriously, so must generally be
+ retried in a loop.]
+ ]
+ [
+ [`bool compare_exchange_strong(T & expected, T desired, memory_order order)`]
+ [Compare current value with `expected`, change it to `desired` if matches.
+ Returns `true` if an exchange has been performed, and always writes the
+ previous value back in `expected`.]
+ ]
+ [
+ [`bool compare_exchange_strong(T & expected, T desired, memory_order success_order, memory_order failure_order)`]
+ [Compare current value with `expected`, change it to `desired` if matches.
+ Returns `true` if an exchange has been performed, and always writes the
+ previous value back in `expected`.]
+ ]
+ [
+ [`static bool is_always_lock_free`]
+ [This static boolean constant indicates if any atomic object of this type is lock-free]
+ ]
+]
+
+`order` always has `memory_order_seq_cst` as the default value.
+
+The `compare_exchange_weak`/`compare_exchange_strong` variants
+taking four parameters differ from the three parameter variants
+in that they allow a different memory ordering constraint to
+be specified in case the operation fails.
+
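+Because `compare_exchange_weak` may fail spuriously, it is normally used in a
+loop. The following sketch (an illustration, not taken from the library's
+examples) atomically updates a stored maximum:
+
+    boost::atomic< int > max_value(0);
+
+    void update_max(int candidate)
+    {
+        int expected = max_value.load(boost::memory_order_relaxed);
+        // on failure, expected is refreshed with the current value and the
+        // condition is re-checked; spurious failures simply cause a retry
+        while (candidate > expected &&
+            !max_value.compare_exchange_weak(expected, candidate,
+                boost::memory_order_release, boost::memory_order_relaxed))
+        {
+        }
+    }
+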
+In addition to these explicit operations, each
+[^atomic<['T]>] object also supports
+implicit [^store] and [^load] through the use of "assignment"
+and "conversion to [^T]" operators. Avoid using these operators,
+as they do not allow specifying a memory ordering
+constraint, which always defaults to `memory_order_seq_cst`.
+
+[endsect]
+
+[section:interface_atomic_integral [^boost::atomic<['integral]>] template class]
+
+In addition to the operations listed in the previous section,
+[^boost::atomic<['I]>] for integral
+types [^['I]], except `bool`, supports the following operations,
+which correspond to [^std::atomic<['I]>]:
+
+[table
+ [[Syntax] [Description]]
+ [
+ [`I fetch_add(I v, memory_order order)`]
+ [Add `v` to variable, returning previous value]
+ ]
+ [
+ [`I fetch_sub(I v, memory_order order)`]
+ [Subtract `v` from variable, returning previous value]
+ ]
+ [
+ [`I fetch_and(I v, memory_order order)`]
+ [Apply bit-wise "and" with `v` to variable, returning previous value]
+ ]
+ [
+ [`I fetch_or(I v, memory_order order)`]
+ [Apply bit-wise "or" with `v` to variable, returning previous value]
+ ]
+ [
+ [`I fetch_xor(I v, memory_order order)`]
+ [Apply bit-wise "xor" with `v` to variable, returning previous value]
+ ]
+]
+
+Additionally, as a [*Boost.Atomic] extension, the following operations are also provided:
+
+[table
+ [[Syntax] [Description]]
+ [
+ [`I fetch_negate(memory_order order)`]
+ [Change the sign of the value stored in the variable, returning previous value]
+ ]
+ [
+ [`I fetch_complement(memory_order order)`]
+ [Set the variable to the one\'s complement of the current value, returning previous value]
+ ]
+ [
+ [`I negate(memory_order order)`]
+ [Change the sign of the value stored in the variable, returning the result]
+ ]
+ [
+ [`I add(I v, memory_order order)`]
+ [Add `v` to variable, returning the result]
+ ]
+ [
+ [`I sub(I v, memory_order order)`]
+ [Subtract `v` from variable, returning the result]
+ ]
+ [
+ [`I bitwise_and(I v, memory_order order)`]
+ [Apply bit-wise "and" with `v` to variable, returning the result]
+ ]
+ [
+ [`I bitwise_or(I v, memory_order order)`]
+ [Apply bit-wise "or" with `v` to variable, returning the result]
+ ]
+ [
+ [`I bitwise_xor(I v, memory_order order)`]
+ [Apply bit-wise "xor" with `v` to variable, returning the result]
+ ]
+ [
+ [`I bitwise_complement(memory_order order)`]
+ [Set the variable to the one\'s complement of the current value, returning the result]
+ ]
+ [
+ [`void opaque_negate(memory_order order)`]
+ [Change the sign of the value stored in the variable, returning nothing]
+ ]
+ [
+ [`void opaque_add(I v, memory_order order)`]
+ [Add `v` to variable, returning nothing]
+ ]
+ [
+ [`void opaque_sub(I v, memory_order order)`]
+ [Subtract `v` from variable, returning nothing]
+ ]
+ [
+ [`void opaque_and(I v, memory_order order)`]
+ [Apply bit-wise "and" with `v` to variable, returning nothing]
+ ]
+ [
+ [`void opaque_or(I v, memory_order order)`]
+ [Apply bit-wise "or" with `v` to variable, returning nothing]
+ ]
+ [
+ [`void opaque_xor(I v, memory_order order)`]
+ [Apply bit-wise "xor" with `v` to variable, returning nothing]
+ ]
+ [
+ [`void opaque_complement(memory_order order)`]
+ [Set the variable to the one\'s complement of the current value, returning nothing]
+ ]
+ [
+ [`bool negate_and_test(memory_order order)`]
+ [Change the sign of the value stored in the variable, returning `true` if the result is non-zero and `false` otherwise]
+ ]
+ [
+ [`bool add_and_test(I v, memory_order order)`]
+ [Add `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
+ ]
+ [
+ [`bool sub_and_test(I v, memory_order order)`]
+ [Subtract `v` from variable, returning `true` if the result is non-zero and `false` otherwise]
+ ]
+ [
+ [`bool and_and_test(I v, memory_order order)`]
+ [Apply bit-wise "and" with `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
+ ]
+ [
+ [`bool or_and_test(I v, memory_order order)`]
+ [Apply bit-wise "or" with `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
+ ]
+ [
+ [`bool xor_and_test(I v, memory_order order)`]
+ [Apply bit-wise "xor" with `v` to variable, returning `true` if the result is non-zero and `false` otherwise]
+ ]
+ [
+ [`bool complement_and_test(memory_order order)`]
+ [Set the variable to the one\'s complement of the current value, returning `true` if the result is non-zero and `false` otherwise]
+ ]
+ [
+ [`bool bit_test_and_set(unsigned int n, memory_order order)`]
+ [Set bit number `n` in the variable to 1, returning `true` if the bit was previously set to 1 and `false` otherwise]
+ ]
+ [
+ [`bool bit_test_and_reset(unsigned int n, memory_order order)`]
+ [Set bit number `n` in the variable to 0, returning `true` if the bit was previously set to 1 and `false` otherwise]
+ ]
+ [
+ [`bool bit_test_and_complement(unsigned int n, memory_order order)`]
+ [Change bit number `n` in the variable to the opposite value, returning `true` if the bit was previously set to 1 and `false` otherwise]
+ ]
+]
+
+[note In Boost.Atomic 1.66 the [^['op]_and_test] operations returned the opposite value (i.e. `true` if the result is zero). This was changed
+to the current behavior in 1.67 for consistency with other operations in Boost.Atomic, as well as with conventions taken in the C++ standard library.
+Boost.Atomic 1.66 was the only release shipped with the old behavior. Users upgrading from Boost 1.66 to a later release can define
+`BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST` macro when building their code to generate deprecation warnings on the [^['op]_and_test] function calls
+(the functions are not actually deprecated though; this is just a way to highlight their use).]
+
+`order` always has `memory_order_seq_cst` as the default value.
+
+The [^opaque_['op]] and [^['op]_and_test] variants of the operations
+may result in more efficient code on some architectures because
+the original value of the atomic variable is not preserved. In the
+[^bit_test_and_['op]] operations, the bit number `n` starts from 0, which
+means the least significant bit, and must not exceed
+[^std::numeric_limits<['I]>::digits - 1].
+
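+As an illustration of these extensions (a minimal sketch, not the library's
+reference-counting example), a reference counter can be released with
+[^sub_and_test], destroying the object only when the counter drops to zero:
+
+    boost::atomic< unsigned int > ref_count(1u);
+
+    void add_ref()
+    {
+        // the previous value is not needed, so the opaque variant suffices
+        ref_count.opaque_add(1u, boost::memory_order_relaxed);
+    }
+
+    void release()
+    {
+        // sub_and_test returns false when the result is zero
+        if (!ref_count.sub_and_test(1u, boost::memory_order_acq_rel))
+        {
+            /* destroy the referenced object here */
+        }
+    }
+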
+In addition to these explicit operations, each
+[^boost::atomic<['I]>] object also
+supports implicit pre-/post- increment/decrement, as well
+as the operators `+=`, `-=`, `&=`, `|=` and `^=`.
+Avoid using these operators, as they do not allow specifying a memory ordering
+constraint, which always defaults to `memory_order_seq_cst`.
+
+[endsect]
+
+[section:interface_atomic_floating_point [^boost::atomic<['floating-point]>] template class]
+
+[note The support for floating point types is optional and can be disabled by defining `BOOST_ATOMIC_NO_FLOATING_POINT`.]
+
+In addition to the operations applicable to all atomic objects,
+[^boost::atomic<['F]>] for floating point
+types [^['F]] supports the following operations,
+which correspond to [^std::atomic<['F]>]:
+
+[table
+ [[Syntax] [Description]]
+ [
+ [`F fetch_add(F v, memory_order order)`]
+ [Add `v` to variable, returning previous value]
+ ]
+ [
+ [`F fetch_sub(F v, memory_order order)`]
+ [Subtract `v` from variable, returning previous value]
+ ]
+]
+
+Additionally, as a [*Boost.Atomic] extension, the following operations are also provided:
+
+[table
+ [[Syntax] [Description]]
+ [
+ [`F fetch_negate(memory_order order)`]
+ [Change the sign of the value stored in the variable, returning previous value]
+ ]
+ [
+ [`F negate(memory_order order)`]
+ [Change the sign of the value stored in the variable, returning the result]
+ ]
+ [
+ [`F add(F v, memory_order order)`]
+ [Add `v` to variable, returning the result]
+ ]
+ [
+ [`F sub(F v, memory_order order)`]
+ [Subtract `v` from variable, returning the result]
+ ]
+ [
+ [`void opaque_negate(memory_order order)`]
+ [Change the sign of the value stored in the variable, returning nothing]
+ ]
+ [
+ [`void opaque_add(F v, memory_order order)`]
+ [Add `v` to variable, returning nothing]
+ ]
+ [
+ [`void opaque_sub(F v, memory_order order)`]
+ [Subtract `v` from variable, returning nothing]
+ ]
+]
+
+`order` always has `memory_order_seq_cst` as its default value.
+
+The [^opaque_['op]] variants of the operations
+may result in more efficient code on some architectures because
+the original value of the atomic variable is not preserved.
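+
+For example, when the result of the operation is not needed, the opaque variant
+is sufficient (a minimal sketch; `total` is a hypothetical variable):
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    boost::atomic<double> total(0.0);
+
+    // the returned value is not needed, so the opaque variant is enough
+    // and may generate better code on some architectures
+    total.opaque_add(1.5, boost::memory_order_relaxed);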
+
+In addition to these explicit operations, each
+[^boost::atomic<['F]>] object also supports operators `+=` and `-=`.
+Avoid using these operators, as they do not allow specifying a memory ordering
+constraint, which therefore always defaults to `memory_order_seq_cst`.
+
+When using atomic operations with floating point types, bear in mind that [*Boost.Atomic]
+always performs bitwise comparison of the stored values. This means that operations like
+`compare_exchange*` may fail if the stored value and the comparand have different binary representations,
+even if they would normally compare equal. This is typically the case when either of the numbers
+is [@https://en.wikipedia.org/wiki/Denormal_number denormalized]. It also means that the behavior
+with regard to special floating point values like NaN and signed zero differs from that of normal C++.
+
+Another source of problems is padding bits that are added to some floating point types for alignment.
+One widespread example is the Intel x87 extended double format, which is typically stored as 80 bits
+of value padded with 16 or 48 unused bits. These padding bits are often uninitialized and contain garbage,
+which makes two equal numbers have different binary representations. The library attempts to account for
+the known cases of this kind, but in general some platforms may not be covered. Note that the C++
+standard makes no guarantees about the reliability of `compare_exchange*` operations in the face of padding or
+trap bits.
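+
+In practice, `compare_exchange*` on floating point values is typically used in a retry
+loop, where a failed exchange reloads the comparand with the exact bit pattern currently
+stored, so the loop is not affected by differences in representation. A minimal sketch
+(the `scale` function is a hypothetical example, not part of the library):
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    // atomically multiply the stored value by a factor
+    void scale(boost::atomic<double>& value, double factor)
+    {
+        double expected = value.load(boost::memory_order_relaxed);
+        // on failure, expected is updated with the currently stored bit pattern,
+        // so the next comparison is performed against that exact representation
+        while (!value.compare_exchange_weak(expected, expected * factor,
+            boost::memory_order_relaxed))
+        {
+        }
+    }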
+
+[endsect]
+
+[section:interface_atomic_pointer [^boost::atomic<['pointer]>] template class]
+
+In addition to the operations applicable to all atomic objects,
+[^boost::atomic<['P]>] for pointer
+types [^['P]] (other than pointers to [^void], function pointers and member pointers) supports
+the following operations, which correspond to [^std::atomic<['P]>]:
+
+[table
+ [[Syntax] [Description]]
+ [
+    [`P fetch_add(ptrdiff_t v, memory_order order)`]
+    [Add `v` to variable, returning previous value]
+  ]
+  [
+    [`P fetch_sub(ptrdiff_t v, memory_order order)`]
+ [Subtract `v` from variable, returning previous value]
+ ]
+]
+
+Similarly to integers, the following [*Boost.Atomic] extensions are also provided:
+
+[table
+ [[Syntax] [Description]]
+ [
+    [`P add(ptrdiff_t v, memory_order order)`]
+    [Add `v` to variable, returning the result]
+  ]
+  [
+    [`P sub(ptrdiff_t v, memory_order order)`]
+ [Subtract `v` from variable, returning the result]
+ ]
+ [
+ [`void opaque_add(ptrdiff_t v, memory_order order)`]
+ [Add `v` to variable, returning nothing]
+ ]
+ [
+ [`void opaque_sub(ptrdiff_t v, memory_order order)`]
+ [Subtract `v` from variable, returning nothing]
+ ]
+ [
+ [`bool add_and_test(ptrdiff_t v, memory_order order)`]
+ [Add `v` to variable, returning `true` if the result is non-null and `false` otherwise]
+ ]
+ [
+ [`bool sub_and_test(ptrdiff_t v, memory_order order)`]
+ [Subtract `v` from variable, returning `true` if the result is non-null and `false` otherwise]
+ ]
+]
+
+`order` always has `memory_order_seq_cst` as its default value.
+
+In addition to these explicit operations, each
+[^boost::atomic<['P]>] object also
+supports implicit pre-/post- increment/decrement, as well
+as the operators `+=` and `-=`. Avoid using these operators,
+as they do not allow explicit specification of a memory ordering
+constraint, which therefore always defaults to `memory_order_seq_cst`.
+
+[endsect]
+
+[section:interface_atomic_convenience_typedefs [^boost::atomic<['T]>] convenience typedefs]
+
+For convenience, several shorthand typedefs of [^boost::atomic<['T]>] are provided:
+
+[c++]
+
+ typedef atomic< char > atomic_char;
+ typedef atomic< unsigned char > atomic_uchar;
+ typedef atomic< signed char > atomic_schar;
+ typedef atomic< unsigned short > atomic_ushort;
+ typedef atomic< short > atomic_short;
+ typedef atomic< unsigned int > atomic_uint;
+ typedef atomic< int > atomic_int;
+ typedef atomic< unsigned long > atomic_ulong;
+ typedef atomic< long > atomic_long;
+ typedef atomic< unsigned long long > atomic_ullong;
+ typedef atomic< long long > atomic_llong;
+
+ typedef atomic< void* > atomic_address;
+ typedef atomic< bool > atomic_bool;
+ typedef atomic< wchar_t > atomic_wchar_t;
+ typedef atomic< char16_t > atomic_char16_t;
+ typedef atomic< char32_t > atomic_char32_t;
+
+ typedef atomic< uint8_t > atomic_uint8_t;
+ typedef atomic< int8_t > atomic_int8_t;
+ typedef atomic< uint16_t > atomic_uint16_t;
+ typedef atomic< int16_t > atomic_int16_t;
+ typedef atomic< uint32_t > atomic_uint32_t;
+ typedef atomic< int32_t > atomic_int32_t;
+ typedef atomic< uint64_t > atomic_uint64_t;
+ typedef atomic< int64_t > atomic_int64_t;
+
+ typedef atomic< int_least8_t > atomic_int_least8_t;
+ typedef atomic< uint_least8_t > atomic_uint_least8_t;
+ typedef atomic< int_least16_t > atomic_int_least16_t;
+ typedef atomic< uint_least16_t > atomic_uint_least16_t;
+ typedef atomic< int_least32_t > atomic_int_least32_t;
+ typedef atomic< uint_least32_t > atomic_uint_least32_t;
+ typedef atomic< int_least64_t > atomic_int_least64_t;
+ typedef atomic< uint_least64_t > atomic_uint_least64_t;
+ typedef atomic< int_fast8_t > atomic_int_fast8_t;
+ typedef atomic< uint_fast8_t > atomic_uint_fast8_t;
+ typedef atomic< int_fast16_t > atomic_int_fast16_t;
+ typedef atomic< uint_fast16_t > atomic_uint_fast16_t;
+ typedef atomic< int_fast32_t > atomic_int_fast32_t;
+ typedef atomic< uint_fast32_t > atomic_uint_fast32_t;
+ typedef atomic< int_fast64_t > atomic_int_fast64_t;
+ typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
+ typedef atomic< intmax_t > atomic_intmax_t;
+ typedef atomic< uintmax_t > atomic_uintmax_t;
+
+ typedef atomic< std::size_t > atomic_size_t;
+ typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
+
+ typedef atomic< intptr_t > atomic_intptr_t;
+ typedef atomic< uintptr_t > atomic_uintptr_t;
+
+The typedefs are provided only if the corresponding type is available.
+
+[endsect]
+
+[endsect]
+
+[section:interface_fences Fences]
+
+ #include <boost/atomic/fences.hpp>
+
+[table
+ [[Syntax] [Description]]
+ [
+ [`void atomic_thread_fence(memory_order order)`]
+ [Issue fence for coordination with other threads.]
+ ]
+ [
+ [`void atomic_signal_fence(memory_order order)`]
+ [Issue fence for coordination with signal handler (only in same thread).]
+ ]
+]
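+
+A minimal sketch of the usual pairing of fences with relaxed atomic accesses
+(the variable and function names below are hypothetical): a release fence issued
+before a relaxed store synchronizes with an acquire fence issued after a relaxed
+load that observes the stored value.
+
+[c++]
+
+    #include <boost/atomic.hpp>
+    #include <boost/atomic/fences.hpp>
+
+    int payload = 0;
+    boost::atomic<bool> ready(false);
+
+    void producer()
+    {
+        payload = 42;
+        boost::atomic_thread_fence(boost::memory_order_release);
+        ready.store(true, boost::memory_order_relaxed);
+    }
+
+    void consumer()
+    {
+        if (ready.load(boost::memory_order_relaxed))
+        {
+            boost::atomic_thread_fence(boost::memory_order_acquire);
+            // the write to payload is now guaranteed to be visible
+            int v = payload;
+            (void)v;
+        }
+    }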
+
+[endsect]
+
+[section:feature_macros Feature testing macros]
+
+ #include <boost/atomic/capabilities.hpp>
+
+[*Boost.Atomic] defines a number of macros to allow compile-time
+detection whether an atomic data type is implemented using
+"true" atomic operations, or whether an internal "lock" is
+used to provide atomicity. The following macros will be
+defined to `0` if operations on the data type always
+require a lock, to `1` if operations on the data type may
+sometimes require a lock, and to `2` if they are always lock-free:
+
+[table
+ [[Macro] [Description]]
+ [
+ [`BOOST_ATOMIC_FLAG_LOCK_FREE`]
+ [Indicate whether `atomic_flag` is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_BOOL_LOCK_FREE`]
+ [Indicate whether `atomic<bool>` is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_CHAR_LOCK_FREE`]
+ [Indicate whether `atomic<char>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_CHAR16_T_LOCK_FREE`]
+ [Indicate whether `atomic<char16_t>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_CHAR32_T_LOCK_FREE`]
+ [Indicate whether `atomic<char32_t>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_WCHAR_T_LOCK_FREE`]
+ [Indicate whether `atomic<wchar_t>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_SHORT_LOCK_FREE`]
+ [Indicate whether `atomic<short>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_INT_LOCK_FREE`]
+ [Indicate whether `atomic<int>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_LONG_LOCK_FREE`]
+ [Indicate whether `atomic<long>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_LLONG_LOCK_FREE`]
+ [Indicate whether `atomic<long long>` (including signed/unsigned variants) is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_ADDRESS_LOCK_FREE` or `BOOST_ATOMIC_POINTER_LOCK_FREE`]
+ [Indicate whether `atomic<T *>` is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_THREAD_FENCE`]
+ [Indicate whether `atomic_thread_fence` function is lock-free]
+ ]
+ [
+ [`BOOST_ATOMIC_SIGNAL_FENCE`]
+ [Indicate whether `atomic_signal_fence` function is lock-free]
+ ]
+]
+
+In addition to these standard macros, [*Boost.Atomic] also defines a number of extension macros
+which can be useful. Like the standard ones, these macros are defined to the values `0`, `1` and `2`
+to indicate whether the corresponding operations are lock-free or not.
+
+[table
+ [[Macro] [Description]]
+ [
+ [`BOOST_ATOMIC_INT8_LOCK_FREE`]
+ [Indicate whether `atomic<int8_type>` is lock-free.]
+ ]
+ [
+ [`BOOST_ATOMIC_INT16_LOCK_FREE`]
+ [Indicate whether `atomic<int16_type>` is lock-free.]
+ ]
+ [
+ [`BOOST_ATOMIC_INT32_LOCK_FREE`]
+ [Indicate whether `atomic<int32_type>` is lock-free.]
+ ]
+ [
+ [`BOOST_ATOMIC_INT64_LOCK_FREE`]
+ [Indicate whether `atomic<int64_type>` is lock-free.]
+ ]
+ [
+ [`BOOST_ATOMIC_INT128_LOCK_FREE`]
+ [Indicate whether `atomic<int128_type>` is lock-free.]
+ ]
+ [
+ [`BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT`]
+ [Defined after including `atomic_flag.hpp`, if the implementation
+ does not support the `BOOST_ATOMIC_FLAG_INIT` macro for static
+ initialization of `atomic_flag`. This macro is typically defined
+ for pre-C++11 compilers.]
+ ]
+]
+
+In the table above, `intN_type` is a type whose storage occupies exactly `N` contiguous bits and that is suitably aligned for atomic operations.
+
+For floating-point types the following macros are similarly defined:
+
+[table
+ [[Macro] [Description]]
+ [
+ [`BOOST_ATOMIC_FLOAT_LOCK_FREE`]
+ [Indicate whether `atomic<float>` is lock-free.]
+ ]
+ [
+ [`BOOST_ATOMIC_DOUBLE_LOCK_FREE`]
+ [Indicate whether `atomic<double>` is lock-free.]
+ ]
+ [
+ [`BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE`]
+ [Indicate whether `atomic<long double>` is lock-free.]
+ ]
+]
+
+These macros are not defined when support for floating point types is disabled by the user.
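+
+The macros can be used in preprocessor conditionals to select an implementation
+strategy at compile time. A minimal sketch (the typedef name is hypothetical):
+
+[c++]
+
+    #include <boost/atomic/capabilities.hpp>
+    #include <boost/atomic.hpp>
+
+    #if BOOST_ATOMIC_INT_LOCK_FREE == 2
+    // atomic<int> never falls back to locking on this platform
+    typedef boost::atomic<int> lockfree_counter;
+    #else
+    // a lock-based or alternative strategy may be preferable here
+    #endif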
+
+[endsect]
+
+[endsect]
+
+[section:usage_examples Usage examples]
+
+[include examples.qbk]
+
+[endsect]
+
+[/
+[section:platform_support Implementing support for additional platforms]
+
+[include platform.qbk]
+
+[endsect]
+]
+
+[/ [xinclude autodoc.xml] ]
+
+[section:limitations Limitations]
+
+While [*Boost.Atomic] strives to implement the atomic operations
+from C++11 and later as faithfully as possible, there are a few
+limitations that cannot be lifted without compiler support:
+
+* [*Aggregate initialization syntax is not supported]: Since [*Boost.Atomic]
+ sometimes uses storage type that is different from the value type,
+ the `atomic<>` template needs an initialization constructor that
+ performs the necessary conversion. This makes `atomic<>` a non-aggregate
+ type and prohibits aggregate initialization syntax (`atomic<int> a = {10}`).
+ [*Boost.Atomic] does support direct and unified initialization syntax though.
+  [*Advice]: Always use direct initialization (`atomic<int> a(10)`) or unified
+  initialization (`atomic<int> a{10}`) syntax, as shown in the sketch after this list.
+* [*Initializing constructor is not `constexpr` for some types]: For value types
+  other than integral types and `bool`, the `atomic<>` initializing constructor needs
+ to perform runtime conversion to the storage type. This limitation may be
+ lifted for more categories of types in the future.
+* [*Default constructor is not trivial in C++03]: Because the initializing
+ constructor has to be defined in `atomic<>`, the default constructor
+ must also be defined. In C++03 the constructor cannot be defined as defaulted
+ and therefore it is not trivial. In C++11 the constructor is defaulted (and trivial,
+ if the default constructor of the value type is). In any case, the default
+ constructor of `atomic<>` performs default initialization of the atomic value,
+ as required in C++11. [*Advice]: In C++03, do not use [*Boost.Atomic] in contexts
+  where a trivial default constructor is important (e.g. as a global variable which
+ is required to be statically initialized).
+* [*C++03 compilers may transform computation dependency to control dependency]:
+ Crucially, `memory_order_consume` only affects computationally-dependent
+ operations, but in general there is nothing preventing a compiler
+ from transforming a computation dependency into a control dependency.
+ A fully compliant C++11 compiler would be forbidden from such a transformation,
+ but in practice most if not all compilers have chosen to promote
+ `memory_order_consume` to `memory_order_acquire` instead
+ (see [@https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59448 this] gcc bug
+ for example). In the current implementation [*Boost.Atomic] follows that trend,
+ but this may change in the future.
+ [*Advice]: In general, avoid `memory_order_consume` and use `memory_order_acquire`
+ instead. Use `memory_order_consume` only in conjunction with
+ pointer values, and only if you can ensure that the compiler cannot
+ speculate and transform these into control dependencies.
+* [*Fence operations may enforce "too strong" compiler ordering]:
+ Semantically, `memory_order_acquire`/`memory_order_consume`
+ and `memory_order_release` need to restrain reordering of
+ memory operations only in one direction. Since in C++03 there is no
+ way to express this constraint to the compiler, these act
+ as "full compiler barriers" in C++03 implementation. In corner
+ cases this may result in a slightly less efficient code than a C++11 compiler
+ could generate. [*Boost.Atomic] will use compiler intrinsics, if possible,
+ to express the proper ordering constraints.
+* [*Atomic operations may enforce "too strong" memory ordering in debug mode]:
+ On some compilers, disabling optimizations makes it impossible to provide
+ memory ordering constraints as compile-time constants to the compiler intrinsics.
+ This causes the compiler to silently ignore the provided constraints and choose
+ the "strongest" memory order (`memory_order_seq_cst`) to generate code. Not only
+ this reduces performance, this may hide bugs in the user's code (e.g. if the user
+ used a wrong memory order constraint, which caused a data race).
+ [*Advice]: Always test your code with optimizations enabled.
+* [*No interprocess fallback]: using `atomic<T>` in shared memory only works
+  correctly if `atomic<T>::is_lock_free() == true`.
+* [*Signed integers must use [@https://en.wikipedia.org/wiki/Two%27s_complement two's complement]
+ representation]: [*Boost.Atomic] makes this requirement in order to implement
+ conversions between signed and unsigned integers internally. C++11 requires all
+ atomic arithmetic operations on integers to be well defined according to two's complement
+  arithmetic, which means that Boost.Atomic has to operate on unsigned integers internally
+ to avoid undefined behavior that results from signed integer overflows. Platforms
+ with other signed integer representations are not supported.
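+
+For the first limitation above, a minimal sketch of which initialization forms
+are accepted:
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    boost::atomic<int> a(10);        // direct initialization: supported
+    boost::atomic<int> b{10};        // unified initialization: supported (C++11)
+    //boost::atomic<int> c = {10};   // aggregate initialization: not supported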
+
+[endsect]
+
+[section:porting Porting]
+
+[section:unit_tests Unit tests]
+
+[*Boost.Atomic] provides a unit test suite to verify that the
+implementation behaves as expected:
+
+* [*fallback_api.cpp] verifies that the fallback-to-locking aspect
+ of [*Boost.Atomic] compiles and has correct value semantics.
+* [*native_api.cpp] verifies that all atomic operations have correct
+ value semantics (e.g. "fetch_add" really adds the desired value,
+ returning the previous). It is a rough "smoke-test" to help weed
+ out the most obvious mistakes (for example width overflow,
+ signed/unsigned extension, ...).
+* [*lockfree.cpp] verifies that the [*BOOST_ATOMIC_*_LOCK_FREE] macros
+ are set properly according to the expectations for a given
+ platform, and that they match up with the [*is_always_lock_free] and
+ [*is_lock_free] members of the [*atomic] object instances.
+* [*atomicity.cpp] lets two threads race against each other modifying
+  a shared variable, verifying that the operations behave atomically
+  as appropriate. By nature, this test is necessarily stochastic, and
+  the test self-calibrates to yield 99% confidence that a
+  positive result indicates absence of an error. This test is
+  useful even on uni-processor systems with preemption.
+* [*ordering.cpp] lets two threads race against each other accessing
+ multiple shared variables, verifying that the operations
+ exhibit the expected ordering behavior. By nature, this test is
+ necessarily stochastic, and the test attempts to self-calibrate to
+ yield 99% confidence that a positive result indicates absence
+ of an error. This only works on true multi-processor (or multi-core)
+ systems. It does not yield any result on uni-processor systems
+  or emulators (due to there being no observable reordering even in
+  the order=relaxed case) and will report that fact.
+
+[endsect]
+
+[section:tested_compilers Tested compilers]
+
+[*Boost.Atomic] has been tested on and is known to work on
+the following compilers/platforms:
+
+* gcc 4.x: i386, x86_64, ppc32, ppc64, sparcv9, armv6, alpha
+* Visual Studio Express 2008/Windows XP, x86, x64, ARM
+
+[endsect]
+
+[section:acknowledgements Acknowledgements]
+
+* Adam Wulkiewicz created the logo used on the [@https://github.com/boostorg/atomic GitHub project page]. The logo was taken from his [@https://github.com/awulkiew/boost-logos collection] of Boost logos.
+
+[endsect]
+
+[endsect]
diff --git a/doc/examples.qbk b/doc/examples.qbk
new file mode 100644
index 0000000..e34c402
--- /dev/null
+++ b/doc/examples.qbk
@@ -0,0 +1,398 @@
+[/
+ / Copyright (c) 2009 Helge Bahmann
+ /
+ / Distributed under the Boost Software License, Version 1.0. (See accompanying
+ / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+ /]
+
+[section:example_reference_counters Reference counting]
+
+The purpose of a ['reference counter] is to count the number
+of pointers to an object. The object can be destroyed as
+soon as the reference counter reaches zero.
+
+[section Implementation]
+
+[c++]
+
+ #include <boost/intrusive_ptr.hpp>
+ #include <boost/atomic.hpp>
+
+ class X {
+ public:
+ typedef boost::intrusive_ptr<X> pointer;
+ X() : refcount_(0) {}
+
+ private:
+ mutable boost::atomic<int> refcount_;
+ friend void intrusive_ptr_add_ref(const X * x)
+ {
+ x->refcount_.fetch_add(1, boost::memory_order_relaxed);
+ }
+ friend void intrusive_ptr_release(const X * x)
+ {
+ if (x->refcount_.fetch_sub(1, boost::memory_order_release) == 1) {
+ boost::atomic_thread_fence(boost::memory_order_acquire);
+ delete x;
+ }
+ }
+ };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+ X::pointer x = new X;
+
+[endsect]
+
+[section Discussion]
+
+Increasing the reference counter can always be done with
+[^memory_order_relaxed]: New references to an object can only
+be formed from an existing reference, and passing an existing
+reference from one thread to another must already provide any
+required synchronization.
+
+It is important to ensure that any possible access to the object in
+one thread (through an existing reference) ['happens before]
+deleting the object in a different thread. This is achieved
+by a "release" operation after dropping a reference (any
+access to the object through this reference must obviously
+have happened before), and an "acquire" operation before
+deleting the object.
+
+It would be possible to use [^memory_order_acq_rel] for the
+[^fetch_sub] operation, but this results in unneeded "acquire"
+operations when the reference counter has not yet reached zero
+and may impose a performance penalty.
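+
+For comparison, a sketch of the [^memory_order_acq_rel] variant mentioned above
+(functionally correct, but not the recommended form):
+
+[c++]
+
+    friend void intrusive_ptr_release(const X * x)
+    {
+        // acq_rel on the decrement removes the need for the separate fence,
+        // but issues an "acquire" even when the counter has not yet reached zero
+        if (x->refcount_.fetch_sub(1, boost::memory_order_acq_rel) == 1) {
+            delete x;
+        }
+    }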
+
+[endsect]
+
+[endsect]
+
+[section:example_spinlock Spinlock]
+
+The purpose of a ['spin lock] is to prevent multiple threads
+from concurrently accessing a shared data structure. In contrast
+to a mutex, threads will busy-wait and waste CPU cycles instead
+of yielding the CPU to another thread. ['Do not use spinlocks
+unless you are certain that you understand the consequences.]
+
+[section Implementation]
+
+[c++]
+
+ #include <boost/atomic.hpp>
+
+ class spinlock {
+ private:
+ typedef enum {Locked, Unlocked} LockState;
+ boost::atomic<LockState> state_;
+
+ public:
+ spinlock() : state_(Unlocked) {}
+
+ void lock()
+ {
+ while (state_.exchange(Locked, boost::memory_order_acquire) == Locked) {
+ /* busy-wait */
+ }
+ }
+ void unlock()
+ {
+ state_.store(Unlocked, boost::memory_order_release);
+ }
+ };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+ spinlock s;
+
+ s.lock();
+ // access data structure here
+ s.unlock();
+
+[endsect]
+
+[section Discussion]
+
+The purpose of the spinlock is to make sure that one access
+to the shared data structure always strictly "happens before"
+another. The usage of acquire/release in lock/unlock is required
+and sufficient to guarantee this ordering.
+
+It would be correct to write the "lock" operation in the following
+way:
+
+[c++]
+
+    void lock()
+ {
+ while (state_.exchange(Locked, boost::memory_order_relaxed) == Locked) {
+ /* busy-wait */
+ }
+ atomic_thread_fence(boost::memory_order_acquire);
+ }
+
+This "optimization" is however a) useless and b) may in fact hurt:
+a) Since the thread will be busily spinning on a blocked spinlock,
+it does not matter if it will waste the CPU cycles with just
+"exchange" operations or with both useless "exchange" and "acquire"
+operations. b) A tight "exchange" loop without any
+memory-synchronizing instruction introduced through an "acquire"
+operation will on some systems monopolize the memory subsystem
+and degrade the performance of other system components.
+
+[endsect]
+
+[endsect]
+
+[section:singleton Singleton with double-checked locking pattern]
+
+The purpose of the ['Singleton with double-checked locking pattern] is to ensure
+that at most one instance of a particular object is created.
+If one instance has been created already, access to the existing
+object should be as light-weight as possible.
+
+[section Implementation]
+
+[c++]
+
+ #include <boost/atomic.hpp>
+ #include <boost/thread/mutex.hpp>
+
+ class X {
+ public:
+ static X * instance()
+ {
+ X * tmp = instance_.load(boost::memory_order_consume);
+ if (!tmp) {
+ boost::mutex::scoped_lock guard(instantiation_mutex);
+ tmp = instance_.load(boost::memory_order_consume);
+ if (!tmp) {
+ tmp = new X;
+ instance_.store(tmp, boost::memory_order_release);
+ }
+ }
+ return tmp;
+ }
+ private:
+ static boost::atomic<X *> instance_;
+ static boost::mutex instantiation_mutex;
+ };
+
+    boost::atomic<X *> X::instance_(0);
+    boost::mutex X::instantiation_mutex;
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+ X * x = X::instance();
+ // dereference x
+
+[endsect]
+
+[section Discussion]
+
+The mutex makes sure that only one instance of the object is
+ever created. The [^instance] method must make sure that any
+dereference of the object strictly "happens after" creating
+the instance in another thread. The use of [^memory_order_release]
+after creating and initializing the object and [^memory_order_consume]
+before dereferencing the object provides this guarantee.
+
+It would be permissible to use [^memory_order_acquire] instead of
+[^memory_order_consume], but this provides a stronger guarantee
+than is required since only operations depending on the value of
+the pointer need to be ordered.
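+
+For reference, the [^memory_order_acquire] variant mentioned above would simply
+use a stronger ordering on the loads (a sketch):
+
+[c++]
+
+    X * tmp = instance_.load(boost::memory_order_acquire);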
+
+[endsect]
+
+[endsect]
+
+[section:example_ringbuffer Wait-free ring buffer]
+
+A ['wait-free ring buffer] provides a mechanism for relaying objects
+from one single "producer" thread to one single "consumer" thread without
+any locks. The operations on this data structure are "wait-free" which
+means that each operation finishes within a constant number of steps.
+This makes this data structure suitable for use in hard real-time systems
+or for communication with interrupt/signal handlers.
+
+[section Implementation]
+
+[c++]
+
+ #include <boost/atomic.hpp>
+
+ template<typename T, size_t Size>
+ class ringbuffer {
+ public:
+ ringbuffer() : head_(0), tail_(0) {}
+
+ bool push(const T & value)
+ {
+ size_t head = head_.load(boost::memory_order_relaxed);
+ size_t next_head = next(head);
+ if (next_head == tail_.load(boost::memory_order_acquire))
+ return false;
+ ring_[head] = value;
+ head_.store(next_head, boost::memory_order_release);
+ return true;
+ }
+ bool pop(T & value)
+ {
+ size_t tail = tail_.load(boost::memory_order_relaxed);
+ if (tail == head_.load(boost::memory_order_acquire))
+ return false;
+ value = ring_[tail];
+ tail_.store(next(tail), boost::memory_order_release);
+ return true;
+ }
+ private:
+ size_t next(size_t current)
+ {
+ return (current + 1) % Size;
+ }
+ T ring_[Size];
+ boost::atomic<size_t> head_, tail_;
+ };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+ ringbuffer<int, 32> r;
+
+ // try to insert an element
+ if (r.push(42)) { /* succeeded */ }
+ else { /* buffer full */ }
+
+ // try to retrieve an element
+ int value;
+ if (r.pop(value)) { /* succeeded */ }
+ else { /* buffer empty */ }
+
+[endsect]
+
+[section Discussion]
+
+The implementation makes sure that the ring indices do
+not "lap-around" each other to ensure that no elements
+are either lost or read twice.
+
+Furthermore, it must guarantee that read access to a
+particular object in [^pop] "happens after" it has been
+written in [^push]. This is achieved by writing [^head_ ]
+with "release" and reading it with "acquire". Conversely,
+the implementation also ensures that read access to
+a particular ring element "happens before"
+rewriting this element with a new value by accessing [^tail_]
+with appropriate ordering constraints.
+
+[endsect]
+
+[endsect]
+
+[section:mp_queue Wait-free multi-producer queue]
+
+The purpose of the ['wait-free multi-producer queue] is to allow
+an arbitrary number of producers to enqueue objects which are
+retrieved and processed in FIFO order by a single consumer.
+
+[section Implementation]
+
+[c++]
+
+    #include <boost/atomic.hpp>
+
+    template<typename T>
+ class waitfree_queue {
+ public:
+ struct node {
+ T data;
+ node * next;
+ };
+ void push(const T &data)
+ {
+ node * n = new node;
+ n->data = data;
+ node * stale_head = head_.load(boost::memory_order_relaxed);
+ do {
+ n->next = stale_head;
+ } while (!head_.compare_exchange_weak(stale_head, n, boost::memory_order_release));
+ }
+
+ node * pop_all(void)
+ {
+        node * last = pop_all_reverse(), * first = 0;
+        while(last) {
+          node * tmp = last;
+ last = last->next;
+ tmp->next = first;
+ first = tmp;
+ }
+ return first;
+ }
+
+ waitfree_queue() : head_(0) {}
+
+ // alternative interface if ordering is of no importance
+ node * pop_all_reverse(void)
+ {
+ return head_.exchange(0, boost::memory_order_consume);
+ }
+ private:
+ boost::atomic<node *> head_;
+ };
+
+[endsect]
+
+[section Usage]
+
+[c++]
+
+ waitfree_queue<int> q;
+
+ // insert elements
+ q.push(42);
+ q.push(2);
+
+ // pop elements
+    waitfree_queue<int>::node * x = q.pop_all();
+    while(x) {
+      waitfree_queue<int>::node * tmp = x;
+ x = x->next;
+ // process tmp->data, probably delete it afterwards
+ delete tmp;
+ }
+
+[endsect]
+
+[section Discussion]
+
+The implementation guarantees that all objects enqueued are
+processed in the order they were enqueued by building a singly-linked
+list of objects in reverse processing order. The queue is atomically
+emptied by the consumer and the list is then brought into the correct order.
+
+It must be guaranteed that any access to an object to be enqueued
+by the producer "happens before" any access by the consumer. This
+is assured by inserting objects into the list with ['release] and
+dequeuing them with ['consume] memory order. It is not
+necessary to use ['acquire] memory order in [^waitfree_queue::pop_all]
+because all operations involved depend on the value of
+the atomic pointer through dereference.
+
+[endsect]
+
+[endsect]