Squashed 'third_party/boostorg/math/' content from commit 0e9549f
Change-Id: I7c2a13cb6a5beea4a471341510d8364cedd71613
git-subtree-dir: third_party/boostorg/math
git-subtree-split: 0e9549ff2f854e6edafaf4627d65026f2f533a18
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..3e84d7c
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,96 @@
+* text=auto !eol svneol=native#text/plain
+*.gitattributes text svneol=native#text/plain
+
+# Scriptish formats
+*.bat text svneol=native#text/plain
+*.bsh text svneol=native#text/x-beanshell
+*.cgi text svneol=native#text/plain
+*.cmd text svneol=native#text/plain
+*.js text svneol=native#text/javascript
+*.php text svneol=native#text/x-php
+*.pl text svneol=native#text/x-perl
+*.pm text svneol=native#text/x-perl
+*.py text svneol=native#text/x-python
+*.sh eol=lf svneol=LF#text/x-sh
+configure eol=lf svneol=LF#text/x-sh
+
+# Image formats
+*.bmp binary svneol=unset#image/bmp
+*.gif binary svneol=unset#image/gif
+*.ico binary svneol=unset#image/ico
+*.jpeg binary svneol=unset#image/jpeg
+*.jpg binary svneol=unset#image/jpeg
+*.png binary svneol=unset#image/png
+*.tif binary svneol=unset#image/tiff
+*.tiff binary svneol=unset#image/tiff
+*.svg text svneol=native#image/svg%2Bxml
+
+# Data formats
+*.pdf binary svneol=unset#application/pdf
+*.avi binary svneol=unset#video/avi
+*.doc binary svneol=unset#application/msword
+*.dsp text svneol=crlf#text/plain
+*.dsw text svneol=crlf#text/plain
+*.eps binary svneol=unset#application/postscript
+*.gz binary svneol=unset#application/gzip
+*.mov binary svneol=unset#video/quicktime
+*.mp3 binary svneol=unset#audio/mpeg
+*.ppt binary svneol=unset#application/vnd.ms-powerpoint
+*.ps binary svneol=unset#application/postscript
+*.psd binary svneol=unset#application/photoshop
+*.rdf binary svneol=unset#text/rdf
+*.rss text svneol=unset#text/xml
+*.rtf binary svneol=unset#text/rtf
+*.sln text svneol=native#text/plain
+*.swf binary svneol=unset#application/x-shockwave-flash
+*.tgz binary svneol=unset#application/gzip
+*.vcproj text svneol=native#text/xml
+*.vcxproj text svneol=native#text/xml
+*.vsprops text svneol=native#text/xml
+*.wav binary svneol=unset#audio/wav
+*.xls binary svneol=unset#application/vnd.ms-excel
+*.zip binary svneol=unset#application/zip
+
+# Text formats
+.htaccess text svneol=native#text/plain
+*.bbk text svneol=native#text/xml
+*.cmake text svneol=native#text/plain
+*.css text svneol=native#text/css
+*.dtd text svneol=native#text/xml
+*.htm text svneol=native#text/html
+*.html text svneol=native#text/html
+*.ini text svneol=native#text/plain
+*.log text svneol=native#text/plain
+*.mak text svneol=native#text/plain
+*.qbk text svneol=native#text/plain
+*.rst text svneol=native#text/plain
+*.sql text svneol=native#text/x-sql
+*.txt text svneol=native#text/plain
+*.xhtml text svneol=native#text/xhtml%2Bxml
+*.xml text svneol=native#text/xml
+*.xsd text svneol=native#text/xml
+*.xsl text svneol=native#text/xml
+*.xslt text svneol=native#text/xml
+*.xul text svneol=native#text/xul
+*.yml text svneol=native#text/plain
+boost-no-inspect text svneol=native#text/plain
+CHANGES text svneol=native#text/plain
+COPYING text svneol=native#text/plain
+INSTALL text svneol=native#text/plain
+Jamfile text svneol=native#text/plain
+Jamroot text svneol=native#text/plain
+Jamfile.v2 text svneol=native#text/plain
+Jamrules text svneol=native#text/plain
+Makefile* text svneol=native#text/plain
+README text svneol=native#text/plain
+TODO text svneol=native#text/plain
+
+# Code formats
+*.c text svneol=native#text/plain
+*.cpp text svneol=native#text/plain
+*.h text svneol=native#text/plain
+*.hpp text svneol=native#text/plain
+*.ipp text svneol=native#text/plain
+*.tpp text svneol=native#text/plain
+*.jam text svneol=native#text/plain
+*.java text svneol=native#text/plain
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..116d4aa
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+/doc/inspect.htm
+/**/*.log
+reporting/*/third_party
+reporting/*/html
+inspect.html
+/example/float128_examples.cpp
+test/cuda
+*.DS_Store
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..2700829
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,590 @@
+# Copyright 2016, 2017 Peter Dimov
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
+
+language: cpp
+
+sudo: false
+
+python: "2.7"
+
+os:
+ - linux
+ - osx
+
+branches:
+ only:
+ - master
+ - develop
+
+env:
+ matrix:
+ - BOGUS_JOB=true
+
+matrix:
+
+ exclude:
+ - env: BOGUS_JOB=true
+
+ include:
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++03 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++14 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++1z TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++03 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++14 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++1z TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++03 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++14 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=gnu++14 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++1z TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++03 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++14 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=gnu++14 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++1z TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++03 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++11 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++14 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=gnu++14 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-6 CXXSTD=c++1z TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-6
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++14 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-5
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++14 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-5
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++14 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-5
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++14 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-5
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ env: TOOLSET=gcc COMPILER=g++-5 CXXSTD=c++14 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-5
+ - libgmp-dev
+ - libmpfr-dev
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++14 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++14 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++14 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++14 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=c++14 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++03 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++03 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++03 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++03 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++03 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++14 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++14 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++14 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++14 TEST_SUITE=float128_tests
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ dist: trusty
+ compiler: g++-7
+ env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=gnu++14 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - g++-7
+ sources:
+ - ubuntu-toolchain-r-test
+
+ - os: linux
+ compiler: clang++-4.0
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 TEST_SUITE=special_fun
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ compiler: clang++-4.0
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 TEST_SUITE=distribution_tests
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ compiler: clang++-4.0
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 TEST_SUITE=misc
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: linux
+ compiler: clang++-4.0
+ env: TOOLSET=clang COMPILER=clang++-4.0 CXXSTD=c++11 TEST_SUITE=../example//examples
+ addons:
+ apt:
+ packages:
+ - clang-4.0
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-4.0
+
+ - os: osx
+ env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 TEST_SUITE=special_fun
+ osx_image: xcode9.3
+
+ - os: osx
+ env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 TEST_SUITE=distribution_tests
+ osx_image: xcode9.3
+
+ - os: osx
+ env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 TEST_SUITE=misc
+ osx_image: xcode9.3
+
+ - os: osx
+ env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 TEST_SUITE=float128_tests
+ osx_image: xcode9.3
+
+ - os: osx
+ env: TOOLSET=clang COMPILER=clang++ CXXSTD=c++14 TEST_SUITE=../example//examples
+ osx_image: xcode9.3
+
+
+install:
+ - cd ..
+ - git clone -b $TRAVIS_BRANCH --depth 1 https://github.com/boostorg/boost.git boost-root
+ - cd boost-root
+ - git submodule update --init tools/build
+ - git submodule update --init libs/config
+ - git submodule update --init libs/format
+ - git submodule update --init libs/numeric
+ - git submodule update --init tools/boostdep
+ - cp -r $TRAVIS_BUILD_DIR/* libs/math
+ - python tools/boostdep/depinst/depinst.py math
+ - ./bootstrap.sh
+ - ./b2 headers
+
+script:
+ - |-
+ echo "using $TOOLSET : : $COMPILER : <cxxflags>-std=$CXXSTD ;" > ~/user-config.jam
+ - (cd libs/config/test && ../../../b2 config_info_travis_install toolset=$TOOLSET && ./config_info_travis)
+ - (cd libs/math/test && travis_wait 60 ../../../b2 -j3 -d+0 -q toolset=$TOOLSET $TEST_SUITE define=CI_SUPPRESS_KNOWN_ISSUES define=SLOW_COMPILER)
+
+notifications:
+ email:
+ on_success: always
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..82afce4
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,108 @@
+# Copyright 2016 Peter Dimov
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt)
+
+version: 1.0.{build}-{branch}
+
+shallow_clone: true
+
+branches:
+ only:
+ - master
+ - develop
+
+platform:
+ - x64
+
+environment:
+ matrix:
+ - ARGS: --toolset=msvc-9.0 address-model=32
+ - ARGS: --toolset=msvc-10.0 address-model=32
+ - ARGS: --toolset=msvc-11.0 address-model=32
+ - ARGS: --toolset=msvc-12.0 address-model=32
+ - ARGS: --toolset=msvc-14.0 address-model=32
+ - ARGS: --toolset=msvc-12.0 address-model=64
+ - ARGS: --toolset=msvc-14.0 address-model=64
+ TEST_SUITE: special_fun distribution_tests
+ - ARGS: --toolset=msvc-14.0 address-model=64
+ TEST_SUITE: misc ../example//examples
+
+ - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ ARGS: --toolset=msvc-14.1 address-model=64
+ TEST_SUITE: special_fun distribution_tests
+
+ - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ ARGS: --toolset=msvc-14.1 address-model=64
+ TEST_SUITE: misc ../example//examples
+
+ - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ ARGS: --toolset=msvc-14.1 address-model=32
+ TEST_SUITE: special_fun distribution_tests
+
+ - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ ARGS: --toolset=msvc-14.1 address-model=32
+ TEST_SUITE: misc ../example//examples
+
+ - ARGS: --toolset=gcc address-model=64
+ TEST_SUITE: float128_tests
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64 cxxflags=-std=gnu++1z
+ TEST_SUITE: float128_tests
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64
+ TEST_SUITE: special_fun
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64 cxxflags=-std=gnu++1z
+ TEST_SUITE: special_fun
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64
+ TEST_SUITE: distribution_tests
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64 cxxflags=-std=gnu++1z
+ TEST_SUITE: distribution_tests
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64
+ TEST_SUITE: misc
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64 cxxflags=-std=gnu++1z
+ TEST_SUITE: misc
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64
+ TEST_SUITE: ../example//examples
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+ - ARGS: --toolset=gcc address-model=64 cxxflags=-std=gnu++1z
+ TEST_SUITE: ../example//examples
+ PATH: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH%
+
+
+install:
+ - cd ..
+ - git clone -b %APPVEYOR_REPO_BRANCH% --depth 1 https://github.com/boostorg/boost.git boost-root
+ - cd boost-root
+ - git submodule update --init tools/build
+ - git submodule update --init libs/config
+ - git submodule update --init libs/format
+ - git submodule update --init libs/numeric
+ - xcopy /s /e /q %APPVEYOR_BUILD_FOLDER% libs\math
+ - git submodule update --init tools/boostdep
+ - python tools/boostdep/depinst/depinst.py math
+ - bootstrap
+ - b2 headers
+
+build: off
+
+test_script:
+ - cd libs\config\test
+ - ..\..\..\b2 config_info_travis_install %ARGS%
+ - config_info_travis
+ - cd ..\..\math\test
+ - ..\..\..\b2 -j3 --hash %ARGS% define=CI_SUPPRESS_KNOWN_ISSUES %TEST_SUITE%
diff --git a/build/Jamfile.v2 b/build/Jamfile.v2
new file mode 100644
index 0000000..e19fb2e
--- /dev/null
+++ b/build/Jamfile.v2
@@ -0,0 +1,127 @@
+# copyright John Maddock 2008
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt.
+
+import testing ;
+import pch ;
+
+project
+ : requirements
+ <toolset>intel-win:<cxxflags>-nologo
+ <toolset>intel-win:<linkflags>-nologo
+ #<toolset>intel-linux:<pch>off
+ <toolset>intel-darwin:<pch>off
+ <toolset>msvc-7.1:<pch>off
+ <toolset>gcc,<target-os>windows:<pch>off
+ #<toolset>gcc:<cxxflags>-fvisibility=hidden
+ <toolset>intel-linux:<cxxflags>-fvisibility=hidden
+ #<toolset>sun:<cxxflags>-xldscope=hidden
+ [ check-target-builds ../config//has_gcc_visibility "gcc visibility" : <toolset>gcc:<cxxflags>-fvisibility=hidden : ]
+ ;
+
+cpp-pch pch : ../src/tr1/pch.hpp : <include>../src/tr1 <link>shared:<define>BOOST_MATH_TR1_DYN_LINK=1 ;
+
+C99_SOURCES = acosh
+asinh
+atanh
+cbrt
+copysign
+erfc
+erf
+expm1
+fmax
+fmin
+fpclassify
+hypot
+lgamma
+llround
+log1p
+lround
+nextafter
+nexttoward
+round
+tgamma
+trunc ;
+
+TR1_SOURCES =
+assoc_laguerre
+assoc_legendre
+beta
+comp_ellint_1
+comp_ellint_2
+comp_ellint_3
+cyl_bessel_i
+cyl_bessel_j
+cyl_bessel_k
+cyl_neumann
+ellint_1
+ellint_2
+ellint_3
+expint
+hermite
+laguerre
+legendre
+riemann_zeta
+sph_bessel
+sph_legendre
+sph_neumann
+;
+
+# Configure checks.
+
+import project ;
+import configure ;
+import property ;
+import property-set ;
+import targets ;
+
+obj long_double_check : ../config/has_long_double_support.cpp ;
+explicit long_double_check ;
+
+# Library targets
+lib boost_math_tr1 : ../src/tr1/$(TR1_SOURCES).cpp pch
+ :
+ <link>shared:<define>BOOST_MATH_TR1_DYN_LINK=1
+ <include>../src/tr1
+ ;
+
+lib boost_math_tr1f : ../src/tr1/$(TR1_SOURCES)f.cpp pch
+ :
+ <link>shared:<define>BOOST_MATH_TR1_DYN_LINK=1
+ <include>../src/tr1
+ ;
+
+lib boost_math_tr1l : ../src/tr1/$(TR1_SOURCES)l.cpp pch
+ :
+ <link>shared:<define>BOOST_MATH_TR1_DYN_LINK=1
+ <dependency>../config//has_long_double_support
+ <include>../src/tr1
+ [ check-target-builds ../config//has_long_double_support "long double support" : : <build>no ]
+ ;
+
+lib boost_math_c99 : ../src/tr1/$(C99_SOURCES).cpp pch
+ :
+ <link>shared:<define>BOOST_MATH_TR1_DYN_LINK=1
+ <include>../src/tr1
+ ;
+
+lib boost_math_c99f : ../src/tr1/$(C99_SOURCES)f.cpp pch
+ :
+ <link>shared:<define>BOOST_MATH_TR1_DYN_LINK=1
+ <include>../src/tr1
+ ;
+
+lib boost_math_c99l : ../src/tr1/$(C99_SOURCES)l.cpp pch
+ :
+ <link>shared:<define>BOOST_MATH_TR1_DYN_LINK=1
+ <dependency>../config//has_long_double_support
+ <include>../src/tr1
+ [ check-target-builds ../config//has_long_double_support "long double support" : : <build>no ]
+ ;
+
+boost-install boost_math_c99 boost_math_c99f boost_math_c99l boost_math_tr1 boost_math_tr1f boost_math_tr1l ;
+
+
+
+
diff --git a/config/Jamfile.v2 b/config/Jamfile.v2
new file mode 100644
index 0000000..a63ea38
--- /dev/null
+++ b/config/Jamfile.v2
@@ -0,0 +1,46 @@
+# copyright John Maddock 2008
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt.
+
+import modules ;
+import path ;
+
+local ntl-path = [ modules.peek : NTL_PATH ] ;
+local gmp_path = [ modules.peek : GMP_PATH ] ;
+local e_float_path = [ modules.peek : E_FLOAT_PATH ] ;
+
+lib quadmath ;
+lib fftw3 ;
+lib fftw3f ;
+lib fftw3l ;
+lib fftw3q ;
+
+obj has_long_double_support : has_long_double_support.cpp ;
+obj has_mpfr_class : has_mpfr_class.cpp :
+ <include>$(gmp_path) <include>$(gmp_path)/mpfr <include>$(gmp_path)/gmpfrxx ;
+obj has_mpreal : has_mpreal.cpp :
+ <include>$(gmp_path) <include>$(gmp_path)/mpfr <include>$(gmp_path)/mpfrc++ ;
+obj has_ntl_rr : has_ntl_rr.cpp : <include>$(ntl-path)/include ;
+obj has_gmpxx : has_gmpxx.cpp :
+ <include>$(gmp_path) <include>$(gmp_path)/mpfr <include>$(gmp_path)/gmpfrxx ;
+obj has_gcc_visibility : has_gcc_visibility.cpp :
+ <toolset>gcc:<cxxflags>-fvisibility=hidden <toolset>gcc:<cxxflags>-Werror ;
+obj has_e_float : has_e_float.cpp : <include>$(e_float_path) ;
+exe has_float128 : has_float128.cpp quadmath ;
+exe has_fftw3 : has_fftw3.cpp fftw3 fftw3f fftw3l ;
+exe has_intel_quad : has_intel_quad.cpp : <cxxflags>-Qoption,cpp,--extended_float_type ;
+obj has_128bit_floatmax_t : has_128bit_floatmax_t.cpp ;
+
+explicit has_long_double_support ;
+explicit has_mpfr_class ;
+explicit has_mpreal ;
+explicit has_ntl_rr ;
+explicit has_gmpxx ;
+explicit has_gcc_visibility ;
+explicit has_e_float ;
+explicit has_float128 ;
+explicit has_intel_quad ;
+explicit has_128bit_floatmax_t ;
+explicit has_fftw3 ;
+
diff --git a/config/has_128bit_floatmax_t.cpp b/config/has_128bit_floatmax_t.cpp
new file mode 100644
index 0000000..aee4837
--- /dev/null
+++ b/config/has_128bit_floatmax_t.cpp
@@ -0,0 +1,19 @@
+// Copyright John Maddock 2014.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#include <climits>  // for CHAR_BIT
+#include <boost/cstdfloat.hpp>
+#include <boost/static_assert.hpp>
+
+#ifndef BOOST_FLOAT128_C
+#error "There is no 128 bit floating point type"
+#endif
+
+BOOST_STATIC_ASSERT(sizeof(boost::floatmax_t) * CHAR_BIT == 128);
+
+int main()
+{
+ return 0;
+}
+
diff --git a/config/has_e_float.cpp b/config/has_e_float.cpp
new file mode 100644
index 0000000..1f3b305
--- /dev/null
+++ b/config/has_e_float.cpp
@@ -0,0 +1,15 @@
+// Copyright John Maddock 2011.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+
+#ifdef _MSC_VER
+# pragma warning (disable : 4100) // unreferenced formal parameter
+#endif
+
+#define E_FLOAT_TYPE_EFX
+
+#include <e_float/e_float.h>
+#include <functions/functions.h>
+
diff --git a/config/has_fftw3.cpp b/config/has_fftw3.cpp
new file mode 100644
index 0000000..8045685
--- /dev/null
+++ b/config/has_fftw3.cpp
@@ -0,0 +1,19 @@
+// Copyright John Maddock 2017.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#include <fftw3.h>
+
+int main()
+{
+ fftwq_plan plan; // early versions don't have this, it seems.
+
+ fftw_cleanup();
+ fftwf_cleanup();
+ fftwl_cleanup();
+
+
+ return 0;
+}
+
diff --git a/config/has_float128.cpp b/config/has_float128.cpp
new file mode 100644
index 0000000..60a2e2e
--- /dev/null
+++ b/config/has_float128.cpp
@@ -0,0 +1,17 @@
+// Copyright John Maddock 2013.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+extern "C" {
+#include <quadmath.h>
+}
+
+int main()
+{
+ __float128 f = -2.0Q;
+ f = fabsq(f);
+
+ return 0;
+}
+
diff --git a/config/has_gcc_visibility.cpp b/config/has_gcc_visibility.cpp
new file mode 100644
index 0000000..6c7d6f9
--- /dev/null
+++ b/config/has_gcc_visibility.cpp
@@ -0,0 +1,13 @@
+// Copyright John Maddock 2010.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef __GNUC__
+# error "This is a GCC specific test case."
+#endif
+
+int main()
+{
+ return 0;
+}
diff --git a/config/has_gmpxx.cpp b/config/has_gmpxx.cpp
new file mode 100644
index 0000000..edf62d8
--- /dev/null
+++ b/config/has_gmpxx.cpp
@@ -0,0 +1,7 @@
+// Copyright John Maddock 2008.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#include <gmpxx.h>
+
diff --git a/config/has_intel_quad.cpp b/config/has_intel_quad.cpp
new file mode 100644
index 0000000..a2db80c
--- /dev/null
+++ b/config/has_intel_quad.cpp
@@ -0,0 +1,16 @@
+// Copyright John Maddock 2013.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+extern "C" _Quad __fabs(_Quad);
+
+int main()
+{
+ _Quad f = -2.0Q;
+ f = __fabsq(f);
+
+ return 0;
+}
+
+
diff --git a/config/has_long_double_support.cpp b/config/has_long_double_support.cpp
new file mode 100644
index 0000000..d314cf3
--- /dev/null
+++ b/config/has_long_double_support.cpp
@@ -0,0 +1,10 @@
+// Copyright John Maddock 2008.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/math/tools/config.hpp>
+
+#ifdef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
+#error "long double is not supported by Boost.Math on this platform: the long double version of the TR1 library will not be built."
+#endif
diff --git a/config/has_mpfr_class.cpp b/config/has_mpfr_class.cpp
new file mode 100644
index 0000000..376b022
--- /dev/null
+++ b/config/has_mpfr_class.cpp
@@ -0,0 +1,15 @@
+// Copyright John Maddock 2008.
+// Copyright Paul A. Bristow 2009
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#ifdef _MSC_VER
+# pragma warning (disable : 4127) // conditional expression is constant
+# pragma warning (disable : 4800) // 'int' : forcing value to bool 'true' or 'false' (performance warning)
+# pragma warning (disable : 4512) // assignment operator could not be generated
+#endif
+
+#include <cstddef>
+#include <gmpfrxx.h>
+
diff --git a/config/has_mpreal.cpp b/config/has_mpreal.cpp
new file mode 100644
index 0000000..8ee8897
--- /dev/null
+++ b/config/has_mpreal.cpp
@@ -0,0 +1,14 @@
+// Copyright John Maddock 2008.
+// Copyright Paul A. Bristow 2009
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#ifdef _MSC_VER
+# pragma warning (disable : 4127) // conditional expression is constant
+# pragma warning (disable : 4800) // 'int' : forcing value to bool 'true' or 'false' (performance warning)
+# pragma warning (disable : 4512) // assignment operator could not be generated
+#endif
+
+#include <mpreal.h>
+
diff --git a/config/has_ntl_rr.cpp b/config/has_ntl_rr.cpp
new file mode 100644
index 0000000..f384421
--- /dev/null
+++ b/config/has_ntl_rr.cpp
@@ -0,0 +1,12 @@
+// Copyright John Maddock 2008.
+// Use, modification and distribution are subject to the
+// Boost Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+
+#ifdef _MSC_VER
+# pragma warning (disable : 4100) // unreferenced formal parameter
+#endif
+
+#include <NTL/RR.h>
+
diff --git a/doc/Jamfile.v2 b/doc/Jamfile.v2
new file mode 100644
index 0000000..c8b2453
--- /dev/null
+++ b/doc/Jamfile.v2
@@ -0,0 +1,89 @@
+
+# Copyright John Maddock 2005. Use, modification, and distribution are
+# subject to the Boost Software License, Version 1.0. (See accompanying
+# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+using quickbook ;
+using auto-index ;
+import modules ;
+
+path-constant images_location : html ;
+path-constant here : . ;
+
+xml math : math.qbk ;
+boostbook standalone
+ :
+ math
+ :
+ # Path for links to Boost:
+ <xsl:param>boost.root=../../../..
+ <xsl:param>html.stylesheet=math.css
+
+ # Some general style settings:
+ <xsl:param>table.footnote.number.format=1
+ <xsl:param>footnote.number.format=1
+
+ # HTML options first:
+ # Use graphics not text for navigation:
+ <xsl:param>navig.graphics=1
+ # How far down we chunk nested sections, basically all of them:
+ <xsl:param>chunk.section.depth=10
+ # Don't put the first section on the same page as the TOC:
+ <xsl:param>chunk.first.sections=1
+ # How far down sections get TOC's
+ <xsl:param>toc.section.depth=10
+ # Max depth in each TOC:
+ <xsl:param>toc.max.depth=4
+ # How far down we go with TOC's
+ <xsl:param>generate.section.toc.level=10
+ # Index on type:
+ <xsl:param>index.on.type=1
+ <xsl:param>boost.noexpand.chapter.toc=1
+
+ #<xsl:param>root.filename="sf_dist_and_tools"
+ #<xsl:param>graphicsize.extension=1
+ #<xsl:param>use.extensions=1
+
+ # PDF Options:
+ # TOC Generation: this is needed for FOP-0.9 and later:
+ <xsl:param>fop1.extensions=0
+ <format>pdf:<xsl:param>xep.extensions=1
+ # TOC generation: this is needed for FOP 0.2, but must not be set to zero for FOP-0.9!
+ <format>pdf:<xsl:param>fop.extensions=0
+ <format>pdf:<xsl:param>fop1.extensions=0
+ # No indent on body text:
+ <format>pdf:<xsl:param>body.start.indent=0pt
+ # Margin size:
+ <format>pdf:<xsl:param>page.margin.inner=0.5in
+ # Margin size:
+ <format>pdf:<xsl:param>page.margin.outer=0.5in
+ # Paper type = A4
+ <format>pdf:<xsl:param>paper.type=A4
+ # Yes, we want graphics for admonishments:
+ <xsl:param>admon.graphics=1
+ # Set this one for PDF generation *only*:
+ # default png graphics are awful in PDF form,
+ # better to use SVGs instead:
+ <format>pdf:<xsl:param>admon.graphics.extension=".svg"
+ <format>pdf:<xsl:param>use.role.for.mediaobject=1
+ <format>pdf:<xsl:param>preferred.mediaobject.role=print
+ <format>pdf:<xsl:param>img.src.path=$(images_location)/
+ <format>pdf:<xsl:param>draft.mode="no"
+ <format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/math/doc/html
+ <auto-index>on <format>pdf:<auto-index-internal>off
+ <format>html:<auto-index-internal>on
+ <auto-index-script>$(here)/index.idx
+ <auto-index-prefix>$(here)/../../..
+ #<auto-index-verbose>on
+ <quickbook-define>enable_index
+ <format>pdf:<xsl:param>index.on.type=1
+ ;
+
+install pdfinstall : standalone/<format>pdf : <location>. <install-type>PDF <name>math.pdf ;
+explicit pdfinstall ;
+
+###############################################################################
+alias boostdoc ;
+explicit boostdoc ;
+alias boostrelease : standalone ;
+explicit boostrelease ;
diff --git a/doc/background/error.qbk b/doc/background/error.qbk
new file mode 100644
index 0000000..a3a79b1
--- /dev/null
+++ b/doc/background/error.qbk
@@ -0,0 +1,73 @@
+[section:relative_error Relative Error]
+
+Given an actual value /a/ and a found value /v/ the relative error can be
+calculated from:
+
+[equation error2]
+
+However the test programs in the library use the symmetrical form:
+
+[equation error1]
+
+which measures the /relative difference/ and happens to be less
+error-prone in use, since we don't have to worry about which value is
+the "true" result and which is the experimental one. It is guaranteed
+to return a value at least as large as the relative error.
+
+Special care needs to be taken when one value is zero: we could either take the
+absolute error in this case (but that's cheating, as the absolute error is likely
+to be very small), or we could assign a value of either 1 or infinity to the
+relative error in this special case. In the test cases for the special functions
+in this library, everything below a threshold is regarded as "effectively zero";
+if only one of the terms is zero, the relative error is assigned the value 1.
+The threshold is currently set at `std::numeric_limits<>::min()`:
+in other words, all denormalised numbers are regarded as zero.
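+
+A minimal sketch of this scheme (not the library's actual test code,
+just an illustration of the rules described above) might look like:
+
+    #include <algorithm>
+    #include <cmath>
+    #include <limits>
+
+    // Symmetrical relative difference: always at least as large as the
+    // true relative error; everything below numeric_limits<T>::min()
+    // is treated as "effectively zero".
+    template <class T>
+    T relative_difference(T a, T v)
+    {
+       T threshold = (std::numeric_limits<T>::min)();
+       bool a_zero = std::fabs(a) < threshold;
+       bool v_zero = std::fabs(v) < threshold;
+       if(a_zero && v_zero)
+          return 0; // both effectively zero: no error.
+       if(a_zero || v_zero)
+          return 1; // only one term is zero.
+       return std::fabs(a - v) / (std::min)(std::fabs(a), std::fabs(v));
+    }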
+
+All the test programs calculate /quantized relative error/, whereas the graphs
+in this manual are produced with the /actual error/. The difference is as
+follows: in the test programs, the test data is rounded to the target real type
+under test when the program is compiled,
+so the error observed will then be a whole number of /units in the last place/
+either rounded up from the actual error, or rounded down (possibly to zero).
+In contrast the /true error/ is obtained by extending
+the precision of the calculated value, and then comparing to the actual value:
+in this case the calculated error may be some fraction of /units in the last place/.
+
+Note that throughout this manual and the test programs the relative error is
+usually quoted in units of epsilon. However, remember that /units in the last place/
+more accurately reflect the number of contaminated digits, and that relative
+error can /"wobble"/ by a factor of 2 compared to /units in the last place/.
+In other words: two implementations of the same function, whose
+maximum relative errors differ by a factor of 2, can actually be accurate
+to the same number of binary digits. You have been warned!
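+
+For illustration, here is a small program using `boost::math::float_next`
+(from `<boost/math/special_functions/next.hpp>`) showing the wobble:
+an error of exactly one ulp corresponds to different relative errors
+at different points within the same binade:
+
+    #include <boost/math/special_functions/next.hpp>
+    #include <iostream>
+    #include <limits>
+
+    int main()
+    {
+       double eps = std::numeric_limits<double>::epsilon();
+       // One ulp above 1.0 is a relative error of a whole epsilon...
+       double a = 1.0, b = boost::math::float_next(a);
+       std::cout << (b - a) / a / eps << std::endl; // prints 1
+       // ...but one ulp just below 2.0 is only about half an epsilon:
+       double c = 2.0 - eps, d = boost::math::float_next(c);
+       std::cout << (d - c) / c / eps << std::endl; // prints ~0.5
+       return 0;
+    }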
+
+[h4:zero_error The Impossibility of Zero Error]
+
+For many of the functions in this library, it is assumed that the error is
+"effectively zero" if the computation can be done with a number of guard
+digits. However it should be remembered that if the result is a
+/transcendental number/
+then as a point of principle we can never be sure that the result is accurate
+to more than 1 ulp. This is an example of what
+[@http://en.wikipedia.org/wiki/William_Kahan William Kahan] called
+[@http://en.wikipedia.org/wiki/Rounding#The_table-maker.27s_dilemma the table-maker's dilemma]:
+consider what happens if the first guard digit is a one, and the remaining guard digits are all zero.
+Do we have a tie or not? Since the only thing we can tell about a transcendental number
+is that its digits have no particular pattern, we can never tell if we have a tie,
+no matter how many guard digits we have. Therefore, we can never be completely sure
+that the result has been rounded in the right direction. Of course, transcendental
+numbers that just happen to be a tie - for however many guard digits we have - are
+extremely rare, and get rarer the more guard digits we have, but even so....
+
+Refer to the classic text
+[@http://docs.sun.com/source/806-3568/ncg_goldberg.html What Every Computer Scientist Should Know About Floating-Point Arithmetic]
+for more information.
+
+[endsect][/section:relative_error Relative Error]
+
+[/
+ Copyright 2006, 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/background/implementation.qbk b/doc/background/implementation.qbk
new file mode 100644
index 0000000..377e702
--- /dev/null
+++ b/doc/background/implementation.qbk
@@ -0,0 +1,664 @@
+[section:sf_implementation Additional Implementation Notes]
+
+The majority of the implementation notes are included with the documentation
+of each function or distribution. The notes here are of a more general nature,
+and reflect more the general implementation philosophy used.
+
+[h4 Implementation philosophy]
+
+"First be right, then be fast."
+
+There will always be potential compromises
+to be made between speed and accuracy.
+It may be possible to find faster methods,
+particularly for certain limited ranges of arguments,
+but for most applications of math functions and distributions,
+we judge that speed is rarely as important as accuracy.
+
+So our priority is accuracy.
+
+To permit evaluation of accuracy of the special functions,
+production of extremely accurate tables of test values
+has received considerable effort.
+
+(It also required much CPU effort -
+there was some danger of molten plastic dripping from the bottom of JM's laptop,
+so instead, PAB's Dual-core desktop was kept 50% busy for [*days]
+calculating some tables of test values!)
+
+For a specific RealType, say `float` or `double`,
+it may be possible to find approximations for some functions
+that are simpler and thus faster, but less accurate
+(perhaps because there are no refining iterations,
+for example, when calculating inverse functions).
+
+If these prove accurate enough to be "fit for his purpose",
+then a user may substitute his custom specialization.
+
+For example, there are approximations dating back from times
+when computation was a [*lot] more expensive:
+
+H Goldberg and H Levine, Approximate formulas for
+percentage points and normalisation of t and chi squared,
+Ann. Math. Stat., 17(4), 216 - 225 (Dec 1946).
+
+A H Carter, Approximations to percentage points of the z-distribution,
+Biometrika 34(2), 352 - 358 (Dec 1947).
+
+These could still provide sufficient accuracy for some speed-critical applications.
+
+[h4 Accuracy and Representation of Test Values]
+
+In order to be accurate enough for as many as possible real types,
+constant values are given to 50 decimal digits if available
+(though many sources proved accurate only to around 64-bit double precision).
+Values are specified as long double types by appending L,
+unless they are exactly representable, for example integers, or binary fractions like 0.125.
+This avoids the risk of loss of accuracy converting from double, the default type.
+Values are used after `static_cast<RealType>(1.2345L)`
+to provide the appropriate RealType for spot tests.
+
+Functions that return constant values, like kurtosis for example, are written as
+
+`static_cast<RealType>(-3) / 5;`
+
+to provide the most accurate value
+that the compiler can compute for the real type.
+(The denominator is an integer and so will be promoted exactly).
+
+So tests for one third, which is *not* exactly representable in radix-two
+floating-point, (should) use, for example:
+
+`static_cast<RealType>(1) / 3;`
+
+If a function is very sensitive to changes in input,
+specifying an inexact value as input (such as 0.1) can throw
+the result off by a noticeable amount: 0.1f is "wrong"
+by ~1e-7 for example (because 0.1 has no exact binary representation).
+That is why exact binary values - halves, quarters, and eighths etc -
+are used in test code along with the occasional fraction `a/b` with `b`
+a power of two (in order to ensure that the result is an exactly
+representable binary value).
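+
+For example, a spot-test helper (a sketch only, with hypothetical values)
+might specify its inputs like this:
+
+    // Sketch: specifying spot-test values for an arbitrary RealType.
+    template <class RealType>
+    void spot_check()
+    {
+       RealType third = static_cast<RealType>(1) / 3;     // best 1/3 the type can hold
+       RealType exact = static_cast<RealType>(0.125);     // exact binary fraction, no suffix needed
+       RealType inexact = static_cast<RealType>(1.2345L); // L suffix avoids rounding through double
+       (void)third; (void)exact; (void)inexact;
+    }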
+
+[h4 Tolerance of Tests]
+
+The tolerances need to be set to the maximum of:
+
+* Some epsilon value.
+* The accuracy of the data (often only near 64-bit double).
+
+Otherwise, when long double has more digits than the test data, no
+amount of tweaking an epsilon-based tolerance will work.
+
+A common problem arises with tolerances that are suitable for implementations
+like Microsoft VS.NET, where double and long double are the same size:
+such tests fail on other systems where long double is more accurate than double.
+Check first that the suffix L is present, and then that the tolerance is big enough.
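+
+A sketch of the idea, assuming test data accurate to about 1e-15
+(the actual figure depends on the data set):
+
+    #include <boost/math/tools/precision.hpp>
+    #include <algorithm>
+
+    template <class RealType>
+    RealType suitable_tolerance()
+    {
+       // Some multiple of machine epsilon for RealType...
+       RealType eps_based = boost::math::tools::epsilon<RealType>() * 100;
+       // ...but never tighter than the accuracy of the test data itself:
+       RealType data_accuracy = static_cast<RealType>(1e-15L);
+       return (std::max)(eps_based, data_accuracy);
+    }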
+
+[h4 Handling Unsuitable Arguments]
+
+In
+[@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1665.pdf Errors in Mathematical Special Functions], J. Marraffino & M. Paterno
+it is proposed that signalling a domain error is mandatory
+when the argument would give a mathematically undefined result.
+
+*Guideline 1
+
+[:A mathematical function is said to be defined at a point a = (a1, a2, . . .)
+if the limits as x = (x1, x2, . . .) 'approaches a from all directions agree'.
+The defined value may be any number, or +infinity, or -infinity.]
+
+Put crudely, if the function goes to + infinity
+and then emerges 'round-the-back' with - infinity,
+it is NOT defined.
+
+[:The library function which approximates a mathematical function shall signal a domain error
+whenever evaluated with argument values for which the mathematical function is undefined.]
+
+*Guideline 2
+
+[:The library function which approximates a mathematical function
+shall signal a domain error whenever evaluated with argument values
+for which the mathematical function obtains a non-real value.]
+
+This implementation is believed to follow these proposals and to assist compatibility with
+['ISO/IEC 9899:1999 Programming languages - C]
+and with the
+[@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1836.pdf Draft Technical Report on C++ Library Extensions, 2005-06-24, section 5.2.1, paragraph 5].
+[link math_toolkit.error_handling See also domain_error].
+
+See __policy_ref for details of the error handling policies that should allow
+a user to comply with any of these recommendations, as well as other behaviour.
+
+See [link math_toolkit.error_handling error handling]
+for a detailed explanation of the mechanism, and
+[link math_toolkit.stat_tut.weg.error_eg error_handling example]
+and
+[@../../example/error_handling_example.cpp error_handling_example.cpp]
+
+[caution If you enable exceptions but do NOT have a try & catch block,
+then the program will terminate with an uncaught exception and probably abort.
+Therefore to get the benefit of helpful error messages, enabling *all* exceptions
+*and* using try&catch is recommended for all applications.
+However, for simplicity, this is not done for most examples.]
+
+[h4 Handling of Functions that are Not Mathematically defined]
+
+Functions that are not mathematically defined,
+like the Cauchy mean, fail to compile by default.
+A [link math_toolkit.pol_ref.assert_undefined policy]
+allows control of this.
+
+If the policy is to permit undefined functions, then calling them
+throws a domain error, by default. But the error policy can be set
+to not throw, and to return NaN instead. For example, if
+
+`#define BOOST_MATH_DOMAIN_ERROR_POLICY ignore_error`
+
+appears before the first Boost include, then calling the unimplemented
+function, for example mean(cauchy<>()), will return
+std::numeric_limits<T>::quiet_NaN().
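+
+A minimal sketch putting the pieces together (note that
+BOOST_MATH_ASSERT_UNDEFINED_POLICY must also be set to false,
+so that the undefined function compiles at all):
+
+    #define BOOST_MATH_ASSERT_UNDEFINED_POLICY false    // permit undefined functions
+    #define BOOST_MATH_DOMAIN_ERROR_POLICY ignore_error // return NaN, don't throw
+
+    #include <boost/math/distributions/cauchy.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::cauchy_distribution<> c;
+       std::cout << mean(c) << std::endl; // prints nan rather than throwing
+       return 0;
+    }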
+
+[warning If `std::numeric_limits<T>::has_quiet_NaN` is false
+(for example, if T is a User-defined type without NaN support),
+then an exception will always be thrown when a domain error occurs.
+Catching exceptions is therefore strongly recommended.]
+
+[h4 Median of distributions]
+
+There are many distributions for which we have been unable to find an analytic formula,
+and this has deterred us from implementing
+[@http://en.wikipedia.org/wiki/Median median functions], the mid-point in a list of values.
+
+However a useful numerical approximation for a distribution `dist`
+is available as usual via the accessor non-member function `median(dist)`,
+which may be evaluated (in the absence of an analytic formula) by calling
+
+`quantile(dist, 0.5)` (this is the /mathematical/ definition, of course).
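+
+A quick illustration (the normal distribution has an analytic median,
+so both calls below give the same answer):
+
+    #include <boost/math/distributions/normal.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::normal_distribution<> n(2., 3.);
+       // median(n) and quantile(n, 0.5) agree:
+       std::cout << median(n) << " " << quantile(n, 0.5) << std::endl;
+       return 0;
+    }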
+
+[@http://www.amstat.org/publications/jse/v13n2/vonhippel.html Mean, Median, and Skew, Paul T von Hippel]
+
+[@http://documents.wolfram.co.jp/teachersedition/MathematicaBook/24.5.html Descriptive Statistics,]
+
+[@http://documents.wolfram.co.jp/v5/Add-onsLinks/StandardPackages/Statistics/DescriptiveStatistics.html and ]
+
+[@http://documents.wolfram.com/v5/TheMathematicaBook/AdvancedMathematicsInMathematica/NumericalOperationsOnData/3.8.1.html
+Mathematica Basic Statistics.] give more detail, in particular for discrete distributions.
+
+
+[h4 Handling of Floating-Point Infinity]
+
+Some functions and distributions are well defined with + or - infinity as
+argument(s), but after some experiments with handling infinite arguments
+as special cases, we concluded that it was generally more useful to forbid this,
+and instead to return the result of __domain_error.
+
+Handling infinity as special cases is additionally complicated
+because, unlike built-in types on most - but not all - platforms,
+not all User-Defined Types are
+specialized to provide `std::numeric_limits<RealType>::infinity()`
+and would return zero rather than any representation of infinity.
+
+The rationale is that non-finiteness may happen because of error
+or overflow in the user's code, and it will be more helpful for this
+to be diagnosed promptly rather than just continuing.
+The code also became much more complicated, more error-prone,
+much more work to test, and much less readable.
+
+However in a few cases, for example normal, where we felt it obvious,
+we have permitted argument(s) to be infinity,
+provided infinity is implemented for the `RealType` on that implementation,
+and it is supported and tested by the distribution.
+
+The range for these distributions is set to infinity if supported by the platform
+(by testing `std::numeric_limits<RealType>::has_infinity`),
+else to the maximum value provided for the `RealType` by Boost.Math.
+
+Testing for has_infinity is obviously important for arbitrary precision types
+where infinity makes much less sense than for IEEE754 floating-point.
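+
+A sketch of how such an endpoint might be chosen (this is not the
+library's actual code, just an illustration of the test described above):
+
+    #include <boost/math/tools/precision.hpp>
+    #include <limits>
+
+    template <class RealType>
+    RealType range_upper_endpoint()
+    {
+       // Use infinity where the type provides one, else the largest
+       // finite value Boost.Math knows of for the type:
+       return std::numeric_limits<RealType>::has_infinity
+          ? std::numeric_limits<RealType>::infinity()
+          : boost::math::tools::max_value<RealType>();
+    }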
+
+So far we have not set the `support()` function (only the range),
+on the grounds that the PDF is uninteresting/zero for infinities.
+
+Users who require special handling of infinity (or other specific value) can,
+of course, always intercept this before calling a distribution or function
+and return their own choice of value, or other behavior.
+This will often be simpler than trying to handle the aftermath of the error policy.
+
+Overflow, underflow, denorm can be handled using __error_policy.
+
+We have also tried to catch boundary cases where the mathematical specification
+would result in divide-by-zero or overflow, and to signal these similarly.
+What happens at (and near) poles can be controlled through __error_policy.
+
+[h4 Scale, Shape and Location]
+
+We considered adding location and scale to the list of functions, for example:
+
+ template <class RealType>
+ inline RealType scale(const triangular_distribution<RealType>& dist)
+ {
+ RealType lower = dist.lower();
+ RealType mode = dist.mode();
+ RealType upper = dist.upper();
+ RealType result; // of checks.
+ if(false == detail::check_triangular(BOOST_CURRENT_FUNCTION, lower, mode, upper, &result))
+ {
+ return result;
+ }
+ return (upper - lower);
+ }
+
+but found that these concepts are not defined (or their definition is too contentious)
+for too many distributions to be generally applicable.
+Because they are non-member functions, they can be added if required.
+
+[h4 Notes on Implementation of Specific Functions & Distributions]
+
+* Default parameters for the Triangular Distribution.
+We are uncertain about the best default parameters.
+Some sources suggest that the Standard Triangular Distribution has
+lower = 0, mode = half and upper = 1.
+However as an approximation for the normal distribution,
+the most common usage, lower = -1, mode = 0 and upper = 1 would be more suitable.
+
+[h4 Rational Approximations Used]
+
+Some of the special functions in this library are implemented via
+rational approximations. These are either taken from the literature,
+or devised by John Maddock using
+[link math_toolkit.internals.minimax our Remez code].
+
+Rational rather than Polynomial approximations are used to ensure
+accuracy: polynomial approximations are often wonderful up to
+a certain level of accuracy, but then quite often fail to provide much greater
+accuracy no matter how many more terms are added.
+
+Our own approximations were devised either for added accuracy
+(to support 128-bit long doubles for example), or because
+literature methods were unavailable or under a non-BSL-compatible
+license. Our Remez code is known to produce good
+agreement with literature results in fairly simple "toy" cases.
+All approximations were checked
+for convergence and to ensure that
+they were not ill-conditioned (the coefficients can give a
+theoretically good solution, but the resulting rational function
+may be un-computable at fixed precision).
+
+Recomputing using different
+Remez implementations may well produce differing coefficients: the
+problem is well known to be ill-conditioned in general, and our Remez implementation
+often found a broad and ill-defined minimum for many of these approximations
+(of course for simple "toy" examples like approximating `exp` the minimum
+is well defined, and the coefficients should agree no matter whose Remez
+implementation is used). This should not in general affect the validity
+of the approximations: there's good literature supporting the idea that
+coefficients can be "in error" without necessarily adversely affecting
+the result.
+see [@http://front.math.ucdavis.edu/0101.5042
+"Approximate construction of rational approximations and the effect
+of error autocorrection.", Grigori Litvinov, eprint arXiv:math/0101042].
+Therefore the coefficients still need to be accurately calculated, even if they can
+be in error compared to the "true" minimax solution.
+
+[h4 Representation of Mathematical Constants]
+
+A macro BOOST_DEFINE_MATH_CONSTANT in constants.hpp is used
+to provide high accuracy constants to mathematical functions and distributions,
+since it is important to provide values uniformly for both built-in
+float, double and long double types,
+and for User Defined types in __multiprecision like __cpp_dec_float,
+and others like NTL::quad_float and NTL::RR.
+
+To permit calculations in this Math ToolKit and its tests (and elsewhere)
+at about 100 decimal digits with NTL::RR type,
+it is obviously necessary to define constants to this accuracy.
+
+However, some compilers do not accept decimal digit strings as long as this.
+So the constant is split into two parts, with the 1st containing at least
+long double precision, and the 2nd zero if not needed or known.
+The 3rd part permits an exponent to be provided if necessary (use zero if none) -
+the other two parameters may only contain decimal digits (and sign and decimal point),
+and may NOT include an exponent like 1.234E99 (nor a trailing F or L).
+The second digit string is only used if T is a User-Defined Type,
+when the constant is converted to a long string literal and lexical_casted to type T.
+(This is necessary because you can't use a numeric constant
+since even a long double might not have enough digits).
+
+For example, pi is defined:
+
+ BOOST_DEFINE_MATH_CONSTANT(pi,
+ 3.141592653589793238462643383279502884197169399375105820974944,
+ 5923078164062862089986280348253421170679821480865132823066470938446095505,
+ 0)
+
+And used thus:
+
+ using namespace boost::math::constants;
+
+ double diameter = 1.;
+ double radius = diameter * pi<double>();
+
+ or boost::math::constants::pi<NTL::RR>()
+
+Note that it is necessary (if inconvenient) to specify the type explicitly.
+
+So you cannot write
+
+ double p = boost::math::constants::pi<>(); // could not deduce template argument for 'T'
+
+Neither can you write:
+
+ double p = boost::math::constants::pi; // Context does not allow for disambiguation of overloaded function
+ double p = boost::math::constants::pi(); // Context does not allow for disambiguation of overloaded function
+
+[h4 Thread safety]
+
+Reporting of errors by setting `errno` should be thread-safe already
+(otherwise none of the std lib math functions would be thread safe?).
+If you turn on reporting of errors via exceptions, `errno` gets left unused anyway.
+
+For normal C++ usage, the Boost.Math `static const` constants are now thread-safe,
+so the built-in real-number types `float`, `double` and `long double` are all thread-safe.
+
+For User-Defined Types, for example __cpp_dec_float,
+Boost.Math should also be thread-safe
+(though we are unsure how to rigorously prove this).
+
+(Thread safety has received attention in the C++11 Standard revision,
+so hopefully all compilers will do the right thing here at some point.)
+
+[h4 Sources of Test Data]
+
+We found a large number of sources of test data.
+We have assumed that these are /"known good"/
+if they agree with the results from our tests
+and only consulted other sources for their /'vote'/
+in the case of serious disagreement.
+The accuracy, actual and claimed, varies very widely.
+Only [@http://functions.wolfram.com/ Wolfram Mathematica functions]
+provided a higher accuracy than
+C++ double (64-bit floating-point) and was regarded as
+the most-trusted source by far.
+The __R provided the widest range of distributions,
+but the usual Intel x86 distribution uses 64-bit doubles,
+so our use was limited to 15 to 17 decimal digit accuracy.
+
+A useful index of sources is:
+[@http://www.sal.hut.fi/Teaching/Resources/ProbStat/table.html
+Web-oriented Teaching Resources in Probability and Statistics]
+
+[@http://espse.ed.psu.edu/edpsych/faculty/rhale/hale/507Mat/statlets/free/pdist.htm Statlet]:
+is a Javascript application that calculates and plots probability distributions,
+and provides the most complete range of distributions:
+
+[:Bernoulli, Binomial, discrete uniform, geometric, hypergeometric,
+negative binomial, Poisson, beta, Cauchy-Lorentz, chi-squared, Erlang,
+exponential, extreme value, Fisher, gamma, Laplace, logistic,
+lognormal, normal, Pareto, Student's t, triangular, uniform, and Weibull.]
+
+It calculates pdf, cdf, survivor, log survivor, hazard, tail areas,
+& critical values for 5 tail values.
+
+It is also the only independent source found for the Weibull distribution;
+unfortunately it appears to suffer from very poor accuracy in areas where
+the underlying special function is known to be difficult to implement.
+
+[h4 Testing for Invalid Parameters to Functions and Constructors]
+
+After finding that some 'bad' parameters (like NaN) were not throwing
+a `domain_error` exception as they should, a function
+`check_out_of_range` (in `test_out_of_range.hpp`)
+was devised by JM to check
+(using Boost.Test's BOOST_CHECK_THROW macro)
+that bad parameters passed to constructors and functions throw `domain_error` exceptions.
+
+Usage is `check_out_of_range< DistributionType >(list-of-params);`
+where list-of-params is a list of *valid* parameters from which the distribution can be constructed -
+i.e. the same number of arguments are passed to the function
+as are passed to the distribution constructor.
+
+The values of the parameters are not important, but must be *valid* to pass the constructor checks;
+the default values are suitable, but must be explicitly provided, for example:
+
+ check_out_of_range<extreme_value_distribution<RealType> >(1, 2);
+
+Checks made are:
+
+* Infinity or NaN (if available) passed in place of each of the valid params.
+* Infinity or NaN (if available) as a random variable.
+* Out-of-range random variable passed to pdf and cdf
+(i.e. outside of "range(DistributionType)").
+* Out-of-range probability passed to quantile function and complement.
+
+but does *not* check finite but out-of-range parameters to the constructor,
+because these are specific to each distribution; for example:
+
+    BOOST_CHECK_THROW(pdf(pareto_distribution<RealType>(0, 1), 0), std::domain_error);
+    BOOST_CHECK_THROW(pdf(pareto_distribution<RealType>(1, 0), 0), std::domain_error);
+
+checks that the `scale` and `shape` parameters are both > 0
+by checking that a `domain_error` exception is thrown if either is == 0.
+
+(Use of the `check_out_of_range` function may mean that some previous tests are now redundant.)
+
+It was also noted that if more than one parameter is bad,
+then only the first one detected will be reported by the error message.
+
+[h4 Creating and Managing the Equations]
+
+Equations that fit on a single line can most easily be produced by inline Quickbook code
+using templates for Unicode Greek and Unicode math symbols.
+All Greek letters and a small set of math symbols are available in
+/boost-path/libs/math/doc/sf_and_dist/html4_symbols.qbk
+
+Where equations need to use more than one line, real Math editors were used.
+
+The primary source for the equations is now
+[@http://www.w3.org/Math/ MathML]: see the
+*.mml files in libs\/math\/doc\/sf_and_dist\/equations\/.
+
+These are most easily edited by a GUI editor such as
+[@http://mathcast.sourceforge.net/home.html Mathcast],
+but please note that the equation editor supplied with Open Office
+currently mangles these files and should not be used.
+
+Conversion to SVG was achieved using
+[@https://sourceforge.net/projects/svgmath/ SVGMath] and a command line
+such as:
+
+[pre
+$for file in *.mml; do
+>/cygdrive/c/Python25/python.exe 'C:\download\open\SVGMath-0.3.1\math2svg.py' \\
+>>$file > $(basename $file .mml).svg
+>done
+]
+
+See also the section on "Using Python to run Inkscape" and
+"Using inkscape to convert scalable vector SVG files to Portable Network graphic PNG".
+
+Note that SVGMath requires that the mml files are *not* wrapped in an XHTML
+XML wrapper - this is added by Mathcast by default - one workaround is to
+copy an existing mml file and then edit it with Mathcast: the existing
+format should then be preserved. This is a bug in the XML parser used by
+SVGMath which the author is aware of.
+
+If necessary the XHTML wrapper can be removed with:
+
+[pre cat filename | tr -d "\\r\\n" \| sed -e 's\/.*\\(<math\[^>\]\*>.\*<\/math>\\).\*\/\\1\/' > newfile]
+
+Setting up fonts for SVGMath is currently rather tricky, on a Windows XP system
+JM's font setup is the same as the sample config file provided with SVGMath
+but with:
+
+[pre
+ <!\-\- Double\-struck \-\->
+ <mathvariant name\="double\-struck" family\="Mathematica7, Lucida Sans Unicode"\/>
+]
+
+changed to:
+
+[pre
+ <!\-\- Double\-struck \-\->
+ <mathvariant name\="double\-struck" family\="Lucida Sans Unicode"\/>
+]
+
+Note that unlike the sample config file supplied with SVGMath, this does not
+make use of the [@http://support.wolfram.com/technotes/fonts/windows/latestfonts.html Mathematica 7 font]
+as this lacks sufficient Unicode information
+for it to be used with either SVGMath or XEP "as is".
+
+Also note that the SVG files in the repository are almost certainly
+Windows-specific since they reference various Windows Fonts.
+
+PNG files can be created from the SVGs using
+[@http://xmlgraphics.apache.org/batik/tools/rasterizer.html Batik]
+and a command such as:
+
+[pre java -jar 'C:\download\open\batik-1.7\batik-rasterizer.jar' -dpi 120 *.svg]
+
+Or using Inkscape (File, Export Bitmap, Drawing tab, bitmap size (default size, 100 dpi), default .png filename),
+
+or, using Cygwin, a command such as:
+
+[pre for file in *.svg; do
+ /cygdrive/c/progra~1/Inkscape/inkscape -d 120 -e $(cygpath -a -w $(basename $file .svg).png) $(cygpath -a -w $file);
+done]
+
+Using BASH
+
+[pre # Convert single SVG to PNG file.
+# /c/progra~1/Inkscape/inkscape -d 120 -e a.png a.svg
+]
+
+or, to convert all the SVG files in a folder to PNG:
+
+[pre
+for file in *.svg; do
+/c/progra~1/Inkscape/inkscape -d 120 -e $(basename $file .svg).png $file
+done
+]
+
+Currently Inkscape seems to generate the better-looking PNGs.
+
+The PDF is generated into \pdf\math.pdf
+using a command from a shell or command window with current directory
+\math_toolkit\libs\math\doc\sf_and_dist, typically:
+
+[pre bjam -a pdf >math_pdf.log]
+
+Note that XEP will have to be configured to *use and embed*
+whatever fonts are used by the SVG equations
+(almost certainly editing the sample xep.xml provided by the XEP installation).
+If you fail to do this you will get XEP warnings in the log file like
+
+[pre \[warning\]could not find any font family matching "Times New Roman"; replaced by Helvetica]
+
+(HTML is the default, so it is generated at libs\math\doc\html\index.html
+using the command line >bjam -a > math_toolkit.docs.log).
+
+A sample configuration section,
+ <!-- Sample configuration for Windows TrueType fonts. -->
+is provided in the downloaded xep.xml, but the Windows TrueType fonts are commented out.
+
+JM's XEP config file \xep\xep.xml has the following font configuration section added:
+
+[pre
+ <font\-group xml:base\="file:\/C:\/Windows\/Fonts\/" label\="Windows TrueType" embed\="true" subset\="true">
+ <font\-family name\="Arial">
+ <font><font\-data ttf\="arial.ttf"\/><\/font>
+ <font style\="oblique"><font\-data ttf\="ariali.ttf"\/><\/font>
+ <font weight\="bold"><font\-data ttf\="arialbd.ttf"\/><\/font>
+ <font weight\="bold" style\="oblique"><font\-data ttf\="arialbi.ttf"\/><\/font>
+ <\/font\-family>
+
+ <font\-family name\="Times New Roman" ligatures\="fi fl">
+ <font><font\-data ttf\="times.ttf"\/><\/font>
+ <font style\="italic"><font\-data ttf\="timesi.ttf"\/><\/font>
+ <font weight\="bold"><font\-data ttf\="timesbd.ttf"\/><\/font>
+ <font weight\="bold" style\="italic"><font\-data ttf\="timesbi.ttf"\/><\/font>
+ <\/font\-family>
+
+ <font\-family name\="Courier New">
+ <font><font\-data ttf\="cour.ttf"\/><\/font>
+ <font style\="oblique"><font\-data ttf\="couri.ttf"\/><\/font>
+ <font weight\="bold"><font\-data ttf\="courbd.ttf"\/><\/font>
+ <font weight\="bold" style\="oblique"><font\-data ttf\="courbi.ttf"\/><\/font>
+ <\/font\-family>
+
+ <font\-family name\="Tahoma" embed\="true">
+ <font><font\-data ttf\="tahoma.ttf"\/><\/font>
+ <font weight\="bold"><font\-data ttf\="tahomabd.ttf"\/><\/font>
+ <\/font\-family>
+
+ <font\-family name\="Verdana" embed\="true">
+ <font><font\-data ttf\="verdana.ttf"\/><\/font>
+ <font style\="oblique"><font\-data ttf\="verdanai.ttf"\/><\/font>
+ <font weight\="bold"><font\-data ttf\="verdanab.ttf"\/><\/font>
+ <font weight\="bold" style\="oblique"><font\-data ttf\="verdanaz.ttf"\/><\/font>
+ <\/font\-family>
+
+ <font\-family name\="Palatino" embed\="true" ligatures\="ff fi fl ffi ffl">
+ <font><font\-data ttf\="pala.ttf"\/><\/font>
+ <font style\="italic"><font\-data ttf\="palai.ttf"\/><\/font>
+ <font weight\="bold"><font\-data ttf\="palab.ttf"\/><\/font>
+ <font weight\="bold" style\="italic"><font\-data ttf\="palabi.ttf"\/><\/font>
+ <\/font\-family>
+
+ <font-family name="Lucida Sans Unicode">
+ <!-- <font><font-data ttf="lsansuni.ttf"></font> -->
+ <!-- actually called l_10646.ttf on Windows 2000 and Vista Sp1 -->
+ <font><font-data ttf="l_10646.ttf"/></font>
+ </font-family>
+]
+
+PAB had to alter his configuration because the Lucida Sans Unicode font had a different name.
+Other changes are very likely to be required if you are not using Windows.
+
+XZ authored his equations using the venerable LaTeX; JM converted these to
+MathML using [@http://gentoo-wiki.com/HOWTO_Convert_LaTeX_to_HTML_with_MathML mxlatex].
+This process is currently unreliable and requires some manual intervention:
+consequently LaTeX source is not considered a viable route for the automatic
+production of SVG versions of the equations.
+
+Equations are embedded in the quickbook source using the /equation/
+template defined in math.qbk. This outputs Docbook XML that looks like:
+
+[pre
+<inlinemediaobject>
+<imageobject role="html">
+<imagedata fileref="../equations/myfile.png"></imagedata>
+</imageobject>
+<imageobject role="print">
+<imagedata fileref="../equations/myfile.svg"></imagedata>
+</imageobject>
+</inlinemediaobject>
+]
+
+MathML is not currently present in the Docbook output, or in the
+generated HTML: this needs further investigation.
+
+[h4 Producing Graphs]
+
+Graphs were produced in SVG format and then converted to PNGs using the same
+process as the equations.
+
+The programs
+`/libs/math/doc/sf_and_dist/graphs/dist_graphs.cpp`
+and `/libs/math/doc/sf_and_dist/graphs/sf_graphs.cpp`
+generate the SVG's directly using the
+[@http://code.google.com/soc/2007/boost/about.html Google Summer of Code 2007]
+project of Jacob Voytko (whose work, since considerably enhanced by Paul A. Bristow
+and now reasonably mature and usable,
+is at .\boost-sandbox\SOC\2007\visualization).
+
+[endsect] [/section:sf_implementation Implementation Notes]
+
+[/
+ Copyright 2006, 2007, 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+
diff --git a/doc/background/lanczos.qbk b/doc/background/lanczos.qbk
new file mode 100644
index 0000000..0fa3bee
--- /dev/null
+++ b/doc/background/lanczos.qbk
@@ -0,0 +1,246 @@
+[section:lanczos The Lanczos Approximation]
+
+[h4 Motivation]
+
+['Why base gamma and gamma-like functions on the Lanczos approximation?]
+
+First of all I should make clear that for the gamma function
+over real numbers (as opposed to complex ones)
+the Lanczos approximation (See [@http://en.wikipedia.org/wiki/Lanczos_approximation Wikipedia or ]
+[@http://mathworld.wolfram.com/LanczosApproximation.html Mathworld])
+appears to offer no clear advantage over more traditional methods such as
+[@http://en.wikipedia.org/wiki/Stirling_approximation Stirling's approximation].
+__pugh carried out an extensive comparison of the various methods available
+and discovered that they were all very similar in terms of complexity
+and relative error. However, the Lanczos approximation does have a couple of
+properties that make it worthy of further consideration:
+
+* The approximation has an easy-to-compute truncation error that holds for
+all /z > 0/. In practice that means we can use the same approximation for all
+/z > 0/, and be certain that no matter how large or small /z/ is, the truncation
+error will /at worst/ be bounded by some finite value.
+* The approximation has a form that is particularly amenable to analytic
+manipulation, in particular ratios of gamma or gamma-like functions
+are particularly easy to compute without resorting to logarithms.
+
+It is the combination of these two properties that make the approximation
+attractive: Stirling's approximation is highly accurate for large z, and
+has some of the same analytic properties as the Lanczos approximation, but
+can't easily be used across the whole range of z.
+
+As the simplest example, consider the ratio of two gamma functions: one could
+compute the result via lgamma:
+
+ exp(lgamma(a) - lgamma(b));
+
+However, even if lgamma is uniformly accurate to 0.5 ulp, the worst-case
+relative error in the above can easily be shown to be:
+
+ Erel > a * log(a)/2 + b * log(b)/2
+
+For small /a/ and /b/ that's not a problem, but to put the relationship another
+way: ['each time a and b increase in magnitude by a factor of 10, at least one
+decimal digit of precision will be lost.]
+
+In contrast, by analytically combining like power
+terms in a ratio of Lanczos approximations, these errors can be virtually eliminated
+for small /a/ and /b/, and kept under control for very large (or very small
+for that matter) /a/ and /b/. Of course, computing large powers is itself a
+notoriously hard problem, but even so, analytic combinations of Lanczos
+approximations can make the difference between obtaining a valid result, or
+simply garbage. Refer to the implementation notes for the __beta function for
+an example of this method in practice. The incomplete
+[link math_toolkit.sf_gamma.igamma gamma_p gamma] and
+[link math_toolkit.sf_beta.ibeta_function beta] functions
+use similar analytic combinations of power terms, to combine gamma and beta
+functions divided by large powers into single (simpler) expressions.
+
+[h4 The Approximation]
+
+The Lanczos Approximation to the Gamma Function is given by:
+
+[equation lanczos0]
+
+where S[sub g](z) is an infinite sum that is convergent for all z > 0,
+and /g/ is an arbitrary parameter that controls the "shape" of the
+terms in the sum, which is given by:
+
+[equation lanczos0a]
+
+With individual coefficients defined in closed form by:
+
+[equation lanczos0b]
+
+However, evaluation of the sum in that form can lead to numerical instability
+in the computation of the ratios of rising and falling factorials (effectively
+we're multiplying by a series of numbers very close to 1, so roundoff errors
+can accumulate quite rapidly).
+
+The Lanczos approximation is therefore often written in partial fraction form
+with the leading constants absorbed by the coefficients in the sum:
+
+[equation lanczos1]
+
+where:
+
+[equation lanczos2]
+
+Again parameter /g/ is an arbitrarily chosen constant, and /N/ is an arbitrarily chosen
+number of terms to evaluate in the "Lanczos sum" part.
+
+[note
+Some authors
+choose to define the sum from k=1 to N, and hence end up with N+1 coefficients.
+This happens to confuse both the following discussion and the code (since C++
+deals with half open array ranges, rather than the closed range of the sum).
+This convention is consistent with __godfrey, but not __pugh, so take care
+when referring to the literature in this field.]
+
+[h4 Computing the Coefficients]
+
+The coefficients C[sub 0]..C[sub N-1] need to be computed from /N/ and /g/
+at high precision, and then stored as part of the program.
+Calculation of the coefficients is performed via the method of __godfrey;
+let the constants be contained in a column vector P, then:
+
+P = D B C F
+
+where B is an NxN matrix:
+
+[equation lanczos4]
+
+D is an NxN matrix:
+
+[equation lanczos3]
+
+C is an NxN matrix:
+
+[equation lanczos5]
+
+and F is an N element column vector:
+
+[equation lanczos6]
+
+Note that the matrices B, D and C contain all-integer terms and depend
+only on /N/; this product should be computed first, and then multiplied
+by /F/ as the last step.
+
+[h4 Choosing the Right Parameters]
+
+The trick is to choose
+/N/ and /g/ to give the desired level of accuracy: choosing a small value for
+/g/ leads to a strictly convergent series, but one which converges only slowly.
+Choosing a larger value of /g/ causes the terms in the series to be large
+and\/or divergent for about the first /g-1/ terms, and to then suddenly converge
+with a "crunch".
+
+__pugh has determined the optimal
+value of /g/ for /N/ in the range /1 <= N <= 60/: unfortunately in practice choosing
+these values leads to cancellation errors in the Lanczos sum as the largest
+term in the (alternating) series is approximately 1000 times larger than the result.
+These optimal values appear not to be useful in practice unless the evaluation
+can be done with a number of guard digits /and/ the coefficients are stored
+at higher precision than that desired in the result. These values are best
+reserved for, say, computing to float precision with double-precision arithmetic.
+
+[table Optimal choices for N and g when computing with guard digits (source: Pugh)
+[[Significand Size] [N] [g][Max Error]]
+[[24] [6] [5.581][9.51e-12]]
+[[53][13][13.144565][9.2213e-23]]
+]
+
+The alternative described by __godfrey is to perform an exhaustive
+search of the /N/ and /g/ parameter space to determine the optimal combination for
+a given /p/ digit floating-point type. Repeating this work found a good
+approximation for double precision arithmetic (close to the one __godfrey found),
+but failed to find really
+good approximations for 80 or 128-bit long doubles. Further, it was observed
+that the approximations obtained tended to be optimised for the small values
+of z (1 < z < 200) used to test the implementation against the factorials.
+Computing ratios of gamma functions with large arguments was observed to
+suffer from error resulting from the truncation of the Lanczos series.
+
+__pugh identified all the locations where the theoretical error of the
+approximation was at a minimum, but unfortunately has published only the largest
+of these minima. However, he makes the observation that the minima
+coincide closely with the location where the first neglected term (a[sub N]) in the
+Lanczos series S[sub g](z) changes sign. These locations are quite easy to
+locate, albeit with considerable computer time. These "sweet spots" need
+only be computed once, tabulated, and then searched when required for an
+approximation that delivers the required precision for some fixed precision
+type.
+
+Unfortunately, following this path failed to find a really good approximation
+for 128-bit long doubles, and those found for 64 and 80-bit reals required an
+excessive number of terms. There are two competing issues here: high precision
+requires a large value of /g/, but avoiding cancellation errors in the evaluation
+requires a small /g/.
+
+At this point note that the Lanczos sum can be converted into rational form
+(a ratio of two polynomials, obtained from the partial-fraction form using
+polynomial arithmetic),
+and doing so changes the coefficients so that /they are all positive/. That
+means that the sum in rational form can be evaluated without cancellation
+error, albeit with double the number of coefficients for a given N. Repeating
+the search of the "sweet spots", this time evaluating the Lanczos sum in
+rational form, and testing only those "sweet spots" whose theoretical error
+is less than the machine epsilon for the type being tested, yielded good
+approximations for all the types tested. The optimal values found were quite
+close to the best cases reported by __pugh (just slightly larger /N/ and slightly
+smaller /g/ for a given precision than __pugh reports), and even though converting
+to rational form doubles the number of stored coefficients, it should be
+noted that half of them are integers (and therefore require less storage space)
+and the approximations require a smaller /N/ than would otherwise be required,
+so fewer floating point operations may be required overall.
+
+The following table shows the optimal values for /N/ and /g/ when computing
+at fixed precision. These should be taken as work in progress: there are no
+values for 106-bit significand machines (Darwin long doubles & NTL quad_float),
+and further optimisation of the values of /g/ may be possible.
+Errors given in the table
+are estimates of the error due to truncation of the Lanczos infinite series
+to /N/ terms. They are calculated from the sum of the first five neglected
+terms - and are known to be rather pessimistic estimates - although it is noticeable
+that the best combinations of /N/ and /g/ occurred when the estimated truncation error
+almost exactly matches the machine epsilon for the type in question.
+
+[table Optimum value for N and g when computing at fixed precision
+[[Significand Size][Platform/Compiler Used][N][g][Max Truncation Error]]
+[[24][Win32, VC++ 7.1] [6] [1.428456135094165802001953125][9.41e-007]]
+[[53][Win32, VC++ 7.1] [13] [6.024680040776729583740234375][3.23e-016]]
+[[64][Suse Linux 9 IA64, gcc-3.3.3] [17] [12.2252227365970611572265625][2.34e-024]]
+[[116][HP Tru64 Unix 5.1B \/ Alpha, Compaq C++ V7.1-006] [24] [20.3209821879863739013671875][4.75e-035]]
+]
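+
+As a (non-production) sketch of how one of these tabulated /N/ and /g/ pairs gets used:
+the double-precision approximation from the table above is available in the library as
+`boost::math::lanczos::lanczos13m53`, and the gamma function for z > 0 can be
+recovered from its rational-form Lanczos sum as below. (The library's real gamma
+implementation adds overflow and special-case handling that is omitted here.)
+
+    #include <boost/math/special_functions/lanczos.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    double gamma_via_lanczos(double z) // sketch: assumes z > 0
+    {
+       typedef boost::math::lanczos::lanczos13m53 L; // N=13, g as tabulated above
+       double zgh = z + L::g() - 0.5;
+       // lanczos_sum evaluates S_g(z) in rational form, leading constants absorbed:
+       return L::lanczos_sum(z) * std::pow(zgh, z - 0.5) * std::exp(-zgh);
+    }
+
+    int main()
+    {
+       std::cout << gamma_via_lanczos(4.5) << std::endl; // ~11.6317 == gamma(4.5)
+    }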
+
+Finally note that the Lanczos approximation can be written as follows
+by removing a factor of exp(g) from the denominator, and then dividing
+all the coefficients by exp(g):
+
+[equation lanczos7]
+
+This form is more convenient for calculating lgamma, but for the gamma
+function the division by /e/ turns a possibly exact quantity into an
+inexact value: this reduces accuracy in the common case that
+the input is exact, and so isn't used for the gamma function.
+
+[h4 References]
+
+# [#godfrey]Paul Godfrey, [@http://my.fit.edu/~gabdo/gamma.txt "A note on the computation of the convergent
+Lanczos complex Gamma approximation"].
+# [#pugh]Glendon Ralph Pugh,
+[@http://bh0.physics.ubc.ca/People/matt/Doc/ThesesOthers/Phd/pugh.pdf
+"An Analysis of the Lanczos Gamma Approximation"],
+PhD Thesis November 2004.
+# Viktor T. Toth,
+[@http://www.rskey.org/gamma.htm "Calculators and the Gamma Function"].
+# Mathworld, [@http://mathworld.wolfram.com/LanczosApproximation.html
+The Lanczos Approximation].
+
+[endsect][/section:lanczos The Lanczos Approximation]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/background/references.qbk b/doc/background/references.qbk
new file mode 100644
index 0000000..86f1a5e
--- /dev/null
+++ b/doc/background/references.qbk
@@ -0,0 +1,115 @@
+[section:refs References]
+
+[h4 General references]
+
+(Specific detailed sources for individual functions and distributions
+are given at the end of each individual section).
+
+[@http://dlmf.nist.gov/ DLMF (NIST Digital Library of Mathematical Functions)]
+is a replacement for the legendary
+Abramowitz and Stegun's Handbook of Mathematical Functions (often called simply A&S):
+
+M. Abramowitz and I. A. Stegun (Eds.) (1964)
+Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables,
+National Bureau of Standards Applied Mathematics Series,
+U.S. Government Printing Office, Washington, D.C.
+[/ __Abramowitz_Stegun]
+
+NIST Handbook of Mathematical Functions
+Edited by: Frank W. J. Olver, University of Maryland and National Institute of Standards and Technology, Maryland,
+Daniel W. Lozier, National Institute of Standards and Technology, Maryland,
+Ronald F. Boisvert, National Institute of Standards and Technology, Maryland,
+Charles W. Clark, National Institute of Standards and Technology, Maryland and University of Maryland.
+
+ISBN: 978-0521140638 (paperback), 9780521192255 (hardback), July 2010, Cambridge University Press.
+
+[@http://www.itl.nist.gov/div898/handbook/index.htm NIST/SEMATECH e-Handbook of Statistical Methods]
+
+[@http://documents.wolfram.com/mathematica/Add-onsLinks/StandardPackages/Statistics/DiscreteDistributions.html Mathematica Documentation: DiscreteDistributions]
+The Wolfram Research Documentation Center is a collection of online reference materials about Mathematica, CalculationCenter, and other Wolfram Research products.
+
+[@http://documents.wolfram.com/mathematica/Add-onsLinks/StandardPackages/Statistics/ContinuousDistributions.html Mathematica Documentation: ContinuousDistributions]
+
+Statistical Distributions (Wiley Series in Probability & Statistics) (Paperback)
+by N.A.J. Hastings, Brian Peacock, Merran Evans, ISBN: 0471371246, Wiley 2000.
+
+[@http://www.worldscibooks.com/mathematics/p191.html Extreme Value Distributions, Theory and Applications]
+Samuel Kotz & Saralees Nadarajah, ISBN 978-1-86094-224-2 & 1-86094-224-5 Oct 2000,
+Chapter 1.2 discusses the various extreme value distributions.
+
+[@http://bh0.physics.ubc.ca/People/matt/Doc/ThesesOthers/Phd/pugh.pdf pugh.pdf (application/pdf Object)]
+Pugh's PhD thesis on the Lanczos approximation to the gamma function.
+
+[@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2003 N1514, 03-0097, A Proposal to Add Mathematical Special Functions to the C++ Standard Library (version 2), Walter E. Brown]
+
+[h4 Calculators]
+
+We found (and used to cross-check spot values, as far as their accuracy allowed) the following calculators:
+
+[@http://functions.wolfram.com/ The Wolfram Functions Site]
+The Wolfram Functions Site - providing
+the mathematical and scientific community with the world's largest
+(and most authoritative) collection of formulas and graphics about mathematical functions.
+
+[@http://www.moshier.net/cephes28.zip 100-decimal digit calculator] provided some spot values.
+
+[@http://www.adsciengineering.com/bpdcalc/ http://www.adsciengineering.com/bpdcalc/] Binomial Probability Distribution Calculator.
+
+
+[h4 Other Libraries]
+
+[@http://www.moshier.net/#Cephes Cephes library] by Stephen Moshier and his book:
+
+Methods and Programs for Mathematical Functions, Stephen L B Moshier, Ellis Horwood (1989), ISBN 0745802893 / 0470216093, provided inspiration.
+
+[@http://lib.stat.cmu.edu/general/cdflib CDFLIB Library of Fortran Routines for Cumulative Distribution functions.]
+
+[@http://www.csit.fsu.edu/~burkardt/cpp_src/dcdflib/dcdflib.html DCDFLIB C++ version].
+
+[@http://www.csit.fsu.edu/~burkardt/f_src/dcdflib/dcdflib.html DCDFLIB C++ version]
+DCDFLIB is a library of C++ routines, using double precision arithmetic, for evaluating cumulative probability density functions.
+
+[@http://www.softintegration.com/docs/package/chnagstat/ http://www.softintegration.com/docs/package/chnagstat/]
+
+[@http://www.nag.com/numeric/numerical_libraries.asp NAG] libraries.
+
+[@http://www.mathcad.com MathCAD]
+
+[@http://www.vni.com/products/imsl/jmsl/v30/api/com/imsl/stat/Cdf.html JMSL Numerical Library] (Java).
+
+John F Hart, Computer Approximations, (1978) ISBN 0 088275 642-7.
+
+William J Cody, Software Manual for the Elementary Functions, Prentice-Hall (1980) ISBN 0138220646.
+
+Nico Temme, Special Functions, An Introduction to the Classical Functions of Mathematical Physics, Wiley, ISBN: 0471-11313-1 (1996), who also gave valuable advice.
+
+[@http://www.cas.lancs.ac.uk/glossary_v1.1/prob.html#probdistn Statistics Glossary], Valerie Easton and John H. McColl.
+
+[__R]
+R Development Core Team (2010). R: A language and environment for
+statistical computing. R Foundation for Statistical Computing,
+Vienna, Austria. ISBN 3-900051-07-0, URL http://www.R-project.org.
+
+For use of R, see:
+
+Jim Albert, Bayesian Computation with R, ISBN 978-0-387-71384-7.
+
+[@http://www.quantnet.com/cplusplus-statistical-distributions-boost
+C++ Statistical Distributions in Boost - QuantNetwork forum]
+discusses using Boost.Math in finance.
+
+[@http://www.quantnet.com/boost-and-computational-finance/ Quantnet Boost and computational finance].
+Robert Demming & Daniel J. Duffy, Introduction to the C++ Boost Libraries - Volume I - Foundations,
+ISBN 978-94-91028-01-4, and Volume II - Advanced Libraries and Applications,
+ISBN 978-94-91028-02-1 (to be published in 2011);
+discusses the application of Boost.Math, especially in finance.
+
+[endsect] [/section:references References]
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/background/remez.qbk b/doc/background/remez.qbk
new file mode 100644
index 0000000..6dd2718
--- /dev/null
+++ b/doc/background/remez.qbk
@@ -0,0 +1,377 @@
+[section:remez The Remez Method]
+
+The [@http://en.wikipedia.org/wiki/Remez_algorithm Remez algorithm]
+is a methodology for locating the minimax rational approximation
+to a function. This short article gives a brief overview of the method, but
+it should not be regarded as a thorough theoretical treatment; for that you
+should consult your favorite textbook.
+
+Imagine that you want to approximate some function f(x) by way of a rational
+function R(x), where R(x) may be either a polynomial P(x) or a ratio of two
+polynomials P(x)/Q(x) (a rational function). Initially we'll concentrate on the
+polynomial case, as it's by far the easier to deal with; later we'll extend
+to the full rational function case.
+
+We want to find the "best" rational approximation, where
+"best" is defined to be the approximation that has the least deviation
+from f(x). We can measure the deviation by way of an error function:
+
+E[sub abs](x) = f(x) - R(x)
+
+which is expressed in terms of absolute error, but we can equally use
+relative error:
+
+E[sub rel](x) = (f(x) - R(x)) / |f(x)|
+
+And indeed in general we can scale the error function in any way we want; it
+makes no difference to the maths, although the two forms above cover almost
+every practical case that you're likely to encounter.
+
+The minimax rational function R(x) is then defined to be the function that
+yields the smallest maximal value of the error function. Chebyshev showed
+that there is a unique minimax solution for R(x) that has the following
+properties:
+
+* If R(x) is a polynomial of degree N, then there are N+2 unknowns:
+the N+1 coefficients of the polynomial, and the maximal value of the error
+function.
+* The error function has N+1 roots, and N+2 extrema (minima and maxima).
+* The extrema alternate in sign, and all have the same magnitude.
+
+That means that if we know the location of the extrema of the error function
+then we can write N+2 simultaneous equations:
+
+R(x[sub i]) + (-1)[super i]E = f(x[sub i])
+
+where E is the maximal error term, and x[sub i] are the abscissa values of the
+N+2 extrema of the error function. It is then trivial to solve the simultaneous
+equations to obtain the polynomial coefficients and the error term.
+
+['Unfortunately we don't know where the extrema of the error function are located!]
+
+[h4 The Remez Method]
+
+The Remez method is an iterative technique which, given a broad range of
+assumptions, will converge on the extrema of the error function, and therefore
+the minimax solution.
+
+In the following discussion we'll use a concrete example to illustrate
+the Remez method: an approximation to the function e[super x][space] over
+the range \[-1, 1\].
+
+Before we can begin the Remez method, we must obtain an initial value
+for the location of the extrema of the error function. We could "guess"
+these, but a much closer first approximation can be obtained by first
+constructing an interpolated polynomial approximation to f(x).
+
+In order to obtain the N+1 coefficients of the interpolated polynomial
+we need N+1 points (x[sub 0]...x[sub N]); with our interpolated form
+passing through each of those points,
+that yields N+1 simultaneous equations:
+
+f(x[sub i]) = P(x[sub i]) = c[sub 0] + c[sub 1]x[sub i] ... + c[sub N]x[sub i][super N]
+
+Which can be solved for the coefficients c[sub 0]...c[sub N] in P(x).
+
+Obviously this is not a minimax solution, indeed our only guarantee is that f(x) and
+P(x) touch at N+1 locations, away from those points the error may be arbitrarily
+large. However, we would clearly like this initial approximation to be as close to
+f(x) as possible, and it turns out that using the zeros of an orthogonal polynomial
+as the initial interpolation points is a good choice. In our example we'll use the
+zeros of a Chebyshev polynomial as these are particularly easy to calculate,
+interpolating for a polynomial of degree 4, and measuring /relative error/
+we get the following error function:
+
+[$../graphs/remez-2.png]
+
+Which has a peak relative error of 1.2x10[super -3].
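+
+For reference, the Chebyshev zeros used as interpolation points above are cheap
+to compute; a minimal sketch (the helper name is ours, not part of the library)
+for n points over an arbitrary interval \[a, b\] is:
+
+    #include <cmath>
+    #include <vector>
+
+    // Zeros of the Chebyshev polynomial T_n, rescaled from [-1, 1] to [a, b]:
+    std::vector<double> chebyshev_zeros(int n, double a, double b)
+    {
+       const double pi = 3.14159265358979323846;
+       std::vector<double> x(n);
+       for(int k = 0; k < n; ++k)
+       {
+          double t = std::cos(pi * (2 * k + 1) / (2 * n)); // k'th zero in [-1, 1]
+          x[k] = 0.5 * (a + b) + 0.5 * (b - a) * t;        // rescale to [a, b]
+       }
+       return x;
+    }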
+
+While this is a pretty good approximation already, judging by the
+shape of the error function we can clearly do better. Before starting
+on the Remez method proper, we have one more step to perform: locate
+all the extrema of the error function, and store
+these locations as our initial ['Chebyshev control points].
+
+[note
+In the simple case of a polynomial approximation, by interpolating through
+the roots of a Chebyshev polynomial we have in fact created a ['Chebyshev
+approximation] to the function: in terms of /absolute error/
+this is the best a priori choice for the interpolated form we can
+achieve, and typically is very close to the minimax solution.
+
+However, if we want to optimise for /relative error/, or if the approximation
+is a rational function, then the initial Chebyshev solution can be quite far
+from the ideal minimax solution.
+
+A more technical discussion of the theory involved can be found in this
+[@http://math.fullerton.edu/mathews/n2003/ChebyshevPolyMod.html online course].]
+
+[h4 Remez Step 1]
+
+The first step in the Remez method, given our current set of
+N+2 Chebyshev control points x[sub i], is to solve the N+2 simultaneous
+equations:
+
+P(x[sub i]) + (-1)[super i]E = f(x[sub i])
+
+To obtain the error term E, and the coefficients of the polynomial P(x).
+
+This gives us a new approximation to f(x) that has the same error /E/ at
+each of the control points, and whose error function ['alternates in sign]
+at the control points. This is still not necessarily the minimax
+solution though, since the control points may not be at the extrema of the error
+function. After this first step here's what our approximation's error
+function looks like:
+
+[$../graphs/remez-3.png]
+
+Clearly this is still not the minimax solution since the control points
+are not located at the extrema, but the maximum relative error has now
+dropped to 5.6x10[super -4].
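+
+For the curious, here is a minimal (and deliberately naive) sketch of the step 1
+linear solve for the polynomial case; a production implementation would use
+higher-precision arithmetic and a more careful solver since, as discussed under
+Practical Considerations below, these systems are often close to singular:
+
+    #include <algorithm>
+    #include <cmath>
+    #include <vector>
+
+    // Given the N+2 control points x[0..N+1], solve
+    //    sum_j c_j * x_i^j + (-1)^i * E = f(x_i)
+    // for the N+1 coefficients c_j plus the error term E (returned last).
+    std::vector<double> remez_step_1(const std::vector<double>& x, double (*f)(double))
+    {
+       const int n = static_cast<int>(x.size()); // n == N+2 equations and unknowns
+       // Build the augmented matrix [A | rhs]:
+       std::vector<std::vector<double> > m(n, std::vector<double>(n + 1));
+       for(int i = 0; i < n; ++i)
+       {
+          double power = 1;
+          for(int j = 0; j < n - 1; ++j)
+          {
+             m[i][j] = power;                 // coefficient of c_j is x_i^j
+             power *= x[i];
+          }
+          m[i][n - 1] = (i & 1) ? -1.0 : 1.0; // coefficient of E is (-1)^i
+          m[i][n] = f(x[i]);                  // right hand side
+       }
+       // Gaussian elimination with partial pivoting:
+       for(int col = 0; col < n; ++col)
+       {
+          int pivot = col;
+          for(int row = col + 1; row < n; ++row)
+             if(std::fabs(m[row][col]) > std::fabs(m[pivot][col]))
+                pivot = row;
+          std::swap(m[col], m[pivot]);
+          for(int row = col + 1; row < n; ++row)
+          {
+             double factor = m[row][col] / m[col][col];
+             for(int k = col; k <= n; ++k)
+                m[row][k] -= factor * m[col][k];
+          }
+       }
+       // Back substitution:
+       std::vector<double> solution(n);
+       for(int row = n - 1; row >= 0; --row)
+       {
+          double sum = m[row][n];
+          for(int k = row + 1; k < n; ++k)
+             sum -= m[row][k] * solution[k];
+          solution[row] = sum / m[row][row];
+       }
+       return solution; // c_0 ... c_N, then E
+    }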
+
+[h4 Remez Step 2]
+
+The second step is to locate the extrema of the new approximation, which we do
+in two stages: first, since the error function changes sign at each
+control point, we must have N+1 roots of the error function located between
+each adjacent pair of the N+2 control points. Once these roots are found by standard root finding
+techniques, we know that N extrema are bracketed between each pair of
+roots, plus two more between the endpoints of the range and the first and last roots.
+The N+2 extrema can then be found using standard function minimisation techniques.
+
+We now have a choice: multi-point exchange, or single point exchange.
+
+In single point exchange, we move the control point nearest to the largest extremum to
+the abscissa value of that extremum.
+
+In multi-point exchange we swap all the current control points for the locations
+of the extrema.
+
+In our example we perform multi-point exchange.
+
+[h4 Iteration]
+
+The Remez method then performs steps 1 and 2 above iteratively until the control
+points are located at the extrema of the error function: this is then
+the minimax solution.
+
+For our current example, two more iterations converge on a minimax
+solution with a peak relative error of
+5x10[super -4] and an error function that looks like:
+
+[$../graphs/remez-4.png]
+
+[h4 Rational Approximations]
+
+If we wish to extend the Remez method to a rational approximation of the form
+
+f(x) = R(x) = P(x) / Q(x)
+
+where P(x) and Q(x) are polynomials, then we proceed as before, except that now
+we have N+M+2 unknowns if P(x) is of order N and Q(x) is of order M. This assumes
+that Q(x) is normalised so that its leading coefficient is 1, giving
+N+M+1 polynomial coefficients in total, plus the error term E.
+
+The simultaneous equations to be solved are now:
+
+P(x[sub i]) / Q(x[sub i]) + (-1)[super i]E = f(x[sub i])
+
+Evaluated at the N+M+2 control points x[sub i].
+
+Unfortunately these equations are non-linear in the error term E: we can only
+solve them if we know E, and yet E is one of the unknowns!
+
+The method usually adopted to solve these equations is an iterative one: we guess the
+value of E, solve the equations to obtain a new value for E (as well as the polynomial
+coefficients), then use the new value of E as the next guess. The method is
+repeated until E converges on a stable value.
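+
+In outline, the iteration might look like this sketch (the solver call is a
+hypothetical placeholder for the linear solve described above):
+
+    double E = 0; // initial guess for the error term
+    for(int iter = 0; iter < max_iterations; ++iter)
+    {
+       // With E fixed at its previous value, the terms in E * Q(x) become linear,
+       // so the system can be solved as in the polynomial case:
+       double new_E = solve_linearised_system(control_points, E);
+       if(std::fabs(new_E - E) <= tolerance * std::fabs(new_E))
+          break; // E has converged to a stable value
+       E = new_E;
+    }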
+
+These complications extend the running time required for the development
+of rational approximations quite considerably. It is often desirable
+to obtain a rational rather than polynomial approximation nonetheless:
+rational approximations will often match more-difficult-to-approximate
+functions to greater accuracy, and with greater efficiency, than their
+polynomial alternatives. For example, if we take our previous example
+of an approximation to e[super x], we obtained 5x10[super -4] accuracy
+with an order 4 polynomial. If we move two of the unknowns into the denominator
+to give a pair of order 2 polynomials, and re-minimise, then the peak relative error drops
+to 8.7x10[super -5]. That's more than a five-fold increase in accuracy, for the same number
+of terms overall.
+
+[h4 Practical Considerations]
+
+Most treatises on approximation theory stop at this point. However, from
+a practical point of view, most of the work involves finding the right
+approximating form, and then persuading the Remez method to converge
+on a solution.
+
+So far we have used a direct approximation:
+
+f(x) = R(x)
+
+But this will converge to a useful approximation only if f(x) is smooth. In
+addition, round-off errors when evaluating the rational form mean that this
+will never get closer than within a few epsilon of machine precision.
+Therefore this form of direct approximation is often reserved for situations
+where we want efficiency, rather than accuracy.
+
+The first step in improving the situation is generally to split f(x) into
+a dominant part that we can compute accurately by another method, and a
+slowly changing remainder which can be approximated by a rational approximation.
+We might be tempted to write:
+
+f(x) = g(x) + R(x)
+
+where g(x) is the dominant part of f(x), but if f(x)\/g(x) is approximately
+constant over the interval of interest then:
+
+f(x) = g(x)(c + R(x))
+
+Will yield a much better solution: here /c/ is a constant that is the approximate
+value of f(x)\/g(x) and R(x) is typically tiny compared to /c/. In this situation
+if R(x) is optimised for absolute error, then as long as its error is small compared
+to the constant /c/, that error will effectively get wiped out when R(x) is added to
+/c/.
+
+The difficult part is obviously finding the right g(x) to extract from your
+function: often the asymptotic behaviour of the function will give a clue, so
+for example the function __erfc becomes proportional to
+e[super -x[super 2]]\/x as x becomes large. Therefore using:
+
+erfc(x) = (C + R(x)) e[super -x[super 2]]/x
+
+as the approximating form seems like an obvious thing to try, and does indeed
+yield a useful approximation.
+
+However, the difficulty then becomes one of converging the minimax solution.
+Unfortunately, it is known that for some functions the Remez method can lead
+to divergent behaviour, even when the initial starting approximation is quite good.
+Furthermore, it is not uncommon for the solution obtained in the first Remez step
+above to be a bad one: the equations to be solved are generally "stiff", often
+very close to being singular, and, assuming a solution is found at all, round-off
+errors and a rapidly changing error function can lead to a situation where the
+error function does not in fact change sign at each control point as required.
+If this occurs, it is fatal to the Remez method. It is also possible to
+obtain solutions that are perfectly valid mathematically, but which are
+quite useless computationally: either because there is an unavoidable amount
+of roundoff error in the computation of the rational function, or because
+the denominator has one or more roots over the interval of the approximation.
+In the latter case while the approximation may have the correct limiting value at
+the roots, the approximation is nonetheless useless.
+
+Assuming that the approximation does not have any fatal errors, and that the only
+issue is converging adequately on the minimax solution, the aim is to
+get as close as possible to the minimax solution before beginning the Remez method.
+Using the zeros of a Chebyshev polynomial for the initial interpolation is a
+good start, but may not be ideal when dealing with relative errors and\/or
+rational (rather than polynomial) approximations. One approach is to skew
+the initial interpolation points to one end: for example if we raise the
+roots of the Chebyshev polynomial to a positive power greater than 1
+then the roots will be skewed towards the middle of the \[-1,1\] interval,
+while a positive power less than one
+will skew them towards either end. More usefully, if we initially rescale the
+points over \[0,1\] and then raise to a positive power, we can skew them to the left
+or right. Returning to our example of e[super x][space] over \[-1,1\], the initial
+interpolated form was some way from the minimax solution:
+
+[$../graphs/remez-2.png]
+
+However, if we first skew the interpolation points to the left (rescale them
+to \[0, 1\], raise to the power 1.3, and then rescale back to \[-1,1\]) we
+reduce the error from 1.3x10[super -3][space]to 6x10[super -4]:
+
+[$../graphs/remez-5.png]
+
+It's clearly still not ideal, but it is only a few percent away from
+our desired minimax solution (5x10[super -4]).
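+
+A minimal sketch of the skewing transform just described (the helper name is
+ours): rescale each point to \[0, 1\], raise it to a positive power /p/, and
+rescale back, so that p > 1 skews the points towards the left end of the
+interval and p < 1 towards the right:
+
+    #include <cmath>
+
+    // Skew a point x in [a, b]: p > 1 pushes points towards a, p < 1 towards b.
+    double skew_point(double x, double a, double b, double p)
+    {
+       double t = (x - a) / (b - a); // rescale to [0, 1]
+       t = std::pow(t, p);           // apply the skew
+       return a + (b - a) * t;       // rescale back to [a, b]
+    }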
+
+[h4 Remez Method Checklist]
+
+The following lists some of the things to check if the Remez method goes wrong;
+it is by no means an exhaustive list, but is provided in the hope that it will
+prove useful.
+
+* Is the function smooth enough? Can it be better separated into
+a rapidly changing part, and an asymptotic part?
+* Does the function being approximated have any "blips" in it? Check
+for problems as the function changes computation method, or
+if a root, or an infinity has been divided out. The telltale
+sign is if there is a narrow region where the Remez method will
+not converge.
+* Check you have enough accuracy in your calculations: remember that
+the Remez method works on the difference between the approximation
+and the function being approximated: so you must have more digits of
+precision available than the precision of the approximation
+being constructed. So for example at double precision, you
+shouldn't expect to be able to get better than a float precision
+approximation.
+* Try skewing the initial interpolated approximation to minimise the
+error before you begin the Remez steps.
+* If the approximation won't converge or is ill-conditioned from one starting
+location, try starting from a different location.
+* If a rational function won't converge, one can minimise a polynomial
+(which presents no problems), then move one term from the numerator to
+the denominator and minimise again. In theory one can continue moving
+terms one at a time from numerator to denominator, and then re-minimising,
+retaining the last set of control points at each stage.
+* Try using a smaller interval. It may also be possible to optimise over
+one (small) interval, rescale the control points over a larger interval,
+and then re-minimise.
+* Keep abscissa values small: use a change of variable to keep the abscissa
+over, say \[0, b\], for some smallish value /b/.
+
+[h4 References]
+
+The original references for the Remez Method and its extension
+to rational functions are unfortunately in Russian:
+
+Remez, E.Ya., ['Fundamentals of numerical methods for Chebyshev approximations],
+"Naukova Dumka", Kiev, 1969.
+
+Remez, E.Ya., Gavrilyuk, V.T., ['Computer development of certain approaches
+to the approximate construction of solutions of Chebyshev problems
+nonlinearly depending on parameters], Ukr. Mat. Zh. 12 (1960), 324-338.
+
+Gavrilyuk, V.T., ['Generalization of the first polynomial algorithm of
+E.Ya.Remez for the problem of constructing rational-fractional
+Chebyshev approximations], Ukr. Mat. Zh. 16 (1961), 575-585.
+
+Some English language sources include:
+
+Fraser, W., Hart, J.F., ['On the computation of rational approximations
+to continuous functions], Comm. of the ACM 5 (1962), 401-403, 414.
+
+Ralston, A., ['Rational Chebyshev approximation by Remes' algorithms],
+Numer.Math. 7 (1965), no. 4, 322-330.
+
+A. Ralston, ['Rational Chebyshev approximation, Mathematical
+Methods for Digital Computers v. 2] (Ralston A., Wilf H., eds.),
+Wiley, New York, 1967, pp. 264-284.
+
+Hart, J.F. e.a., ['Computer approximations], Wiley, New York a.o., 1968.
+
+Cody, W.J., Fraser, W., Hart, J.F., ['Rational Chebyshev approximation
+using linear equations], Numer.Math. 12 (1968), 242-251.
+
+Cody, W.J., ['A survey of practical rational and polynomial
+approximation of functions], SIAM Review 12 (1970), no. 3, 400-423.
+
+Barrar, R.B., Loeb, H.J., ['On the Remez algorithm for non-linear
+families], Numer.Math. 15 (1970), 382-391.
+
+Dunham, Ch.B., ['Convergence of the Fraser-Hart algorithm for rational
+Chebyshev approximation], Math. Comp. 29 (1975), no. 132, 1078-1082.
+
+G. L. Litvinov, ['Approximate construction of rational
+approximations and the effect of error autocorrection],
+Russian Journal of Mathematical Physics, vol.1, No. 3, 1994.
+
+[endsect][/section:remez The Remez Method]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/background/special_tut.qbk b/doc/background/special_tut.qbk
new file mode 100644
index 0000000..a57fac0
--- /dev/null
+++ b/doc/background/special_tut.qbk
@@ -0,0 +1,508 @@
+[section:special_tut Tutorial: How to Write a New Special Function]
+
+[section:special_tut_impl Implementation]
+
+In this section, we'll provide a "recipe" for adding a new special function to this library to make life easier for
+future authors wishing to contribute. We'll assume the function returns a single floating-point result, and takes
+two floating-point arguments. For the sake of exposition we'll give the function the name [~my_special].
+
+Normally, the implementation of such a function is split into two layers - a public user layer, and an internal
+implementation layer that does the actual work.
+The implementation layer is declared inside a `detail` namespace and has a simple signature:
+
+ namespace boost { namespace math { namespace detail {
+
+ template <class T, class Policy>
+ T my_special_imp(const T& a, const T&b, const Policy& pol)
+ {
+ /* Implementation goes here */
+ }
+
+ }}} // namespaces
+
+We'll come back to what can go inside the implementation later, but first let's look at the user layer.
+This consists of two overloads of the function, with and without a __Policy argument:
+
+ namespace boost{ namespace math{
+
+ template <class T, class U>
+ typename tools::promote_args<T, U>::type my_special(const T& a, const U& b);
+
+ template <class T, class U, class Policy>
+ typename tools::promote_args<T, U>::type my_special(const T& a, const U& b, const Policy& pol);
+
+ }} // namespaces
+
+Note how each argument has a different template type - this allows for mixed type arguments - the return
+type is computed from a traits class and is the "common type" of all the arguments after any integer
+arguments have been promoted to type `double`.
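+
+For example (purely illustrative), a mixed-type call such as
+
+    double result = boost::math::my_special(2, 3.5f);
+
+compiles because `tools::promote_args<int, float>::type` is `double`:
+the `int` argument is promoted to `double` before the computation proceeds.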
+
+The implementation of the non-policy overload is trivial:
+
+ namespace boost{ namespace math{
+
+ template <class T, class U>
+ inline typename tools::promote_args<T, U>::type my_special(const T& a, const U& b)
+ {
+ // Simply forward with a default policy:
+        // Simply forward with a default policy:
+        return my_special(a, b, policies::policy<>());
+ }
+
+ }} // namespaces
+
+The implementation of the other overload is somewhat more complex, as there's some meta-programming to do,
+but from a runtime perspective is still a one-line forwarding function. Here it is with comments explaining
+what each line does:
+
+ namespace boost{ namespace math{
+
+ template <class T, class U, class Policy>
+ inline typename tools::promote_args<T, U>::type my_special(const T& a, const U& b, const Policy& pol)
+ {
+ //
+ // We've found some standard library functions to misbehave if any FPU exception flags
+ // are set prior to their call, this code will clear those flags, then reset them
+ // on exit:
+ //
+ BOOST_FPU_EXCEPTION_GUARD
+ //
+ // The type of the result - the common type of T and U after
+ // any integer types have been promoted to double:
+ //
+ typedef typename tools::promote_args<T, U>::type result_type;
+ //
+ // The type used for the calculation. This may be a wider type than
+ // the result in order to ensure full precision:
+ //
+ typedef typename policies::evaluation<result_type, Policy>::type value_type;
+ //
+ // The type of the policy to forward to the actual implementation.
+ // We disable promotion of float and double as that's [possibly]
+ // happened already in the line above. Also reset to the default
+ // any policies we don't use (reduces code bloat if we're called
+ // multiple times with differing policies we don't actually use).
+ // Also normalise the type, again to reduce code bloat in case we're
+ // called multiple times with functionally identical policies that happen
+ // to be different types.
+ //
+ typedef typename policies::normalise<
+ Policy,
+ policies::promote_float<false>,
+ policies::promote_double<false>,
+ policies::discrete_quantile<>,
+ policies::assert_undefined<> >::type forwarding_policy;
+ //
+ // Whew. Now we can make the actual call to the implementation.
+ // Arguments are explicitly cast to the evaluation type, and the result
+ // passed through checked_narrowing_cast which handles things like overflow
+ // according to the policy passed:
+ //
+ return policies::checked_narrowing_cast<result_type, forwarding_policy>(
+ detail::my_special_imp(
+ static_cast<value_type>(a),
+              static_cast<value_type>(b),
+ forwarding_policy()),
+ "boost::math::my_special<%1%>(%1%, %1%)");
+ }
+
+ }} // namespaces
+
+We're now almost there, we just need to flesh out the details of the implementation layer:
+
+ namespace boost { namespace math { namespace detail {
+
+ template <class T, class Policy>
+ T my_special_imp(const T& a, const T&b, const Policy& pol)
+ {
+ /* Implementation goes here */
+ }
+
+ }}} // namespaces
+
+The following guidelines indicate what (other than basic arithmetic) can go in the implementation:
+
+* Error conditions (for example bad arguments) should be handled by calling one of the
+[link math_toolkit.error_handling.finding_more_information policy based error handlers].
+* Calls to standard library functions should be made unqualified (this allows argument
+dependent lookup to find standard library functions for user-defined floating point
+types such as those from __multiprecision). In addition, the macro `BOOST_MATH_STD_USING`
+should appear at the start of the function (note no semi-colon afterwards!) so that
+all the math functions in `namespace std` are visible in the current scope.
+* Calls to other special functions should be made as fully qualified calls, and include the
+policy parameter as the last argument, for example `boost::math::tgamma(a, pol)`.
+* Where possible, evaluation of series, continued fractions, polynomials, or root
+finding should use one of the [link math_toolkit.internals_overview boiler-plate functions]. In any case, after
+any iterative method, you should verify that the number of iterations did not exceed the
+maximum specified in the __Policy type, and if it did terminate as a result of exceeding the
+maximum, then the appropriate error handler should be called (see existing code for examples).
+* Numeric constants such as [pi] etc should be obtained via a call to the [link math_toolkit.constants appropriate function],
+for example: `constants::pi<T>()`.
+* Where tables of coefficients are used (for example for rational approximations), care should be taken
+to ensure these are initialized at program startup to ensure thread safety when using user-defined number types.
+See for example the use of `erf_initializer` in [@../../include/boost/math/special_functions/erf.hpp erf.hpp].
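+
+To illustrate that last point, here is a minimal sketch of the initialization
+idiom (modelled on `erf_initializer`; the names here are illustrative, not the
+library's): a static object whose constructor runs during dynamic initialization
+and so forces any coefficient tables for `T` to be built before `main` begins:
+
+    namespace boost { namespace math { namespace detail {
+
+    template <class T>
+    struct my_special_initializer
+    {
+       struct init
+       {
+          init()
+          {
+             // Calling the implementation once forces any lazily-built
+             // constants/coefficient tables for T to be constructed now,
+             // before any threads are running:
+             my_special_imp(T(1.5), T(0.5), policies::policy<>());
+          }
+          void force_instantiation() const {}
+       };
+       static const init initializer;
+       static void force_instantiation()
+       {
+          // Odr-use the static member so it is guaranteed to be constructed:
+          initializer.force_instantiation();
+       }
+    };
+
+    template <class T>
+    const typename my_special_initializer<T>::init my_special_initializer<T>::initializer;
+
+    }}} // namespaces
+
+The implementation (`my_special_imp`) would then call
+`my_special_initializer<T>::force_instantiation()` on entry.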
+
+Here are some other useful internal functions:
+
+[table
+[[function][Meaning]]
+[[`policies::digits<T, Policy>()`][Returns number of binary digits in T (possible overridden by the policy).]]
+[[`policies::get_max_series_iterations<Policy>()`][Maximum number of iterations for series evaluation.]]
+[[`policies::get_max_root_iterations<Policy>()`][Maximum number of iterations for root finding.]]
+[[`policies::get_epsilon<T, Policy>()`][Epsilon for type T, possibly overridden by the Policy.]]
+[[`tools::digits<T>()`][Returns the number of binary digits in T.]]
+[[`tools::max_value<T>()`][Equivalent to `std::numeric_limits<T>::max()`]]
+[[`tools::min_value<T>()`][Equivalent to `std::numeric_limits<T>::min()`]]
+[[`tools::log_max_value<T>()`][Equivalent to the natural logarithm of `std::numeric_limits<T>::max()`]]
+[[`tools::log_min_value<T>()`][Equivalent to the natural logarithm of `std::numeric_limits<T>::min()`]]
+[[`tools::epsilon<T>()`][Equivalent to `std::numeric_limits<T>::epsilon()`.]]
+[[`tools::root_epsilon<T>()`][Equivalent to the square root of `std::numeric_limits<T>::epsilon()`.]]
+[[`tools::forth_root_epsilon<T>()`][Equivalent to the fourth root of `std::numeric_limits<T>::epsilon()`.]]
+]
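+
+For instance (illustrative only, not taken from the library), inside an
+implementation these helpers let us guard an exponential against overflow:
+
+    if(z > tools::log_max_value<T>())
+       return policies::raise_overflow_error<T>(
+          "boost::math::my_special<%1%>(%1%, %1%)", 0, pol);
+    T e = exp(z); // safe: cannot overflow now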
+
+[endsect]
+
+[section:special_tut_test Testing]
+
+We work under the assumption that untested code doesn't work, so some tests for your new special function are in order;
+we'll divide these up into 3 main categories:
+
+[h4 Spot Tests]
+
+Spot tests consist of checking that the expected exception is generated when the inputs are in error (or
+otherwise generate undefined values), and checking any special values. We can check for expected exceptions
+with `BOOST_CHECK_THROW`, so for example if it's a domain error for the last parameter to be outside the range
+`[0,1]` then we might have:
+
+ BOOST_CHECK_THROW(my_special(0, -0.1), std::domain_error);
+ BOOST_CHECK_THROW(my_special(0, 1.1), std::domain_error);
+
+When the function has known exact values (typically integer values) we can use `BOOST_CHECK_EQUAL`:
+
+ BOOST_CHECK_EQUAL(my_special(1.0, 0.0), 0);
+ BOOST_CHECK_EQUAL(my_special(1.0, 1.0), 1);
+
+When the function has known values which are not exact (from a floating point perspective) then we can use
+`BOOST_CHECK_CLOSE_FRACTION`:
+
+ // Assumes 4 epsilon is as close as we can get to a true value of 2Pi:
+ BOOST_CHECK_CLOSE_FRACTION(my_special(0.5, 0.5), 2 * constants::pi<double>(), std::numeric_limits<double>::epsilon() * 4);
+
+[h4 Independent Test Values]
+
+If the function is implemented by some other known good source (for example Mathematica or its online versions
+[@http://functions.wolfram.com functions.wolfram.com] or [@http://www.wolframalpha.com www.wolframalpha.com])
+then it's a good idea to sanity check our implementation by having at least one independently generated value
+for each code branch our implementation may take. To slot these in nicely with our testing framework it's best to
+tabulate these like this:
+
+ // function values calculated on http://functions.wolfram.com/
+ static const boost::array<boost::array<T, 3>, 10> my_special_data = {{
+ {{ SC_(0), SC_(0), SC_(1) }},
+ {{ SC_(0), SC_(1), SC_(1.26606587775200833559824462521471753760767031135496220680814) }},
+ /* More values here... */
+ }};
+
+We'll see how to use this table and the meaning of the `SC_` macro later. One important point
+is to make sure that the input values have exact binary representations: so choose values such as
+1.5, 1.25, 1.125 etc. This ensures that if `my_special` is unusually sensitive in one area,
+we don't get apparently large errors just because the inputs are 0.5 ulp in error.
+
+[h4 Random Test Values]
+
+We can generate a large number of test values to check both for future regressions, and for
+accumulated rounding or cancellation error in our implementation. Ideally we would use an
+independent implementation for this (for example my_special may be defined directly in terms
+of other special functions but not implemented that way for performance or accuracy reasons).
+Alternatively we may use our own implementation directly, but with any special cases (asymptotic
+expansions etc) disabled. We have a set of [link math_toolkit.internals.test_data tools]
+to generate test data directly, here's a typical example:
+
+[import ../../example/special_data.cpp]
+[special_data_example]
+
+Typically several sets of data will be generated this way, including random values in some "normal"
+range, extreme values (very large or very small), and values close to any "interesting" behaviour
+of the function (singularities etc).
+
+[h4 The Test File Header]
+
+We split the actual test file into 2 distinct parts: a header that contains the testing code
+as a series of function templates, and the actual .cpp test driver that decides which types
+are tested, and sets the "expected" error rates for those types. It's done this way because:
+
+* We want to test with both built-in floating-point types, and with multiprecision types.
+However, both compile times and run times with the latter can be too long for the folks who run
+the tests to realistically cope with, so it makes sense to split the test into (at least)
+2 parts.
+* The definition of the SC_ macro used in our tables of data may differ depending on what type
+we're testing (see below). Again this is largely a matter of managing compile times as large tables
+of user-defined-types can take a crazy amount of time to compile with some compilers.
+
+The test header contains 2 functions:
+
+ template <class Real, class T>
+ void do_test(const T& data, const char* type_name, const char* test_name);
+
+ template <class T>
+ void test(T, const char* type_name);
+
+Before implementing those, we'll include the headers we'll need, and provide a default
+definition for the SC_ macro:
+
+ // A couple of Boost.Test headers in case we need any BOOST_CHECK_* macros:
+ #include <boost/test/unit_test.hpp>
+ #include <boost/test/floating_point_comparison.hpp>
+ // Our function to test:
+ #include <boost/math/special_functions/my_special.hpp>
+ // We need boost::array for our test data, plus a few headers from
+    // libs/math/test that contain our testing machinery:
+ #include <boost/array.hpp>
+ #include "functor.hpp"
+ #include "handle_test_result.hpp"
+ #include "table_type.hpp"
+
+ #ifndef SC_
+ #define SC_(x) static_cast<typename table_type<T>::type>(BOOST_JOIN(x, L))
+ #endif
+
+The easiest function to implement is the "test" function which is what we'll be calling
+from the test-driver program. It simply includes the files containing the tabular
+test data and calls the `do_test` function for each table, along with a description of what's
+being tested:
+
+ template <class T>
+ void test(T, const char* type_name)
+ {
+ //
+ // The actual test data is rather verbose, so it's in a separate file
+ //
+ // The contents are as follows, each row of data contains
+ // three items, input value a, input value b and my_special(a, b):
+ //
+ # include "my_special_1.ipp"
+
+    do_test<T>(my_special_1, type_name, "MySpecial Function: Mathematica Values");
+
+ # include "my_special_2.ipp"
+
+    do_test<T>(my_special_2, type_name, "MySpecial Function: Random Values");
+
+ # include "my_special_3.ipp"
+
+    do_test<T>(my_special_3, type_name, "MySpecial Function: Very Small Values");
+ }
+
+The function `do_test` takes each table of data and calculates values for each row
+of data, along with statistics for max and mean error etc.; most of this is handled
+by some boilerplate code:
+
+ template <class Real, class T>
+ void do_test(const T& data, const char* type_name, const char* test_name)
+ {
+ // Get the type of each row and each element in the rows:
+ typedef typename T::value_type row_type;
+ typedef Real value_type;
+
+ // Get a pointer to our function, we have to use a workaround here
+ // as some compilers require the template types to be explicitly
+ // specified, while others don't much like it if it is!
+ typedef value_type (*pg)(value_type, value_type);
+ #if defined(BOOST_MATH_NO_DEDUCED_FUNCTION_POINTERS)
+ pg funcp = boost::math::my_special<value_type, value_type>;
+ #else
+ pg funcp = boost::math::my_special;
+ #endif
+
+ // Somewhere to hold our results:
+ boost::math::tools::test_result<value_type> result;
+ // And some pretty printing:
+ std::cout << "Testing " << test_name << " with type " << type_name
+ << "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n";
+
+ //
+ // Test my_special against data:
+ //
+ result = boost::math::tools::test_hetero<Real>(
+ /* First argument is the table */
+ data,
+         /* Next comes our function pointer, plus the indexes of its arguments in the table */
+ bind_func<Real>(funcp, 0, 1),
+ /* Then the index of the result in the table - potentially we can test several
+ related functions this way, each having the same input arguments, and different
+ output values in different indexes in the table */
+ extract_result<Real>(2));
+ //
+ // Finish off with some boilerplate to check the results were within the expected errors,
+ // and pretty print the results:
+ //
+ handle_test_result(result, data[result.worst()], result.worst(), type_name, "boost::math::my_special", test_name);
+ }
+
+Now we just need to write the test driver program; at its most basic it looks something like this:
+
+ #include <boost/math/special_functions/math_fwd.hpp>
+ #include <boost/math/tools/test.hpp>
+ #include <boost/math/tools/stats.hpp>
+ #include <boost/type_traits.hpp>
+ #include <boost/array.hpp>
+ #include "functor.hpp"
+
+ #include "handle_test_result.hpp"
+ #include "test_my_special.hpp"
+
+ BOOST_AUTO_TEST_CASE( test_main )
+ {
+ //
+ // Test each floating point type, plus real_concept.
+ // We specify the name of each type by hand as typeid(T).name()
+ // often gives an unreadable mangled name.
+ //
+ test(0.1F, "float");
+ test(0.1, "double");
+ //
+ // Testing of long double and real_concept is protected
+ // by some logic to disable these for unsupported
+ // or problem compilers.
+ //
+ #ifndef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
+ test(0.1L, "long double");
+ #ifndef BOOST_MATH_NO_REAL_CONCEPT_TESTS
+ #if !BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582))
+ test(boost::math::concepts::real_concept(0.1), "real_concept");
+ #endif
+ #endif
+ #else
+ std::cout << "<note>The long double tests have been disabled on this platform "
+ "either because the long double overloads of the usual math functions are "
+ "not available at all, or because they are too inaccurate for these tests "
+       "to pass.</note>" << std::endl;
+ #endif
+ }
+
+That's almost all there is to it - except that if the above program is run it's very likely that
+all the tests will fail as the default maximum allowable error is 1 epsilon. So we'll
+define a function (don't forget to call it from the start of the `test_main` above) to
+up the limits to something sensible, based both on the function we're calling and on
+the particular tests plus the platform and compiler:
+
+ void expected_results()
+ {
+ //
+ // Define the max and mean errors expected for
+ // various compilers and platforms.
+ //
+ const char* largest_type;
+ #ifndef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
+ if(boost::math::policies::digits<double, boost::math::policies::policy<> >() == boost::math::policies::digits<long double, boost::math::policies::policy<> >())
+ {
+ largest_type = "(long\\s+)?double|real_concept";
+ }
+ else
+ {
+ largest_type = "long double|real_concept";
+ }
+ #else
+ largest_type = "(long\\s+)?double";
+ #endif
+ //
+    // We call add_expected_result for each error rate we wish to adjust; these tell
+    // handle_test_result what level of error is acceptable. We can have as many calls
+    // to add_expected_result as we need; each one establishes a rule for acceptable error,
+    // with rules set first given preference.
+ //
+ add_expected_result(
+ /* First argument is a regular expression to match against the name of the compiler
+ set in BOOST_COMPILER */
+ ".*",
+ /* Second argument is a regular expression to match against the name of the
+ C++ standard library as set in BOOST_STDLIB */
+ ".*",
+ /* Third argument is a regular expression to match against the name of the
+ platform as set in BOOST_PLATFORM */
+ ".*",
+        /* Fourth argument is the name of the type being tested, normally we will
+           only need to up the acceptable error rate for the widest floating
+           point type being tested */
+        largest_type,
+ /* Fifth argument is a regular expression to match against
+ the name of the group of data being tested */
+ "MySpecial Function:.*Small.*",
+ /* Sixth argument is a regular expression to match against the name
+ of the function being tested */
+ "boost::math::my_special",
+ /* Seventh argument is the maximum allowable error expressed in units
+ of machine epsilon passed as a long integer value */
+ 50,
+ /* Eighth argument is the maximum allowable mean error expressed in units
+ of machine epsilon passed as a long integer value */
+ 20);
+ }
+
+[h4 Testing Multiprecision Types]
+
+Testing of multiprecision types is handled by the test drivers in libs/multiprecision/test/math,
+please refer to these for examples. Note that these tests are run only occasionally as they take
+a lot of CPU cycles to build and run.
+
+[h4 Improving Compile Times]
+
+As noted above, these test programs can take a while to build as we're instantiating a lot of templates
+for several different types, and our test runners are already stretched to the limit, and probably
+using outdated "spare" hardware. There are two things we can do to speed things up:
+
+* Use a precompiled header.
+* Use separate compilation of our special function templates.
+
+We can make these changes by changing the list of includes from:
+
+ #include <boost/math/special_functions/math_fwd.hpp>
+ #include <boost/math/tools/test.hpp>
+ #include <boost/math/tools/stats.hpp>
+ #include <boost/type_traits.hpp>
+ #include <boost/array.hpp>
+ #include "functor.hpp"
+
+ #include "handle_test_result.hpp"
+
+To just:
+
+ #include <pch_light.hpp>
+
+And changing
+
+ #include <boost/math/special_functions/my_special.hpp>
+
+To:
+
+ #include <boost/math/special_functions/math_fwd.hpp>
+
+The Jamfile target that builds the test program will need the targets
+
+ test_instances//test_instances pch_light
+
+adding to its list of source dependencies (see the Jamfile for examples).
+
+Finally the project in libs/math/test/test_instances will need modifying
+to instantiate function `my_special`.
+
+These changes should be made last, when `my_special` is stable and the code is in Trunk.
+
+[h4 Concept Checks]
+
+Our concept checks verify that your function's implementation makes no assumptions that aren't
+required by our [link math_toolkit.real_concepts Real number conceptual requirements]. They also
+check for various common bugs and programming traps that we've fallen into over time. To
+add your function to these tests, edit libs/math/test/compile_test/instantiate.hpp to add
+calls to your function: there are 7 calls to each function, each with a different purpose.
+Search for something like "ibeta" or "gamma_p" and follow their examples.
+
+[endsect]
+
+[endsect]
+
+[/
+ Copyright 2013 John Maddock.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/complex/complex-tr1.qbk b/doc/complex/complex-tr1.qbk
new file mode 100644
index 0000000..86e67b4
--- /dev/null
+++ b/doc/complex/complex-tr1.qbk
@@ -0,0 +1,149 @@
+[def __effects [*Effects: ]]
+[def __formula [*Formula: ]]
+[def __exm1 '''<code>e<superscript>x</superscript> - 1</code>''']
+[def __ex '''<code>e<superscript>x</superscript></code>''']
+[def __te '''2ε''']
+
+[mathpart inverse_complex..Complex Number Functions]
+
+The following complex number algorithms are the inverses of trigonometric functions currently
+present in the C++ standard. Equivalents to these functions are part of the C99 standard, and
+are part of the [tr1].
+
+[section:complex_implementation Implementation and Accuracy]
+
+Although there are deceptively simple formulae available for all of these functions, a naive
+implementation that used these formulae would fail catastrophically for some input
+values. The Boost versions of these functions have been implemented using the methodology
+described in "Implementing the Complex Arcsine and Arccosine Functions Using Exception Handling"
+by T. E. Hull, Thomas F. Fairgrieve and Ping Tak Peter Tang, ACM Transactions on Mathematical Software,
+Vol. 23, No. 3, September 1997. This means that the functions are well defined over the entire
+complex number range, and produce accurate values even at the extremes of that range, whereas a naive
+formula would cause overflow or underflow to occur during the calculation, even though the result is
+actually a representable value. The maximum theoretical relative error for all of these functions
+is less than 9.5[epsilon] for every machine-representable point in the complex plane. Please refer to
+comments in the header files themselves and to the above mentioned paper for more information
+on the implementation methodology.
+
+[endsect]
+
+[section:asin asin]
+
+[h4 Header:]
+
+ #include <boost/math/complex/asin.hpp>
+
+[h4 Synopsis:]
+
+ template<class T>
+ std::complex<T> asin(const std::complex<T>& z);
+
+__effects returns the inverse sine of the complex number z.
+
+__formula [$../images/asin.png]
+
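+For example, a minimal usage sketch (the printed result is simply whatever
+`boost::math::asin` returns for this input):
+
+    #include <boost/math/complex/asin.hpp>
+    #include <complex>
+    #include <iostream>
+
+    int main()
+    {
+       std::complex<double> z(0.5, 0.5);
+       std::cout << boost::math::asin(z) << std::endl; // complex inverse sine of z
+    }
+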
+[endsect]
+
+[section:acos acos]
+
+[h4 Header:]
+
+ #include <boost/math/complex/acos.hpp>
+
+[h4 Synopsis:]
+
+ template<class T>
+ std::complex<T> acos(const std::complex<T>& z);
+
+__effects returns the inverse cosine of the complex number z.
+
+__formula [$../images/acos.png]
+
+[endsect]
+
+[section:atan atan]
+
+[h4 Header:]
+
+ #include <boost/math/complex/atan.hpp>
+
+[h4 Synopsis:]
+
+ template<class T>
+ std::complex<T> atan(const std::complex<T>& z);
+
+__effects returns the inverse tangent of the complex number z.
+
+__formula [$../images/atan.png]
+
+[endsect]
+
+[section:asinh asinh]
+
+[h4 Header:]
+
+ #include <boost/math/complex/asinh.hpp>
+
+[h4 Synopsis:]
+
+ template<class T>
+ std::complex<T> asinh(const std::complex<T>& z);
+
+__effects returns the inverse hyperbolic sine of the complex number z.
+
+__formula [$../images/asinh.png]
+
+[endsect]
+
+[section:acosh acosh]
+
+[h4 Header:]
+
+ #include <boost/math/complex/acosh.hpp>
+
+[h4 Synopsis:]
+
+ template<class T>
+ std::complex<T> acosh(const std::complex<T>& z);
+
+__effects returns the inverse hyperbolic cosine of the complex number z.
+
+__formula [$../images/acosh.png]
+
+[endsect]
+
+[section:atanh atanh]
+
+[h4 Header:]
+
+ #include <boost/math/complex/atanh.hpp>
+
+[h4 Synopsis:]
+
+ template<class T>
+ std::complex<T> atanh(const std::complex<T>& z);
+
+__effects returns the inverse hyperbolic tangent of the complex number z.
+
+__formula [$../images/atanh.png]
+
+[endsect]
+
+[section:complex_history History]
+
+* 2005/12/17: Added support for platforms with no meaningful numeric_limits<>::infinity().
+* 2005/12/01: Initial version, added as part of the TR1 library.
+
+
+[endsect]
+
+[endmathpart]
+
+[/
+ Copyright 2008, 2009 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+
diff --git a/doc/concepts/concepts.qbk b/doc/concepts/concepts.qbk
new file mode 100644
index 0000000..2723f07
--- /dev/null
+++ b/doc/concepts/concepts.qbk
@@ -0,0 +1,636 @@
+[section:high_precision Using Boost.Math with High-Precision Floating-Point Libraries]
+
+The special functions, distributions, constants and tools in this library
+can be used with a number of high-precision libraries, including:
+
+* __multiprecision
+* __e_float
+* __NTL
+* __GMP
+* __MPFR
+* __float128
+
+The last four have some license restrictions;
+only __multiprecision when using the `cpp_float` backend
+can provide an unrestricted [@http://www.boost.org/LICENSE_1_0.txt Boost] license.
+
+At present, the price of a free license is slightly lower speed.
+
+Of course, the main cost of higher precision is a large (usually at least
+hundred-fold) decrease in computation speed, together with big increases in memory use.
+
+Some libraries offer true
+[@http://en.wikipedia.org/wiki/Arbitrary-precision_arithmetic arbitrary-precision arithmetic]
+where the precision is limited only by available memory and compute time, but most are used
+at some arbitrarily-fixed precision, say 100 decimal digits, like __multiprecision `cpp_dec_float_100`.
+
+__multiprecision can operate in both ways, but the most popular choice is likely to be about a hundred
+decimal digits, though examples of computing about a million digits have been demonstrated.
+
+[section:why_high_precision Why use a high-precision library rather than built-in floating-point types?]
+
+For nearly all applications, the built-in floating-point types, `double`
+(and `long double` if this offers higher precision than `double`)
+offer enough precision, typically 15 to 16 significant decimal digits.
+
+Some reasons why one would want to use a higher precision:
+
+* A much more precise result (many more digits) is just a requirement.
+* The range of the computed value exceeds the range of the type: factorials are the textbook example.
+* Using `double` is (or may be) too inaccurate.
+* Using `long double` is (or may be) too inaccurate.
+* Using an extended-precision type implemented in software as
+[@http://en.wikipedia.org/wiki/Double-double_(arithmetic)#Double-double_arithmetic double-double]
+([@http://en.wikipedia.org/wiki/Darwin_(operating_system) Darwin]) is sometimes unpredictably inaccurate.
+* Loss of precision or inaccuracy caused by extreme arguments or cancellation error.
+* An accuracy as good as possible for a chosen built-in floating-point type is required.
+* As a reference value, for example, to determine the inaccuracy
+of a value computed with a built-in floating point type,
+(perhaps even using some quick'n'dirty algorithm).
+The accuracy of many functions and distributions in Boost.Math has been measured in this way
+from tables of very high precision (up to 1000 decimal digits).
+
+Many functions and distributions have differences from exact values
+that are only a few least significant bits - computation noise.
+Others, often those for which analytical solutions are not available,
+require approximations and iteration:
+these may lose several decimal digits of precision.
+
+Much larger loss of precision can occur for [@http://en.wikipedia.org/wiki/Boundary_case boundary]
+or [@http://en.wikipedia.org/wiki/Corner_case corner cases],
+often caused by [@http://en.wikipedia.org/wiki/Loss_of_significance cancellation errors].
+
+(Some of the worst and most common examples of
+[@http://en.wikipedia.org/wiki/Loss_of_significance cancellation error or loss of significance]
+can be avoided by using __complements: see __why_complements).
+
+If you require a value which is as accurate as can be represented in the floating-point type,
+and is thus the closest representable value and has an error less than 1/2 a
+[@http://en.wikipedia.org/wiki/Least_significant_bit least significant bit] or
+[@http://en.wikipedia.org/wiki/Unit_in_the_last_place ulp],
+it may be useful to use a higher-precision type,
+for example, `cpp_dec_float_50`, to generate this value.
+Conversion of this value to a built-in floating-point type (`float`, `double` or `long double`)
+will not cause any further loss of precision.
+A decimal digit string will also be 'read' precisely by the compiler
+into a built-in floating-point type to the nearest representable value.
+
+[note In contrast, reading a value from an `std::istream` into a built-in floating-point type
+is [*not guaranteed by the C++ Standard] to give the nearest representable value.]
+
+William Kahan coined the term
+[@http://en.wikipedia.org/wiki/Rounding#The_table-maker.27s_dilemma Table-Maker's Dilemma]
+for the problem of correctly rounding functions.
+Using a much higher precision (50 or 100 decimal digits)
+is a practical way of generating (almost always) correctly rounded values.
+
+[endsect] [/section:why_high_precision Why use a high-precision library rather than built-in floating-point types?]
+
+[section:use_multiprecision Using Boost.Multiprecision]
+
+[*All new projects are recommended to use __multiprecision.]
+
+[import ../../example/big_seventh.cpp]
+
+[big_seventh_example_1]
+
+[import ../../example/fft_sines_table.cpp]
+
+[fft_sines_table_example_1]
+
+The table output is:
+
+[fft_sines_table_example_output]
+
+[fft_sines_table_example_check]
+
+
+[/TODO another example needed here]
+
+[/import ../../example/ibeta_mp_example.cpp]
+
+[/ibeta_mp_example_1]
+
+[/The program output is:]
+
+[/ibeta_mp_output_1]
+
+[endsect] [/section:use_multiprecision Using Boost.Multiprecision]
+
+[section:float128 Using with GCC's __float128 datatype]
+
+At present support for GCC's native `__float128` datatype is extremely limited: the numeric constants
+will all work with that type, and that's about it. If you want to use the distributions or special
+functions then you will need to provide your own wrapper header that:
+
+* Provides std::numeric_limits<__float128> support.
+* Provides overloads of the standard library math functions for type `__float128` which forward to the libquadmath equivalents (see the sketch below).
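+
+Purely for illustration, the forwarding overloads in such a wrapper might look like
+this sketch (assuming libquadmath's `sqrtq`, `expq` and `logq` from `<quadmath.h>`):
+
+    #include <quadmath.h>
+
+    // Forward the usual math functions to their libquadmath equivalents:
+    inline __float128 sqrt(__float128 x) { return ::sqrtq(x); }
+    inline __float128 exp (__float128 x) { return ::expq(x); }
+    inline __float128 log (__float128 x) { return ::logq(x); }
+    // ... plus pow, sin, cos, floor, ceil, frexp, ldexp and so on,
+    // together with a std::numeric_limits<__float128> specialisation.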
+
+Ultimately these facilities should be provided by GCC and `libstdc++`.
+
+[endsect]
+
+[section:use_mpfr Using With MPFR or GMP - High-Precision Floating-Point Library]
+
+The special functions and tools in this library can be used with
+[@http://www.mpfr.org MPFR] (an arbitrary precision number type based on the __GMP),
+either via the bindings in [@../../../../boost/math/bindings/mpfr.hpp boost/math/bindings/mpfr.hpp],
+or via [@../../../../boost/math/bindings/mpreal.hpp boost/math/bindings/mpreal.hpp].
+
+[*New projects are recommended to use __multiprecision with GMP/MPFR backend instead.]
+
+In order to use these bindings you will need to have installed [@http://www.mpfr.org MPFR]
+plus its dependency the [@http://gmplib.org GMP library]. You will also need one of the
+two supported C++ wrappers for MPFR:
+[@http://math.berkeley.edu/~wilken/code/gmpfrxx/ gmpfrxx (or mpfr_class)],
+or [@http://www.holoborodko.com/pavel/mpfr/ mpfr-C++ (mpreal)].
+
+Unfortunately neither `mpfr_class` nor `mpreal` quite satisfy our conceptual requirements,
+so there is a very thin set of additional interfaces and some helper traits defined in
+[@../../../../boost/math/bindings/mpfr.hpp boost/math/bindings/mpfr.hpp] and
+[@../../../../boost/math/bindings/mpreal.hpp boost/math/bindings/mpreal.hpp]
+that you should use in place of including 'gmpfrxx.h' or 'mpreal.h' directly.
+The classes `mpfr_class` or `mpreal` are
+then usable unchanged once this header is included, so for example `mpfr_class`'s
+performance-enhancing expression templates are preserved and fully supported by this library:
+
+ #include <boost/math/bindings/mpfr.hpp>
+ #include <boost/math/special_functions/gamma.hpp>
+
+ int main()
+ {
+ mpfr_class::set_dprec(500); // 500 bit precision
+ //
+ // Note that the argument to tgamma is
+ // an expression template - that's just fine here.
+ //
+ mpfr_class v = boost::math::tgamma(sqrt(mpfr_class(2)));
+ std::cout << std::setprecision(50) << v << std::endl;
+ }
+
+Alternatively, usage with `mpreal` would look like this:
+
+ #include <boost/math/bindings/mpreal.hpp>
+ #include <boost/math/special_functions/gamma.hpp>
+
+ int main()
+ {
+ mpfr::mpreal::set_precision(500); // 500 bit precision
+ mpfr::mpreal v = boost::math::tgamma(sqrt(mpfr::mpreal(2)));
+ std::cout << std::setprecision(50) << v << std::endl;
+ }
+
+For those functions that are based upon the __lanczos, the bindings
+define a series of approximations with up to 61 terms and accuracy
+up to approximately 3e-113. This therefore sets the upper limit for accuracy
+of the majority of functions defined in this library when used with either `mpfr_class` or `mpreal`.
+
+There is a concept checking test program for mpfr support
+[@../../../../libs/math/test/mpfr_concept_check.cpp here] and
+[@../../../../libs/math/test/mpreal_concept_check.cpp here].
+
+[endsect] [/section:use_mpfr Using With MPFR / GMP - a High-Precision Floating-Point Library]
+
+[section:e_float Using e_float Library]
+
+__multiprecision was a development from the __e_float library by Christopher Kormanyos.
+
+e_float can still be used with the Boost.Math library via the header:
+
+ <boost/math/bindings/e_float.hpp>
+
+And the type `boost::math::ef::e_float`:
+this type is a thin wrapper class around ::e_float which provides the necessary
+syntactic sugar to make everything "just work".
+
+There is also a concept checking test program for e_float support
+[@../../../../libs/math/test/e_float_concept_check.cpp here].
+
+[*New projects are recommended to use __multiprecision with `cpp_float` backend instead.]
+
+[endsect] [/section:e_float Using e_float Library]
+
+[section:use_ntl Using NTL Library]
+
+[@http://shoup.net/ntl/doc/RR.txt NTL::RR]
+(an arbitrarily-fixed precision floating-point number type),
+can be used via the bindings in
+[@../../../../boost/math/bindings/rr.hpp boost/math/bindings/rr.hpp].
+For details, see [@http://shoup.net/ntl/ NTL: A Library for doing Number Theory by
+Victor Shoup].
+
+[*New projects are recommended to use __multiprecision instead.]
+
+Unfortunately `NTL::RR` doesn't quite satisfy our conceptual requirements,
+so there is a very thin wrapper class `boost::math::ntl::RR` defined in
+[@../../../../boost/math/bindings/rr.hpp boost/math/bindings/rr.hpp] that you
+should use in place of `NTL::RR`. The class is intended to be a drop-in
+replacement for the "real" NTL::RR that adds some syntactic sugar to keep
+this library happy, plus some of the standard library functions not implemented
+in NTL.
+
+For those functions that are based upon the __lanczos, the bindings
+define a series of approximations with up to 61 terms and accuracy
+up to approximately 3e-113. This therefore sets the upper limit for accuracy
+of the majority of functions defined in this library when used with `NTL::RR`.
+
+There is a concept checking test program for NTL support
+[@../../../../libs/math/test/ntl_concept_check.cpp here].
+
+[endsect] [/section:use_ntl Using With NTL - a High-Precision Floating-Point Library]
+
+[section:using_test Using without expression templates for Boost.Test and others]
+
+As noted in the __multiprecision documentation, certain program constructs will not compile
+when using expression templates. One example that many users may encounter
+is Boost.Test (1.54 and earlier) when using the macros BOOST_CHECK_CLOSE and BOOST_CHECK_CLOSE_FRACTION.
+
+If, for example, you wish to use any multiprecision type like `cpp_dec_float_50`
+in place of `double` to give more precision,
+you will need to override the default `boost::multiprecision::et_on` with
+`boost::multiprecision::et_off`.
+
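+For example, a type equivalent to `cpp_dec_float_50` but with expression templates
+disabled can be defined like this (a sketch; the full worked example follows below):
+
+    #include <boost/multiprecision/cpp_dec_float.hpp>
+
+    typedef boost::multiprecision::number<
+       boost::multiprecision::cpp_dec_float<50>,
+       boost::multiprecision::et_off> cpp_dec_float_50_noet;
+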
+[import ../../example/test_cpp_float_close_fraction.cpp]
+
+[expression_template_1]
+
+Full example code is at [@../../example/test_cpp_float_close_fraction.cpp test_cpp_float_close_fraction.cpp].
+
+[endsect] [/section:using_test Using without expression templates for Boost.Test and others]
+[endsect] [/section:high_precision Using With High-Precision Floating-Point Libraries]
+
+[section:real_concepts Conceptual Requirements for Real Number Types]
+
+The functions and statistical distributions in this library can be used with
+any type ['RealType] that meets the conceptual requirements given below. All
+the built-in floating-point types like `double` will meet these requirements.
+(Built-in types are also called __fundamental_types).
+
+User-defined types that meet the conceptual requirements can also be used.
+For example, with [link math_toolkit.high_precision.use_ntl a thin wrapper class]
+one of the types provided with [@http://shoup.net/ntl/ NTL (RR)] can be used.
+But now that __multiprecision library is available,
+this has become the preferred real-number type,
+typically __cpp_dec_float or __cpp_bin_float.
+
+Submissions of bindings to other extended-precision types would also still be welcome.
+
+The guiding principle behind these requirements is that a ['RealType]
+behaves just like a built-in floating-point type.
+
+[h4 Basic Arithmetic Requirements]
+
+These requirements are common to all of the functions in this library.
+
+In the following table /r/ is an object of type `RealType`, /cr/ and
+/cr2/ are objects
+of type `const RealType`, and /ca/ is an object of type `const arithmetic-type`
+(arithmetic types include all the built-in integers and floating point types).
+
+[table
+[[Expression][Result Type][Notes]]
+[[`RealType(cr)`][RealType]
+ [RealType is copy constructible.]]
+[[`RealType(ca)`][RealType]
+ [RealType is copy constructible from the arithmetic types.]]
+[[`r = cr`][RealType&][Assignment operator.]]
+[[`r = ca`][RealType&][Assignment operator from the arithmetic types.]]
+[[`r += cr`][RealType&][Adds cr to r.]]
+[[`r += ca`][RealType&][Adds ca to r.]]
+[[`r -= cr`][RealType&][Subtracts cr from r.]]
+[[`r -= ca`][RealType&][Subtracts ca from r.]]
+[[`r *= cr`][RealType&][Multiplies r by cr.]]
+[[`r *= ca`][RealType&][Multiplies r by ca.]]
+[[`r /= cr`][RealType&][Divides r by cr.]]
+[[`r /= ca`][RealType&][Divides r by ca.]]
+[[`-r`][RealType][Unary Negation.]]
+[[`+r`][RealType&][Identity Operation.]]
+[[`cr + cr2`][RealType][Binary Addition]]
+[[`cr + ca`][RealType][Binary Addition]]
+[[`ca + cr`][RealType][Binary Addition]]
+[[`cr - cr2`][RealType][Binary Subtraction]]
+[[`cr - ca`][RealType][Binary Subtraction]]
+[[`ca - cr`][RealType][Binary Subtraction]]
+[[`cr * cr2`][RealType][Binary Multiplication]]
+[[`cr * ca`][RealType][Binary Multiplication]]
+[[`ca * cr`][RealType][Binary Multiplication]]
+[[`cr / cr2`][RealType][Binary Division]]
+[[`cr / ca`][RealType][Binary Division]]
+[[`ca / cr`][RealType][Binary Division]]
+[[`cr == cr2`][bool][Equality Comparison]]
+[[`cr == ca`][bool][Equality Comparison]]
+[[`ca == cr`][bool][Equality Comparison]]
+[[`cr != cr2`][bool][Inequality Comparison]]
+[[`cr != ca`][bool][Inequality Comparison]]
+[[`ca != cr`][bool][Inequality Comparison]]
+[[`cr <= cr2`][bool][Less than equal to.]]
+[[`cr <= ca`][bool][Less than equal to.]]
+[[`ca <= cr`][bool][Less than equal to.]]
+[[`cr >= cr2`][bool][Greater than equal to.]]
+[[`cr >= ca`][bool][Greater than equal to.]]
+[[`ca >= cr`][bool][Greater than equal to.]]
+[[`cr < cr2`][bool][Less than comparison.]]
+[[`cr < ca`][bool][Less than comparison.]]
+[[`ca < cr`][bool][Less than comparison.]]
+[[`cr > cr2`][bool][Greater than comparison.]]
+[[`cr > ca`][bool][Greater than comparison.]]
+[[`ca > cr`][bool][Greater than comparison.]]
+[[`boost::math::tools::digits<RealType>()`][int]
+ [The number of digits in the significand of RealType.]]
+[[`boost::math::tools::max_value<RealType>()`][RealType]
+ [The largest representable number by type RealType.]]
+[[`boost::math::tools::min_value<RealType>()`][RealType]
+ [The smallest representable number by type RealType.]]
+[[`boost::math::tools::log_max_value<RealType>()`][RealType]
+ [The natural logarithm of the largest representable number by type RealType.]]
+[[`boost::math::tools::log_min_value<RealType>()`][RealType]
+ [The natural logarithm of the smallest representable number by type RealType.]]
+[[`boost::math::tools::epsilon<RealType>()`][RealType]
+ [The machine epsilon of RealType.]]
+]
+
+Note that:
+
+# The functions `log_max_value` and `log_min_value` can be
+synthesised from the others, and so no explicit specialisation is required.
+# The function `epsilon` can be synthesised from the others, so no
+explicit specialisation is required provided the precision
+of RealType does not vary at runtime (see the header
+[@../../../../boost/math/bindings/rr.hpp boost/math/bindings/rr.hpp]
+for an example where the precision does vary at runtime).
+# The functions `digits`, `max_value` and `min_value`, all get synthesised
+automatically from `std::numeric_limits`. However, if `numeric_limits`
+is not specialised for type RealType, then you will get a compiler error
+when code tries to use these functions, /unless/ you explicitly specialise them.
+For example if the precision of RealType varies at runtime, then
+`numeric_limits` support may not be appropriate, see
+[@../../../../boost/math/bindings/rr.hpp boost/math/bindings/rr.hpp] for examples.
+
+[warning
+If `std::numeric_limits<>` is *not specialized*
+for type /RealType/ then the default float precision of 6 decimal digits
+will be used by other Boost programs including:
+
+Boost.Test: giving misleading error messages like
+
+['"difference between {9.79796} and {9.79796} exceeds 5.42101e-19%".]
+
+Boost.LexicalCast and Boost.Serialization when converting the number
+to a string, causing potentially serious loss of accuracy on output.
+
+Although it might seem obvious that RealType should require `std::numeric_limits`
+to be specialized, this is not sensible for
+`NTL::RR` and similar classes where the [*number of digits is a runtime parameter]
+(whereas for `numeric_limits` everything has to be fixed at compile time).
+]
+
+[h4 Standard Library Support Requirements]
+
+Many (though not all) of the functions in this library make calls
+to standard library functions; the following table summarises the
+requirements. Note that most of the functions in this library
+will only call a small subset of the functions listed here, so if in
+doubt whether a user-defined type has enough standard library
+support to be usable, the best advice is to try it and see!
+
+In the following table /r/ is an object of type `RealType`,
+/cr1/ and /cr2/ are objects of type `const RealType`, and
+/i/ is an object of type `int`.
+
+[table
+[[Expression][Result Type]]
+[[`fabs(cr1)`][RealType]]
+[[`abs(cr1)`][RealType]]
+[[`ceil(cr1)`][RealType]]
+[[`floor(cr1)`][RealType]]
+[[`exp(cr1)`][RealType]]
+[[`pow(cr1, cr2)`][RealType]]
+[[`sqrt(cr1)`][RealType]]
+[[`log(cr1)`][RealType]]
+[[`frexp(cr1, &i)`][RealType]]
+[[`ldexp(cr1, i)`][RealType]]
+[[`cos(cr1)`][RealType]]
+[[`sin(cr1)`][RealType]]
+[[`asin(cr1)`][RealType]]
+[[`tan(cr1)`][RealType]]
+[[`atan(cr1)`][RealType]]
+[[`fmod(cr1, cr2)`][RealType]]
+[[`round(cr1)`][RealType]]
+[[`iround(cr1)`][int]]
+[[`trunc(cr1)`][RealType]]
+[[`itrunc(cr1)`][int]]
+]
+
+Note that the table above lists only those standard library functions known to
+be used (or likely to be used in the near future) by this library.
+The following functions: `acos`, `atan2`, `cosh`, `sinh`, `tanh`, `log10`,
+`lround`, `llround`, `ltrunc`, `lltrunc` and `modf`
+are not currently used, but may be if further special functions are added.
+
+Note that the `round`, `trunc` and `modf` functions are not part of the
+current C++ standard: they were added in C99 and are likely to be included
+in the next C++ standard. There are Boost versions of these provided
+as a backup, and the functions are always called unqualified so that
+argument-dependent-lookup can take place.
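+
+As a sketch of that idiom (`nearest_integer` is a hypothetical function, not part
+of the library):
+
+    #include <boost/math/special_functions/round.hpp>
+
+    template <class RealType>
+    RealType nearest_integer(RealType x)
+    {
+       using boost::math::round; // Boost fallback version.
+       return round(x);          // Unqualified call: argument-dependent-lookup
+                                 // may find a RealType-specific overload first.
+    }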
+
+In addition, for efficient and accurate results, a __lanczos is highly desirable.
+You may be able to adapt an existing approximation from
+[@../../../../boost/math/special_functions/lanczos.hpp
+boost/math/special_functions/lanczos.hpp] or
+[@../../../../boost/math/bindings/detail/big_lanczos.hpp
+boost/math/bindings/detail/big_lanczos.hpp]:
+in the former case you will need to change
+`static_cast`'s to `lexical_cast`'s, and the constants to /strings/
+(in order to ensure the coefficients aren't truncated to `long double`)
+and then specialise `lanczos_traits` for type T. Otherwise you may have to hack
+[@../../tools/lanczos_generator.cpp
+libs/math/tools/lanczos_generator.cpp] to find a suitable
+approximation for your RealType. The code will still compile if you don't do
+this, but both accuracy and efficiency will be greatly compromised in any
+function that makes use of the gamma\/beta\/erf family of functions.
+
+[endsect] [/section:real_concepts Conceptual Requirements for Real Number Types]
+
+[section:dist_concept Conceptual Requirements for Distribution Types]
+
+A ['DistributionType] is a type that implements the following conceptual
+requirements, and encapsulates a statistical distribution.
+
+Please note that this documentation should not be used as a substitute
+for the
+[link math_toolkit.dist_ref reference documentation], and
+[link math_toolkit.stat_tut tutorial] of the statistical
+distributions.
+
+In the following table, ['d] is an object of type `DistributionType`,
+['cd] is an object of type `const DistributionType` and ['cr] is an
+object of a type convertible to `RealType`.
+
+[table
+[[Expression][Result Type][Notes]]
+[[DistributionType::value_type][RealType]
+ [The real-number type /RealType/ upon which the distribution operates.]]
+[[DistributionType::policy_type][RealType]
+ [The __Policy to use when evaluating functions that depend on this distribution.]]
+[[d = cd][Distribution&][Distribution types are assignable.]]
+[[Distribution(cd)][Distribution][Distribution types are copy constructible.]]
+[[pdf(cd, cr)][RealType][Returns the PDF of the distribution.]]
+[[cdf(cd, cr)][RealType][Returns the CDF of the distribution.]]
+[[cdf(complement(cd, cr))][RealType]
+ [Returns the complement of the CDF of the distribution,
+ the same as: `1-cdf(cd, cr)`]]
+[[quantile(cd, cr)][RealType][Returns the quantile (or percentile) of the distribution.]]
+[[quantile(complement(cd, cr))][RealType]
+ [Returns the quantile (or percentile) of the distribution, starting from
+ the complement of the probability, the same as: `quantile(cd, 1-cr)`]]
+[[chf(cd, cr)][RealType][Returns the cumulative hazard function of the distribution.]]
+[[hazard(cd, cr)][RealType][Returns the hazard function of the distribution.]]
+[[kurtosis(cd)][RealType][Returns the kurtosis of the distribution.]]
+[[kurtosis_excess(cd)][RealType][Returns the kurtosis excess of the distribution.]]
+[[mean(cd)][RealType][Returns the mean of the distribution.]]
+[[mode(cd)][RealType][Returns the mode of the distribution.]]
+[[skewness(cd)][RealType][Returns the skewness of the distribution.]]
+[[standard_deviation(cd)][RealType][Returns the standard deviation of the distribution.]]
+[[variance(cd)][RealType][Returns the variance of the distribution.]]
+]
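+
+Generic code written against this concept needs nothing beyond the operations
+above. For example (a sketch; `median_by_quantile` is hypothetical, and
+`students_t` serves purely as a concrete distribution to call it with):
+
+    #include <boost/math/distributions/students_t.hpp>
+
+    template <class Distribution>
+    typename Distribution::value_type
+    median_by_quantile(const Distribution& d)
+    {
+       // The quantile at probability 0.5 is the median:
+       return quantile(d, typename Distribution::value_type(0.5));
+    }
+
+    // Usage:
+    //   boost::math::students_t dist(5);
+    //   double m = median_by_quantile(dist);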
+
+[endsect] [/ section:dist_concept Conceptual Requirements for Distribution Types]
+
+[section:archetypes Conceptual Archetypes for Reals and Distributions]
+
+There are a few concept archetypes available:
+
+* Real concept for floating-point types.
+* Distribution concept for statistical distributions.
+
+[h5:real_concept Real concept]
+
+`std_real_concept` is an archetype for the Real types,
+including the built-in `float`, `double` and `long double`.
+
+``#include <boost/concepts/std_real_concept.hpp>``
+
+ namespace boost{
+ namespace math{
+ namespace concepts
+ {
+ class std_real_concept;
+ }
+ }} // namespaces
+
+
+The main purpose in providing this type is to verify
+that standard library functions are found via a using declaration -
+bringing those functions into the current scope -
+and not just because they happen to be in global scope.
+
+In order to ensure that a call to, say, `pow` can be found
+either via argument dependent lookup, or failing that
+in the std namespace, all calls to standard library functions
+are unqualified, with the std:: versions found via a `using` declaration
+to make them visible in the current scope. Unfortunately it's all
+too easy to forget the `using` declaration, and call the double version of
+the function that happens to be in the global scope by mistake.
+
+For example if the code calls ::pow rather than std::pow,
+the code will cleanly compile, but truncation of long doubles to
+double will cause a significant loss of precision.
+In contrast a template instantiated with std_real_concept will *only*
+compile if all the standard library functions used have
+been brought into the current scope with a using declaration.
+
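+The idiom which `std_real_concept` is designed to enforce looks like this
+(a sketch; `my_func` is a hypothetical template):
+
+    #include <cmath>
+
+    template <class T>
+    T my_func(T x)
+    {
+       using std::pow;      // Brings std::pow into scope...
+       return pow(x, T(3)); // ...while the unqualified call still lets
+                            // argument-dependent-lookup find UDT overloads.
+    }
+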
+[h6 Testing the real concept]
+
+There is a test program
+[@../../test/std_real_concept_check.cpp libs/math/test/std_real_concept_check.cpp]
+that instantiates every template in this library with type
+`std_real_concept` to verify its usage of standard library functions.
+
+``#include <boost/math/concepts/real_concept.hpp>``
+
+ namespace boost{
+ namespace math{
+ namespace concepts{
+
+ class real_concept;
+
+ }}} // namespaces
+
+`real_concept` is an archetype for
+[link math_toolkit.real_concepts user defined real types];
+it declares its standard library functions in its own
+namespace: these will only be found if they are called unqualified,
+allowing argument dependent lookup to locate them. In addition
+this type is usable at runtime:
+this allows code that would not otherwise be exercised by the built-in
+floating point types to be tested. There is no std::numeric_limits<>
+support for this type, since numeric_limits is not a conceptual requirement
+for [link math_toolkit.real_concepts RealType]s.
+
+NTL RR is an example of a type meeting the requirements that this type
+models, but note that use of a thin wrapper class is required: refer to
+[link math_toolkit.high_precision.use_ntl "Using With NTL - a High-Precision Floating-Point Library"].
+
+There is no specific test case for type `real_concept`; instead, since this
+type is usable at runtime, each individual test case, as well as testing
+`float`, `double` and `long double`, also tests `real_concept`.
+
+[h6:distribution_concept Distribution Concept]
+
+Distribution Concept models statistical distributions.
+
+``#include <boost/math/concepts/distribution.hpp>``
+
+ namespace boost{
+ namespace math{
+ namespace concepts
+ {
+ template <class RealType>
+ class distribution_archetype;
+
+ template <class Distribution>
+ struct DistributionConcept;
+
+ }}} // namespaces
+
+The class template `distribution_archetype` is a model of the
+[link math_toolkit.dist_concept Distribution concept].
+
+The class template `DistributionConcept` is a
+[@../../../../libs/concept_check/index.html concept checking class]
+for distribution types.
+
+[h6 Testing the distribution concept]
+
+The test program
+[@../../test/compile_test/distribution_concept_check.cpp distribution_concept_check.cpp]
+is responsible for using `DistributionConcept` to verify that all the
+distributions in this library conform to the
+[link math_toolkit.dist_concept Distribution concept].
+
+The class template `DistributionConcept` verifies the existence
+(but not proper function) of the non-member accessors
+required by the [link math_toolkit.dist_concept Distribution concept].
+These are checked by calls like
+
+    v = pdf(dist, x); // (Result v is ignored).
+
+And in addition, those that accept two arguments do the right thing when the
+arguments are of different types (the result type is always the same as the
+distribution's value_type). (This is implemented by some additional
+forwarding-functions in derived_accessors.hpp, so that there is no need for
+any code changes. Likewise boilerplate versions of the
+hazard\/chf\/coefficient_of_variation functions are implemented in
+there too.)
+
+[endsect] [/section:archetypes Conceptual Archetypes for Reals and Distributions]
+[/
+ Copyright 2006, 2010, 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+
+
+
diff --git a/doc/constants/constants.qbk b/doc/constants/constants.qbk
new file mode 100644
index 0000000..99db336
--- /dev/null
+++ b/doc/constants/constants.qbk
@@ -0,0 +1,746 @@
+[mathpart constants..Mathematical Constants]
+
+[section:constants_intro Introduction]
+
+Boost.Math provides a collection of mathematical constants.
+
+[h4 Why use Boost.Math mathematical constants?]
+
+* Readable. For the very many jobs just using built-in types like `double`, you can just write expressions like
+``double area = pi * r * r;``
+(If that's all you want, jump directly to [link math_toolkit.tutorial.non_templ use in non-template code]!)
+* Effortless - avoiding a search of reference sources.
+* Usable with both builtin floating point types, and user-defined, possibly extended precision, types such as
+NTL, MPFR/GMP, mp_float: in the latter case the constants are computed to the necessary precision and then cached.
+* Accurate - ensuring that the values are as accurate as possible for the
+chosen floating-point type
+ * No loss of accuracy from repeated rounding of intermediate computations.
+ * Result is computed with higher precision and only rounded once.
+ * Less risk of inaccurate result from functions pow, trig and log at [@http://en.wikipedia.org/wiki/Corner_case corner cases].
+ * Less risk of [@http://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html cancellation error].
+* Portable - as possible between different systems using different floating-point precisions:
+see [link math_toolkit.tutorial.templ use in template code].
+* Tested - by comparison with other published sources, or separately computed at long double precision.
+* Faster - can avoid (re-)calculation at runtime.
+ * If the value returned is a builtin type then it's returned by value as a `constexpr` (C++11 feature, if available).
+ * If the value is computed and cached (or constructed from a string representation and cached), then it's returned by constant reference.[br]
+This can be significant if:
+ * Functions pow, trig or log are used.
+ * Inside an inner loop.
+ * Using a high-precision UDT like __multiprecision.
+ * Compiler optimizations possible with built-in types, especially `double`, are not available.
+
+[endsect] [/section:intro Introduction]
+
+[section:tutorial Tutorial]
+
+[section:non_templ Use in non-template code]
+
+When using the math constants at your chosen fixed precision in non-template code,
+you can simply add a `using namespace` declaration, for example,
+`using namespace boost::math::double_constants`,
+to make the constants of the correct precision for your code
+visible in the current scope, and then use each constant ['as a simple variable - sans brackets]:
+
+ #include <boost/math/constants/constants.hpp>
+
+ double area(double r)
+ {
+ using namespace boost::math::double_constants;
+ return pi * r * r;
+ }
+
+Had our function been written as taking a `float` rather than a `double`,
+we could have written instead:
+
+ #include <boost/math/constants/constants.hpp>
+
+ float area(float r)
+ {
+ using namespace boost::math::float_constants;
+ return pi * r * r;
+ }
+
+Likewise, constants that are suitable for use at `long double` precision
+are available in the namespace `boost::math::long_double_constants`.
+
+You can see the full list of available constants at [link math_toolkit.constants].
+
+Some examples of using constants are at [@../../example/constants_eg1.cpp constants_eg1].
+
+[endsect] [/section:non_templ Use in non-template code]
+
+[section:templ Use in template code]
+
+When using the constants inside a function template, we need to ensure that
+we use a constant of the correct precision for our template parameters.
+We can do this by calling the function-template versions, `pi<FPType>()`, of the constants
+like this:
+
+ #include <boost/math/constants/constants.hpp>
+
+ template <class Real>
+ Real area(Real r)
+ {
+ using namespace boost::math::constants;
+ return pi<Real>() * r * r;
+ }
+
+Although this syntax is a little less "cute" than the non-template version,
+the code is no less efficient
+(at least for the built-in types `float`, `double` and `long double`) :
+the function template versions of the constants are simple inline functions that
+return a constant of the correct precision for the type used. In addition, these
+functions are declared `constexpr` for those compilers that support this, allowing
+the result to be used in constant-expressions provided the template argument is a literal type.
+
+[tip Keep in mind the difference between the variable version,
+just `pi`, and the template-function version:
+the template-function requires both a <[~floating-point-type]>
+and function call `()` brackets, for example: `pi<double>()`.
+You cannot write `double p = pi<>()`, nor `double p = pi()`.]
+
+[note You can always use [*both] variable and template-function versions
+[*provided calls are fully qualified], for example:
+``
+double my_pi1 = boost::math::constants::pi<double>();
+double my_pi2 = boost::math::double_constants::pi;
+``
+]
+
+[warning It may be tempting to simply define
+``
+using namespace boost::math::double_constants;
+using namespace boost::math::constants;
+``
+but if you do bring both namespaces into scope, this will, of course, create ambiguity!
+``
+double my_pi = pi(); // error C2872: 'pi' : ambiguous symbol
+double my_pi2 = pi; // Context does not allow for disambiguation of overloaded function
+``
+Although the mistake above is fairly obvious,
+it is also not too difficult to do this accidentally, or worse, create it in someone else's code.
+
+Therefore it is prudent to avoid this risk by [*localising the scope of such definitions], as shown above.]
+
+[tip Be very careful with the type provided as parameter.
+For example, providing an [*integer] instead of a floating-point type can be disastrous (a C++ feature).
+
+``cout << "Area = " << area(2) << endl; // Area = 12!!!``
+
+You should get a compiler warning
+[pre
+warning : 'return' : conversion from 'double' to 'int', possible loss of data
+] [/pre]
+Failure to heed this warning can lead to very wrong answers!
+
+You can also avoid this by being explicit about the type of `Area`.
+``cout << "Area = " << area<double>(2) << endl; // Area = 12.566371``
+]
+
+[endsect] [/section:templ Use in template code]
+
+[section:user_def Use With User-Defined Types]
+
+The most common example of a high-precision user-defined type will probably be __multiprecision.
+
+The syntax for using the function-call constants with user-defined types is the same
+as it is in template code, which is to say we use:
+
+ #include <boost/math/constants/constants.hpp>
+
+ boost::math::constants::pi<UserDefinedType>();
+
+For example:
+
+ boost::math::constants::pi<boost::multiprecision::cpp_dec_float_50>();
+
+giving [pi] with a precision of 50 decimal digits.
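+
+A complete little program along these lines might look like this sketch:
+
+    #include <boost/multiprecision/cpp_dec_float.hpp>
+    #include <boost/math/constants/constants.hpp>
+    #include <iostream>
+    #include <iomanip>
+    #include <limits>
+
+    int main()
+    {
+       using boost::multiprecision::cpp_dec_float_50;
+       std::cout << std::setprecision(std::numeric_limits<cpp_dec_float_50>::digits10)
+          << boost::math::constants::pi<cpp_dec_float_50>() << std::endl;
+    }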
+
+However, since the precision of the user-defined type may be much greater than that
+of the built-in floating point types, how the value returned is created is as follows:
+
+* If the precision of the type is known at compile time:
+    * If the precision is less than or equal to that of a `float` and the type is constructible from a `float`
+    then our code returns a `float` literal. If the user-defined type is a literal type
+    then the function call that returns the constant will be a `constexpr`.
+    * If the precision is less than or equal to that of a `double` and the type is constructible from a `double`
+    then our code returns a `double` literal. If the user-defined type is a literal type
+    then the function call that returns the constant will be a `constexpr`.
+    * If the precision is less than or equal to that of a `long double` and the type is constructible from a `long double`
+    then our code returns a `long double` literal. If the user-defined type is a literal type
+    then the function call that returns the constant will be a `constexpr`.
+    * If the precision is less than or equal to that of a `__float128` (and the compiler supports such a type)
+    and the type is constructible from a `__float128`
+    then our code returns a `__float128` literal. If the user-defined type is a literal type
+    then the function call that returns the constant will be a `constexpr`.
+ * If the precision is less than 100 decimal digits, then the constant will be constructed
+ (just the once, then cached in a thread-safe manner) from a string representation of the constant.
+ In this case the value is returned as a const reference to the cached value.
+ * Otherwise the value is computed (just once, then cached in a thread-safe manner).
+ In this case the value is returned as a const reference to the cached value.
+* If the precision is unknown at compile time then:
+ * If the runtime precision (obtained from a call to `boost::math::tools::digits<T>()`) is
+ less than 100 decimal digits, then the constant is constructed "on the fly" from the string
+ representation of the constant.
+    * Otherwise the value is constructed "on the fly" by calculating the value of the constant
+ using the current default precision of the type. Note that this can make use of the constants
+ rather expensive.
+
+In addition, it is possible to pass a `Policy` type as a second template argument, and use this to control
+the precision:
+
+ #include <boost/math/constants/constants.hpp>
+
+ typedef boost::math::policies::policy<boost::math::policies::digits2<80> > my_policy_type;
+ boost::math::constants::pi<MyType, my_policy_type>();
+
+[note Boost.Math doesn't know how to control the internal precision of `MyType`; the policy
+just controls how the selection process above is carried out, and the calculation precision
+if the result is computed.]
+
+It is also possible to control which method is used to construct the constant by specialising
+the traits class `construction_traits`:
+
+    namespace boost{ namespace math{ namespace constants{
+
+ template <class T, class Policy>
+ struct construction_traits
+ {
+ typedef mpl::int_<N> type;
+ };
+
+ }}} // namespaces
+
+Where ['N] takes one of the following values:
+
+[table
+[[['N]][Meaning]]
+[[0][The precision is unavailable at compile time;
+either construct from a decimal digit string or calculate on the fly depending upon the runtime precision.]]
+[[1][Return a float precision constant.]]
+[[2][Return a double precision constant.]]
+[[3][Return a long double precision constant.]]
+[[4][Construct the result from the string representation, and cache the result.]]
+[[Any other value ['N]][Sets the compile time precision to ['N] bits.]]
+]
+
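+For example, to force a (hypothetical) type `MyType` to always be constructed from
+the string representation and cached, one might write a sketch like:
+
+    namespace boost{ namespace math{ namespace constants{
+
+    template <class Policy>
+    struct construction_traits<MyType, Policy>
+    {
+       typedef mpl::int_<4> type; // Construct from string, cache the result.
+    };
+
+    }}} // namespaces
+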
+[h5 Custom Specializing a constant]
+
+In addition, for user-defined types that need special handling, it's possible to partially-specialize
+the internal structure used by each constant. For example, suppose we're using the C++ wrapper around MPFR
+`mpfr_class`: this has its own representation of Pi which we may well wish to use in place of the above
+mechanism. We can achieve this by specialising the class template `boost::math::constants::detail::constant_pi`:
+
+ namespace boost{ namespace math{ namespace constants{ namespace detail{
+
+ template<>
+ struct constant_pi<mpfr_class>
+ {
+ template<int N>
+ static mpfr_class get(const mpl::int_<N>&)
+ {
+ // The template param N is one of the values in the table above,
+ // we can either handle all cases in one as is the case here,
+ // or overload "get" for the different options.
+ mpfr_class result;
+ mpfr_const_pi(result.get_mpfr_t(), GMP_RNDN);
+ return result;
+ }
+ };
+
+ }}}} // namespaces
+
+[h5 Diagnosing what meta-programmed code is doing]
+
+Finally, since it can be tricky to diagnose what meta-programmed code is doing, there is a
+diagnostic routine that prints information about how this library will handle a specific type;
+it can be used like this:
+
+ #include <boost/math/constants/info.hpp>
+
+ int main()
+ {
+ boost::math::constants::print_info_on_type<MyType>();
+ }
+
+If you wish, you can also pass an optional std::ostream argument to the `print_info_on_type` function.
+Typical output for a user-defined type looks like this:
+
+[pre
+Information on the Implementation and Handling of
+Mathematical Constants for Type class boost::math::concepts::real_concept
+
+Checking for std::numeric_limits<class boost::math::concepts::real_concept> specialisation: no
+boost::math::policies::precision<class boost::math::concepts::real_concept, Policy>
+reports that there is no compile type precision available.
+boost::math::tools::digits<class boost::math::concepts::real_concept>()
+reports that the current runtime precision is
+53 binary digits.
+No compile time precision is available, the construction method
+will be decided at runtime and results will not be cached
+- this may lead to poor runtime performance.
+Current runtime precision indicates that
+the constant will be constructed from a string on each call.
+]
+
+[endsect] [/section:user_def Use With User Defined Types]
+
+[endsect] [/section:tutorial Tutorial]
+
+[section:constants The Mathematical Constants]
+
+This section lists the mathematical constants, their use(s) (and sometimes rationale for their inclusion).
+[table Mathematical Constants
+[[name] [formula] [Value (6 decimals)] [Uses and Rationale]]
+[[[*Rational fractions]] [] [] [] ]
+[[half] [1/2] [0.5] [] ]
+[[third] [1/3] [0.333333] [] ]
+[[two_thirds] [2/3] [0.666667] [] ]
+[[three_quarters] [3/4] [0.75] [] ]
+
+[[[*two and related]] [] [] [] ]
+[[root_two] [[radic]2] [1.41421] [] ]
+[[root_three] [[radic]3] [1.73205] [] ]
+[[half_root_two] [[radic]2 /2] [0.707106] [] ]
+[[ln_two] [ln(2)] [0.693147] [] ]
+[[ln_ten] [ln(10)] [2.30258] [] ]
+[[ln_ln_two] [ln(ln(2))] [-0.366512] [Gumbel distribution median] ]
+[[root_ln_four] [[radic]ln(4)] [1.177410] [] ]
+[[one_div_root_two] [1/[radic]2] [0.707106] [] ]
+
+[[[*[pi] and related]] [] [] [] ]
+[[pi] [pi] [3.14159] [Ubiquitous. Archimedes constant [@http://en.wikipedia.org/wiki/Pi [pi]]]]
+[[half_pi] [[pi]/2] [1.570796] [] ]
+[[third_pi] [[pi]/3] [1.04719] [] ]
+[[sixth_pi] [[pi]/6] [0.523598] [] ]
+[[two_pi] [2[pi]] [6.28318] [Many uses, most simply, circumference of a circle]]
+[[two_thirds_pi] [2/3 [pi]] [2.09439] [[@http://en.wikipedia.org/wiki/Sphere#Volume_of_a_sphere volume of a hemi-sphere] = 2/3 [pi] r[cubed]]]
+[[three_quarters_pi] [3/4 [pi]] [2.35619] [ = 3/4 [pi] ]]
+[[four_thirds_pi] [4/3 [pi]] [4.18879] [[@http://en.wikipedia.org/wiki/Sphere#Volume_of_a_sphere volume of a sphere] = 4/3 [pi] r[cubed]]]
+[[one_div_two_pi] [1/(2[pi])] [0.159155] [Widely used]]
+[[root_pi] [[radic][pi]][1.77245] [Widely used]]
+[[root_half_pi] [[radic]([pi]/2)] [1.25331] [Widely used]]
+[[root_two_pi][[radic](2[pi])] [2.50662] [Widely used]]
+[[one_div_root_pi] [1/[radic][pi]] [0.564189] [] ]
+[[one_div_root_two_pi] [1/[radic](2[pi])] [0.398942] [] ]
+[[root_one_div_pi] [[radic](1/[pi])] [0.564189] [] ]
+[[pi_minus_three] [[pi]-3] [0.141593] [] ]
+[[four_minus_pi] [4 -[pi]] [0.858407] [] ]
+[[pi_pow_e] [[pi][super e]] [22.4591] [] ]
+
+[[pi_sqr] [[pi][super 2]] [9.86960] [] ]
+[[pi_sqr_div_six] [[pi][super 2]/6] [1.64493] [] ]
+[[pi_cubed] [[pi][super 3]] [31.00627] [] ]
+[[cbrt_pi] [[radic][super 3] [pi]] [1.46459] [] ]
+[[one_div_cbrt_pi] [1/[radic][super 3] [pi]] [0.682784] [] ]
+
+[[[*Euler's e and related]] [] [] [] ]
+[[e] [e] [2.71828] [[@http://en.wikipedia.org/wiki/E_(mathematical_constant) Euler's constant e]] ]
+[[exp_minus_half] [e [super -1/2]] [0.606530] [] ]
+[[e_pow_pi] [e [super [pi]]] [23.14069] [] ]
+[[root_e] [[radic] e] [1.64872] [] ]
+[[log10_e] [log10(e)] [0.434294] [] ]
+[[one_div_log10_e] [1/log10(e)] [2.30258] [] ]
+
+[[[*Trigonometric]] [] [] [] ]
+[[degree] [radians = [pi] / 180] [0.017453] [] ]
+[[radian] [degrees = 180 / [pi]] [57.2957] [] ]
+[[sin_one] [sin(1)] [0.841470] [] ]
+[[cos_one] [cos(1)] [0.54030] [] ]
+[[sinh_one] [sinh(1)] [1.17520] [] ]
+[[cosh_one] [cosh(1)] [1.54308] [] ]
+
+[[[*Phi]] [] [] [[@http://en.wikipedia.org/wiki/Golden_ratio Phidias golden ratio]] ]
+[[phi] [(1 + [radic]5) /2] [1.61803] [finance] ]
+[[ln_phi] [ln([phi])] [0.48121] [] ]
+[[one_div_ln_phi] [1/ln([phi])] [2.07808] [] ]
+
+[[[*Euler's Gamma]] [] [] [] ]
+[[euler] [euler] [0.577215] [[@http://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant Euler-Mascheroni gamma constant]] ]
+[[one_div_euler] [1/euler] [1.73245] [] ]
+[[euler_sqr] [euler[super 2]] [0.333177] [] ]
+
+[[[*Misc]] [] [] [] ]
+[[zeta_two] [[zeta](2)] [1.64493] [[@http://en.wikipedia.org/wiki/Riemann_zeta_function Riemann zeta function]] ]
+[[zeta_three] [[zeta](3)] [1.20205] [[@http://en.wikipedia.org/wiki/Riemann_zeta_function Riemann zeta function]] ]
+[[catalan] [['K]] [0.915965] [[@http://mathworld.wolfram.com/CatalansConstant.html Catalan (or Glaisher) combinatorial constant] ]]
+[[glaisher] [['A]] [1.28242] [[@https://oeis.org/A074962/constant Decimal expansion of Glaisher-Kinkelin constant] ]]
+[[khinchin] [['k]] [2.685452] [[@https://oeis.org/A002210/constant Decimal expansion of Khinchin constant] ]]
+
+[[extreme_value_skewness] [12[radic]6 [zeta](3)/ [pi][super 3]] [1.139547] [Extreme value distribution] ]
+[[rayleigh_skewness] [2[radic][pi]([pi]-3)/(4 - [pi])[super 3/2]] [0.631110] [Rayleigh distribution skewness] ]
+[[rayleigh_kurtosis_excess] [-(6[pi][super 2]-24[pi]+16)/(4-[pi])[super 2]] [0.245089] [[@http://en.wikipedia.org/wiki/Rayleigh_distribution Rayleigh distribution kurtosis excess]] ]
+[[rayleigh_kurtosis] [3+(6[pi][super 2]-24[pi]+16)/(4-[pi])[super 2]] [3.245089] [Rayleigh distribution kurtosis] ]
+
+] [/table]
+
+
+[note Integer values are [*not included] in this list of math constants, however interesting they may be,
+because they can be so easily and exactly constructed, even for UDTs, for example: `static_cast<cpp_float>(42)`.]
+
+[tip If you know the approximate value of the constant, you can search for the value to find the name chosen by Boost.Math in this table.]
+[tip Bernoulli numbers are available at __bernoulli_numbers.]
+[tip Factorials are available at __factorial.]
+
+[endsect] [/section:constants The constants]
+
+[section:new_const Defining New Constants]
+
+The library provides some helper code to assist in defining new constants;
+the process for defining a constant called `my_constant` goes like this:
+
+1. [*Define a function that calculates the value of the constant].
+This should be a template function, and be placed in `boost/math/constants/calculate_constants.hpp`
+if the constant is to be added to this library,
+or else defined at the top of your source file if not.
+
+The function should look like this:
+
+ namespace boost{ namespace math{ namespace constants{ namespace detail{
+
+ template <class Real>
+ template <int N>
+ Real constant_my_constant<Real>::compute(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE_SPEC(mpl::int_<N>))
+ {
+ int required_precision = N ? N : tools::digits<Real>();
+ Real result = /* value computed to required_precision bits */ ;
+ return result;
+ }
+
+ }}}} // namespaces
+
+Then define a placeholder for the constant itself:
+
+ namespace boost{ namespace math{ namespace constants{
+
+ BOOST_DEFINE_MATH_CONSTANT(my_constant, 0.0, "0");
+
+ }}}
+
+
+For example, to calculate [pi]/2, add to `boost/math/constants/calculate_constants.hpp`
+
+ template <class T>
+ template<int N>
+ inline T constant_half_pi<T>::compute(BOOST_MATH_EXPLICIT_TEMPLATE_TYPE_SPEC(mpl::int_<N>))
+ {
+ BOOST_MATH_STD_USING
+ return pi<T, policies::policy<policies::digits2<N> > >() / static_cast<T>(2);
+ }
+
+Then to `boost/math/constants/constants.hpp` add:
+
+ BOOST_DEFINE_MATH_CONSTANT(half_pi, 0.0, "0"); // Actual values are temporary, we'll replace them later.
+
+[note Previously defined constants like pi and e can be used, but [*not] by simply calling `pi<T>()`;
+specifying the precision via the policy
+`pi<T, policies::policy<policies::digits2<N> > >()`
+is essential to ensure full accuracy.]
+
+[warning Newly defined constants can only be used once they are included in
+`boost/math/constants/constants.hpp`. So if you add
+`template <class T, class N> T constant_my_constant{...}`,
+then you cannot define `constant_my_constant`
+until you add the temporary `BOOST_DEFINE_MATH_CONSTANT(my_constant, 0.0, "0")`.
+Failing to do this will result in surprising compile errors:
+``
+ error C2143: syntax error : missing ';' before '<'
+ error C2433: 'constant_root_two_div_pi' : 'inline' not permitted on data declarations
+ error C2888: 'T constant_root_two_div_pi' : symbol cannot be defined within namespace 'detail'
+ error C2988: unrecognizable template declaration/definition
+``
+]
+
+2. [*You will need an arbitrary precision type to use to calculate the value]. This library
+currently supports `cpp_float`, `NTL::RR` or `mpfr_class`, used via the bindings in `boost/math/bindings`.
+The default is to use `NTL::RR` unless you define an alternative macro, for example,
+`USE_MPFR` or `USE_CPP_FLOAT`, at the start of your program.
+
+3. It is necessary to link to the Boost.Regex library,
+and probably to your chosen arbitrary precision type library.
+
+4. You need to add `libs\math\include_private` to your compiler's include path, as the needed
+header is not installed in the usual places by default (this avoids a cyclic dependency between
+the Math and Multiprecision libraries' headers).
+
+5. The complete program to generate the constant `half_pi` using function `calculate_half_pi` is then:
+
+ #define USE_CPP_FLOAT // If required.
+ #include <boost/math/constants/generate.hpp>
+
+ int main()
+ {
+ BOOST_CONSTANTS_GENERATE(half_pi);
+ }
+
+The output from the program is a snippet of C++ code
+(actually a macro call) that can be cut and pasted
+into `boost/math/constants/constants.hpp` or else into your own code, for example:
+
+[pre
+ BOOST_DEFINE_MATH_CONSTANT(half_pi, 1.570796326794896619231321691639751442e+00, "1.57079632679489661923132169163975144209858469968755291048747229615390820314310449931401741267105853399107404326e+00");
+]
+
+This macro BOOST_DEFINE_MATH_CONSTANT inserts a C++ struct code snippet that
+declares the `float`, `double` and `long double` versions of the constant,
+plus a decimal digit string representation correct to 100 decimal
+digits, and all the meta-programming machinery needed to select between them.
+
+The result of an expanded macro for Pi is shown below.
+
+[import ./pp_pi.hpp]
+
+[preprocessed_pi]
+
+
+[endsect] [/section:new_const Defining New Constants]
+
+[section:constants_faq FAQs]
+
+[h4 Why are ['these] Constants Chosen?]
+It is, of course, impossible to please everyone with a list like this.
+
+Some of the criteria we have used are:
+
+* Used in Boost.Math.
+* Commonly used.
+* Expensive to compute.
+* Requested by users.
+* [@http://en.wikipedia.org/wiki/Mathematical_constant Used in science and mathematics.]
+* No integer values (because so cheap to construct).[br]
+(You can easily define your own if found convenient, for example: `FPT one =static_cast<FPT>(42);`).
+
+[h4 How are constants named?]
+* Not macros, so no upper case.
+* All lower case (following C++ standard names).
+* No CamelCase.
+* Underscore as _ delimiter between words.
+* Numbers spelt as words rather than decimal digits (except following pow).
+* Abbreviation conventions:
+ * root for square root.
+ * cbrt for cube root.
+ * pow for pow function using decimal digits like pow23 for n[super 2/3].
+ * div for divided by or operator /.
+ * minus for operator -, plus for operator +.
+ * sqr for squared.
+ * cubed for cubed n[super 3].
+  * words for Greek letters, like [pi], [zeta] and [Gamma].
+ * words like half, third, three_quarters, sixth for fractions. (Digit(s) can get muddled).
+ * log10 for log[sub 10]
+ * ln for log[sub e]
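+
+For example, combining these conventions, `one_div_root_two_pi` reads as 1/[radic](2[pi]), value 0.398942 in the table above.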
+
+[h4 How are the constants derived?]
+
+The constants have all been calculated using high-precision software working
+with up to 300-bit precision giving about 100 decimal digits.
+(The precision can be arbitrarily chosen and is limited only by compute time).
+
+[h4 How Accurate are the constants?]
+The minimum accuracy chosen (100 decimal digits) exceeds the
+accuracy of reasonably-foreseeable floating-point hardware (256-bit)
+and should meet most high-precision computations.
+
+[h4 How are the constants tested?]
+
+# Comparison using Boost.Test BOOST_CHECK_CLOSE_FRACTION with `long double` literals
+of at least 35 decimal digits, enough to be accurate for all `long double` implementations;
+see the sketch after this list. The tolerance is usually twice `long double` epsilon.
+
+# Comparison with calculation at long double precision.
+This often requires a slightly higher tolerance than two epsilon
+because of computational noise from round-off etc,
+especially when trig and other functions are called.
+
+# Comparison with independent published values,
+for example, using [@http://oeis.org/ The On-Line Encyclopedia of Integer Sequences (OEIS)],
+again using strings of at least 35 decimal digits.
+
+# Comparison with independently calculated values using arbitrary-precision tools like
+[@http://www.wolfram.com/mathematica/ Mathematica], again using literal strings of at least 35 decimal digits.
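+
+For illustration, a check of the first kind might look like this (a sketch, not the library's actual test code):
+
+    #define BOOST_TEST_MODULE constants_check
+    #include <boost/test/included/unit_test.hpp>
+    #include <boost/math/constants/constants.hpp>
+    #include <limits>
+
+    BOOST_AUTO_TEST_CASE(test_pi_constant)
+    {
+       using boost::math::constants::pi;
+       BOOST_CHECK_CLOSE_FRACTION(pi<long double>(),
+          3.14159265358979323846264338327950288L, // 36 significant digits.
+          2 * std::numeric_limits<long double>::epsilon());
+    }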
+
+[warning We have not yet been able to [*check] that
+[*all] constants are accurate at the full arbitrary precision,
+at present 100 decimal digits.
+But certain key values like `e` and `pi` appear to be accurate
+and internal consistencies suggest that others are this accurate too.
+]
+
+[h4 Why is Portability important?]
+
+Code written using math constants is easily portable even when using different
+floating-point types with differing precision.
+
+It is a mistake to expect that results of computations will be [*identical], but
+you can achieve the [*best accuracy possible for the floating-point type in use].
+
+This has no extra cost to the user, but reduces irritating,
+and often confusing and very hard-to-trace effects,
+caused by the intrinsically limited precision of floating-point calculations.
+
+A harmless symptom of this limit is a spurious least-significant digit;
+at worst, slightly inaccurate constants sometimes cause iterating algorithms
+to diverge wildly because internal comparisons just fail.
+
+[h4 What is the Internal Format of the constants, and why?]
+
+See [link math_toolkit.tutorial tutorial] above for normal use,
+but this FAQ explains the internal details used for the constants.
+
+Constants are stored as 100 decimal digit values.
+However, some compilers do not accept decimal digit strings as long as this.
+So the constant is split into two parts, with the first containing at least
+128-bit long double precision (35 decimal digits),
+which for consistency should be in scientific format with a signed exponent.
+
+The second part is the value of the constant expressed as a string literal,
+accurate to at least 100 decimal digits (in practice that means at least 102 digits).
+Again for consistency use scientific format with a signed exponent.
+
+For types with precision greater than a `long double`,
+if `T` is constructible from a `const char*`
+then it's constructed directly from the string,
+otherwise we fall back on `lexical_cast` to convert to type `T`.
+(Using a string is necessary because you can't use a numeric constant
+since even a `long double` might not have enough digits).
+
+So, for example, a constant like pi is internally defined as
+
+ BOOST_DEFINE_MATH_CONSTANT(pi, 3.141592653589793238462643383279502884e+00, "3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651e+00");
+
+In this case the significand is 109 decimal digits, ensuring 100 decimal digits are exact, and exponent is zero.
+
+See [link math_toolkit.new_const defining new constants] to calculate new constants.
+
+A macro definition like this can be pasted into user code where convenient,
+or into `boost/math/constants/constants.hpp` if it is to be added to the Boost.Math library.
+
+[h4 What Floating-point Types could I use?]
+
+Apart from the built-in floating-point types `float`, `double`, `long double`,
+there are several arbitrary precision floating-point classes available,
+but most are not licensed for commercial use.
+
+[h5 Boost.Multiprecision by Christopher Kormanyos]
+
+This work is based on an earlier work called e-float:
+Algorithm 910: A Portable C++ Multiple-Precision System for Special-Function Calculations,
+in ACM TOMS, Vol. 37, Issue 4 (February 2011), (C) ACM, 2011.
+[@http://doi.acm.org/10.1145/1916461.1916469]
+[@https://svn.boost.org/svn/boost/sandbox/e_float/ e_float]
+but is now re-factored and available under the Boost license in the Boost-sandbox at
+[@https://svn.boost.org/svn/boost/sandbox/multiprecision/ multiprecision]
+where it is being refined and prepared for review.
+
+[h5 Boost.cpp_float by John Maddock using Expression Templates]
+
+[@https://svn.boost.org/svn/boost/sandbox/big_number/ Big Number]
+which is a reworking of [@https://svn.boost.org/svn/boost/sandbox/e_float/ e_float]
+by Christopher Kormanyos to use expression templates for faster execution.
+
+[h5 NTL class quad_float]
+
+[@http://shoup.net/ntl/ NTL] by Victor Shoup provides high-precision fixed- and floating-point types,
+of both fixed and arbitrary precision.
+However, none of these is licensed for commercial use.
+
+ #include <NTL/quad_float.h> // quad precision 106-bit, about 32 decimal digits.
+ using NTL::to_quad_float; // Less precise than arbitrary precision NTL::RR.
+
+NTL class `quad_float` gives a form of quadruple precision with a
+106-bit significand (but without an extended exponent range).
+On an IEC559/IEEE 754 compatible processor,
+for example the Intel x86 family, with 64-bit `double` and 53-bit significand,
+it uses the significands of [*two] 64-bit doubles:
+if `std::numeric_limits<double>::digits10` is 16,
+then we get about twice the precision,
+so `std::numeric_limits<quad_float>::digits10()` should be 32
+(the default `std::numeric_limits<RR>::digits10()` is about 40),
+which seems to agree with experiments.
+We output constants with 2 or 3 extra decimal digits
+(including some noisy bits, an approximation to `std::numeric_limits<RR>::max_digits10()`),
+so using `quad_float::SetOutputPrecision(32 + 3);`
+
+Apple Mac/Darwin uses a similar ['doubledouble] 106-bit format for its built-in `long double` type.
+
+[note The precision of all `doubledouble` floating-point types is rather odd and values given are only approximate.]
+
+[*New projects should use __multiprecision.]
+
+[h5 NTL class RR]
+
+NTL class `RR` provides arbitrary-precision floating point,
+150-bit (about 50 decimal digits) by default,
+used here at 300-bit to output 100 decimal digits,
+enough for many practical non-'number-theoretic' C++ applications.
+
+__NTL is [*not licenced for commercial use].
+
+This class is used in Boost.Math and is an option when using big_number projects to calculate new math constants.
+
+[*New projects should use __multiprecision.]
+
+[h5 GMP and MPFR]
+
+[@http://gmplib.org GMP] and [@http://www.mpfr.org/ MPFR] have also been used to compute constants,
+but are licensed under the [@http://www.gnu.org/copyleft/lesser.html Lesser GPL license]
+and are [*not licensed for commercial use].
+
+[h4 What happened to a previous collection of constants proposed for Boost?]
+
+A review concluded that the way in which the constants were presented did not meet many people's needs.
+None of the methods proposed met many users' essential requirement to allow writing simply `pi` rather than `pi()`.
+Many science and engineering equations look difficult to read because function-call brackets can be confused
+with the many other brackets often needed. All the methods then proposed for avoiding the brackets failed to meet all needs,
+often on grounds of complexity and lack of applicability to various realistic scenarios.
+
+So the simple namespace method, proposed on its own, but rejected at the first review,
+has been added to allow users to have convenient access to float, double and long double values,
+but combined with template struct and functions to allow simultaneous use
+with other non-built-in floating-point types.
+
+
+[h4 Why do the constants (internally) have a struct rather than a simple function?]
+
+A function mechanism was provided in previous versions of Boost.Math.
+
+The new mechanism uses a struct to permit partial specialization: see ['Custom Specializing a constant] above.
+It should also allow use with other packages like [@http://www.ttmath.org/ ttmath Bignum C++ library.]
+
+[h4 Where can I find other high precision constants?]
+
+# Constants with very high precision and good accuracy (>40 decimal digits)
+from Simon Plouffe's web based collection [@http://pi.lacim.uqam.ca/eng/].
+# [@https://oeis.org/ The On-Line Encyclopedia of Integer Sequences (OEIS)]
+# Checks using printed text optically scanned values and converted from:
+D. E. Knuth, Art of Computer Programming, Appendix A, Table 1, Vol 1, ISBN 0 201 89683 4 (1997)
+# M. Abramowitz & I. A. Stegun, National Bureau of Standards, Handbook of Mathematical Functions,
+a reference source for formulae now superseded by
+# Frank W. J. Olver, Daniel W. Lozier, Ronald F. Boisvert, Charles W. Clark, NIST Handbook of Mathematical Functions, Cambridge University Press, ISBN 978-0-521-14063-8, 2010.
+# John F Hart, Computer Approximations, Kreiger (1978) ISBN 0 88275 642 7.
+# Some values from Cephes Mathematical Library, Stephen L. Moshier
+and CALC100 100 decimal digit Complex Variable Calculator Program, a DOS utility.
+# Xavier Gourdon, Pascal Sebah, 50 decimal digits constants at [@http://numbers.computation.free.fr/Constants/constants.html Number, constants and computation].
+
+[h4 Where are Physical Constants?]
+
+Not here in this Boost.Math collection, because physical constants:
+
+* Are measurements, not truly constants.
+* Are not truly constant and keep changing as measurement technology improves.
+* Have an intrinsic uncertainty.
+* By contrast, mathematical constants are stored and represented at varying precision, but should never be inaccurate.
+
+Some physical constants may be available in Boost.Units.
+
+[endsect] [/section:FAQ FAQ]
+
+[endmathpart] [/section:constants Mathematical Constants]
+
+[/
+ Copyright 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+
diff --git a/doc/constants/pp_pi.hpp b/doc/constants/pp_pi.hpp
new file mode 100644
index 0000000..db66d30
--- /dev/null
+++ b/doc/constants/pp_pi.hpp
@@ -0,0 +1,99 @@
+//[preprocessed_pi
+
+// Preprocessed pi constant, annotated.
+
+namespace boost
+{
+ namespace math
+ {
+ namespace constants
+ {
+ namespace detail
+ {
+ template <class T> struct constant_pi
+ {
+ private:
+ // Default implementations from string of decimal digits:
+ static inline T get_from_string()
+ {
+ static const T result
+ = detail::convert_from_string<T>("3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651e+00",
+ boost::is_convertible<const char*, T>());
+ return result;
+ }
+ template <int N> static T compute();
+
+ public:
+ // Default implementations from string of decimal digits:
+ static inline T get(const mpl::int_<construct_from_string>&)
+ {
+ constant_initializer<T, & constant_pi<T>::get_from_string >::do_nothing();
+ return get_from_string();
+ }
+ // Float, double and long double versions:
+        static inline T get(const mpl::int_<construct_from_float>&)
+ {
+ return 3.141592653589793238462643383279502884e+00F;
+ }
+ static inline T get(const mpl::int_<construct_from_double>&)
+ {
+ return 3.141592653589793238462643383279502884e+00;
+ }
+ static inline T get(const mpl::int_<construct_from_long_double>&)
+ {
+ return 3.141592653589793238462643383279502884e+00L;
+ }
+        // For very high precision that can nonetheless be calculated at compile time:
+ template <int N> static inline T get(const mpl::int_<N>& n)
+ {
+ constant_initializer2<T, N, & constant_pi<T>::template compute<N> >::do_nothing();
+ return compute<N>();
+ }
+ //For true arbitrary precision, which may well vary at runtime.
+ static inline T get(const mpl::int_<0>&)
+ {
+ return tools::digits<T>() > max_string_digits ? compute<0>() : get(mpl::int_<construct_from_string>());
+ }
+ }; // template <class T> struct constant_pi
+ } // namespace detail
+
+ // The actual forwarding function (including policy to control precision).
+      template <class T, class Policy> inline T pi()
+      {
+         return detail::constant_pi<T>::get(typename construction_traits<T, Policy>::type());
+ }
+ // The actual forwarding function (using default policy to control precision).
+ template <class T> inline T pi()
+ {
+         return pi<T, boost::math::policies::policy<> >();
+ }
+ } // namespace constants
+
+ // Namespace specific versions, for the three built-in floats:
+ namespace float_constants
+ {
+ static const float pi = 3.141592653589793238462643383279502884e+00F;
+ }
+ namespace double_constants
+ {
+ static const double pi = 3.141592653589793238462643383279502884e+00;
+ }
+ namespace long_double_constants
+ {
+ static const long double pi = 3.141592653589793238462643383279502884e+00L;
+ }
+   namespace constants{
+ } // namespace constants
+ } // namespace math
+} // namespace boost
+
+//] [/preprocessed_pi]
+
+/*
+ Copyright 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt)
+*/
+
+
diff --git a/doc/cstdfloat/cstdfloat.qbk b/doc/cstdfloat/cstdfloat.qbk
new file mode 100644
index 0000000..0c90e61
--- /dev/null
+++ b/doc/cstdfloat/cstdfloat.qbk
@@ -0,0 +1,551 @@
+[/cstdfloat.qbk Specified-width floating-point typedefs]
+
+[def __IEEE754 [@http://en.wikipedia.org/wiki/IEEE_floating_point IEEE_floating_point]]
+[def __N3626 [@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3626.pdf N3626]]
+[def __N1703 [@http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1703.pdf N1703]]
+
+[import ../../example/cstdfloat_example.cpp]
+[import ../../example/normal_tables.cpp]
+[/Removed as unhelpful for C++ users, but might have use as a check that quadmath is available and linked OK.]
+[/import ../../example/quadmath_snprintf.c]
+
+[section:specified_typedefs Overview]
+
+The header `<boost/cstdfloat.hpp>` provides [*optional]
+standardized floating-point `typedef`s having [*specified widths].
+These are useful for writing portable code because they
+should behave identically on all platforms.
+These `typedef`s are the floating-point analog of specified-width integers in `<cstdint>` and `stdint.h`.
+
+The `typedef`s are based on __N3626
+proposed for a new C++14 standard header `<cstdfloat>` and
+__N1703 proposed for a new C language standard header `<stdfloat.h>`.
+
+All `typedef`s are in `namespace boost` (would be in namespace `std` if eventually standardized).
+
+The `typedef`s include `float16_t, float32_t, float64_t, float80_t, float128_t`,
+their corresponding least and fast types,
+and the corresponding maximum-width type.
+The `typedef`s are based on underlying built-in types
+such as `float`, `double`, or `long double`, or the proposed __short_float,
+or based on other compiler-specific non-standardized types such as `__float128`.
+The underlying types of these `typedef`s must conform with
+the corresponding specifications of binary16, binary32, binary64,
+and binary128 in __IEEE754 floating-point format, and
+`std::numeric_limits<>::is_iec559 == true`.
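+
+For example (a sketch, assuming the platform provides a 64-bit type):
+
+    #include <boost/cstdfloat.hpp>
+
+    boost::float64_t x = BOOST_FLOAT64_C(0.1); // Exactly 64 bits wide on every platform.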
+
+The 128-bit floating-point type (of great interest in scientific and
+numeric programming) is not required in the Boost header,
+and may not be supplied for all platforms/compilers, because compiler
+support for a 128-bit floating-point type is not mandated by either
+the C standard or the C++ standard.
+
+If 128-bit floating-point is supported, then including `boost/cstdfloat.hpp`
+provides a [*native] 128-bit type, and
+includes other headers in folder `boost/math/cstdfloat` that provide C++
+quad support for __C_math in `<cmath>`, `<limits>`, `<iostream>`, `<complex>`,
+and the available floating-point types.
+
+One can also, more robustly, include `boost/multiprecision/float128.hpp`
+and this provides a thin wrapper selecting the appropriate 128-bit native type
+from `cstdfloat` if available, or else a 128-bit multiprecision type.
+
+See [link math_toolkit.examples.je_lambda Jahnke-Emden-Lambda function example]
+for an example using both a `<cmath>` function and a Boost.Math function
+to evaluate a moderately interesting function, the
+[@http://mathworld.wolfram.com/LambdaFunction.html Jahnke-Emden-Lambda function]
+and [link math_toolkit.examples.normal_table normal distribution]
+as an example of a statistical distribution from Boost.Math.
+
+[endsect] [/section:specified_typedefs Overview]
+
+[section:rationale Rationale]
+
+The implementation of `<boost/cstdfloat.hpp>` is designed to utilize `<float.h>`,
+defined in the 1989 C standard. The preprocessor is used to query certain
+preprocessor definitions in `<float.h>` such as FLT_MAX, DBL_MAX, etc.
+Based on the results of these queries, an attempt is made to automatically
+detect the presence of built-in floating-point types having specified widths.
+An unequivocal test requiring conformance with __IEEE754 (IEC559) based on
+[@http://en.cppreference.com/w/cpp/types/numeric_limits/is_iec559 `std::numeric_limits<>::is_iec559`]
+is performed with `BOOST_STATIC_ASSERT`.
+
+In addition, this Boost implementation `<boost/cstdfloat.hpp>`
+supports an 80-bit floating-point `typedef` if it can be detected,
+and a 128-bit floating-point `typedef` if it can be detected,
+provided that the underlying types conform with
+[@http://en.wikipedia.org/wiki/Extended_precision IEEE-754 precision extension]
+(provided `std::numeric_limits<>::is_iec559 == true` for this type).
+
+The header `<boost/cstdfloat.hpp>` makes the standardized floating-point
+`typedef`s safely available in `namespace boost` without placing any names
+in `namespace std`. The intention is to complement rather than compete
+with a potential future C/C++ Standard Library that may contain these `typedef`s.
+Should some future C/C++ standard include `<stdfloat.h>` and `<cstdfloat>`,
+then `<boost/cstdfloat.hpp>` will continue to function, but will become redundant
+and may be safely deprecated.
+
+Because `<boost/cstdfloat.hpp>` is a Boost header, its name conforms to the
+boost header naming conventions, not the C++ Standard Library header
+naming conventions.
+
+[note
+`<boost/cstdfloat.hpp>` [*cannot synthesize or create
+a `typedef` if the underlying type is not provided by the compiler].
+For example, if a compiler does not have an underlying floating-point
+type with 128 bits (highly sought-after in scientific and numeric programming),
+then `float128_t` and its corresponding least and fast types are [*not]
+provided by `<boost/cstdfloat.hpp>`.]
+
+[warning If `<boost/cstdfloat.hpp>` uses a compiler-specific non-standardized type
+([*not] derived from `float, double,` or `long double`) for one or more
+of its floating-point `typedef`s, then there is no guarantee that
+specializations of `numeric_limits<>` will be available for these types.
+Typically, specializations of `numeric_limits<>` will only be available for these
+types if the compiler itself supports corresponding specializations
+for the underlying type(s); exceptions are GCC's `__float128` type and
+Intel's `_Quad` type, which are explicitly supported via our own code.]
+
+[warning
+As an implementation artifact, certain C macro names from `<float.h>`
+may possibly be visible to users of `<boost/cstdfloat.hpp>`.
+Don't rely on using these macros; they are not part of any Boost-specified interface.
+Use `std::numeric_limits<>` for floating-point ranges, etc. instead.]
+
+[tip For best results, `<boost/cstdfloat.hpp>` should be `#include`d before
+other headers that define generic code making use of standard library functions
+defined in `<cmath>`.
+
+This is because `<boost/cstdfloat.hpp>` may define overloads of
+standard library functions where a non-standard type (i.e. other than
+`float`, `double`, or `long double`) is used for one of the specified
+width types. If generic code (for example in another Boost.Math header)
+calls a standard library function, then the correct overload will only be
+found if these overloads are defined prior to the point of use.
+See [link math_toolkit.float128.overloading overloading template functions with float128_t]
+and the implementation of `cstdfloat.hpp` for more details.
+
+For this reason, making `#include <boost/cstdfloat.hpp>` the [*first
+include] is usually best.
+]
+[endsect] [/section:rationale Rationale]
+
+[section:exact_typdefs Exact-Width Floating-Point `typedef`s]
+
+The `typedef float#_t`, with # replaced by the width, designates a
+floating-point type of exactly # bits. For example `float32_t` denotes
+a single-precision floating-point type with approximately
+7 decimal digits of precision (equivalent to binary32 in __IEEE754).
+
+Floating-point types in C and C++ are allowed to have
+implementation-specific widths and formats.
+However, if a platform supports underlying
+floating-point types (conformant with __IEEE754) with widths of
+16, 32, 64, 80, 128 bits, or any combination thereof,
+then `<boost/cstdfloat.hpp>` does provide the corresponding `typedef`s
+`float16_t, float32_t, float64_t, float80_t, float128_t,`
+their corresponding least and fast types,
+and the corresponding maximum-width type.
+
+[h4 How to tell which widths are supported]
+
+The definition (or not) of a
+[link math_toolkit.macros floating-point constant macro]
+is a way to test if a [*specific width floating-point] is available on a platform.
+
+ #if defined(BOOST_FLOAT16_C)
+ // Can use boost::float16_t, perhaps a proposed __short_float.
+ // P0192R1, Adding Fundamental Type for Short Float,
+ // Boris Fomitchev, Sergei Nikolaev, Olivier Giroux, Lawrence Crowl, 2016 Feb14
+ // http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2016.pdf
+ #endif
+
+ #if defined(BOOST_FLOAT32_C)
+ // Can use boost::float32_t, usually type `float`.
+ #endif
+
+ #if defined(BOOST_FLOAT64_C)
+ // Can use boost::float64_t, usually type `double`, and sometimes also type `long double`.
+ #endif
+
+ #if defined(BOOST_FLOAT80_C)
+ // Can use boost::float80_t, sometimes type `long double`.
+ #endif
+
+ #if defined(BOOST_FLOAT128_C)
+ // Can use boost::float128_t. Sometimes type `__float128` or `_Quad`.
+ #endif
+
+This can be used to write code which will compile and run (albeit differently) on several platforms.
+Without these tests, if a width, say `float128_t`, is not supported, then compilation would fail.
+(It is, of course, rare for `float64_t` or `float32_t` not to be supported.)
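+
+These macro tests make graceful fallback easy; a minimal sketch:
+
+    #include <boost/cstdfloat.hpp>
+
+    #if defined(BOOST_FLOAT128_C)
+    typedef boost::float128_t wide_float; // Use 128 bits where available...
+    #else
+    typedef boost::float64_t wide_float;  // ...else settle for 64 bits.
+    #endif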
+
+The number of bits in just the significand can be determined using:
+
+ std::numeric_limits<boost::floatmax_t>::digits
+
+and from this one can safely infer the total number of bits, because the type must be in IEEE754 format
+(`std::numeric_limits<boost::floatmax_t>::is_iec559 == true`);
+so, for example, if `std::numeric_limits<boost::floatmax_t>::digits == 113`,
+then `floatmax_t` must be `float128_t`.
+
+The [*total] number of bits using `floatmax_t` can be found thus:
+
+[floatmax_1]
+
+and the number of 'guaranteed' decimal digits using
+
+ std::numeric_limits<boost::floatmax_t>::digits10
+
+and the maximum number of possibly significant decimal digits using
+
+ std::numeric_limits<boost::floatmax_t>::max_digits10
+
+[tip `max_digits10` is not always supported,
+but can be calculated at compile-time using the Kahan formula,
+`2 + binary_digits * 0.3010` which can be calculated [*at compile time] using
+`2 + binary_digits * 3010/10000`.
+]
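+
+A sketch of the integer-only Kahan approximation from the tip above:
+
+    #include <boost/cstdfloat.hpp>
+    #include <limits>
+
+    // Compile-time approximation to max_digits10 for floatmax_t:
+    const int approx_max_digits10
+       = 2 + std::numeric_limits<boost::floatmax_t>::digits * 3010 / 10000;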
+
+[note One could test that
+
+ std::is_same<boost::floatmax_t, boost::float128_t>::value == true
+
+but this would fail to compile on a platform where `boost::float128_t` is not defined.
+So it is better to use the macros `BOOST_FLOATnnn_C`. ]
+
+[endsect] [/section:exact_typdefs Exact-Width Floating-Point `typedef`s]
+
+[section:minimum_typdefs Minimum-width floating-point `typedef`s]
+
+The `typedef float_least#_t`, with # replaced by the width, designates a
+floating-point type with a [*width of at least # bits], such that no
+floating-point type with lesser size has at least the specified width.
+Thus, `float_least32_t` denotes the smallest floating-point type with
+a width of at least 32 bits.
+
+Minimum-width floating-point types are provided for all existing
+exact-width floating-point types on a given platform.
+
+For example, if a platform supports `float32_t` and `float64_t`,
+then `float_least32_t` and `float_least64_t` will also be supported, etc.
+
+[endsect] [/section:minimum_typdefs Minimum-width floating-point `typedef`s]
+
+[section:fastest_typdefs Fastest floating-point `typedef`s]
+
+The `typedef float_fast#_t`, with # replaced by the width, designates
+the [*fastest] floating-point type with a [*width of at least # bits].
+
+There is no absolute guarantee that these types are the fastest for all purposes.
+In any case, however, they satisfy the precision and width requirements.
+
+Fastest minimum-width floating-point types are provided for all existing
+exact-width floating-point types on a given platform.
+
+For example, if a platform supports `float32_t` and `float64_t`,
+then `float_fast32_t` and `float_fast64_t` will also be supported, etc.
+
+[endsect] [/section:fastest_typdefs Fastest floating-point `typedef`s]
+
+[section:greatest_typdefs Greatest-width floating-point typedef]
+
+The `typedef floatmax_t` designates a floating-point type capable of representing
+any value of any floating-point type on a given platform most precisely.
+
+The greatest-width `typedef` is provided for all platforms, but, of course, the size may vary.
+
+To provide floating-point [*constants] most precisely representable for a `floatmax_t` type,
+use the macro `BOOST_FLOATMAX_C`.
+
+For example, replace a constant `123.4567890123456789012345678901234567890` with
+
+ BOOST_FLOATMAX_C(123.4567890123456789012345678901234567890)
+
+If, for example, `floatmax_t` is `float64_t` then the result will be equivalent to a `long double` constant suffixed with L,
+but if `floatmax_t` is `float128_t` then the result will be equivalent to a quad-type constant suffixed with Q
+(assuming, of course, that `float128_t` (`__float128` or `_Quad`) is supported).
+
+If we display with `max_digits10`, the maximum possibly significant decimal digits:
+
+[floatmax_widths_1]
+
+then on a 128-bit platform (GCC 4.8.1 or higher with quadmath):
+
+[floatmax_widths_2]
+
+[endsect] [/section:greatest_typdefs Greatest-width floating-point typedef]
+
+[section:macros Floating-Point Constant Macros]
+
+All macros of the type `BOOST_FLOAT16_C, BOOST_FLOAT32_C, BOOST_FLOAT64_C,
+BOOST_FLOAT80_C, BOOST_FLOAT128_C, ` and `BOOST_FLOATMAX_C`
+are always defined after inclusion of `<boost/cstdfloat.hpp>`.
+
+[cstdfloat_constant_2]
+
+[tip Boost.Math provides many constants 'built-in', so always use Boost.Math constants if available, for example:]
+
+[cstdfloat_constant_1]
+
+from [@../../example/cstdfloat_example.cpp cstdfloat_example.cpp].
+
+See the complete list of __constants.
+
+[endsect] [/section:macros Floating-Point Constant Macros]
+
+[section:examples Examples]
+
+[h3:je_lambda Jahnke-Emden-Lambda function]
+
+The following code uses `<boost/cstdfloat.hpp>` in combination with
+`<boost/math/special_functions.hpp>` to compute a simplified
+version of the
+[@http://mathworld.wolfram.com/LambdaFunction.html Jahnke-Emden-Lambda function].
+Here, we specify a floating-point type with [*exactly 64 bits] (i.e., `float64_t`).
+If we were to use, for instance, built-in `double`,
+then there would be no guarantee that the code would
+behave identically on all platforms. With `float64_t` from
+`<boost/cstdfloat.hpp>`, however, it is very likely to be identical.
+
+Using `float64_t`, we know that
+this code is as portable as possible and uses a floating-point type
+with approximately 15 decimal digits of precision,
+regardless of the compiler or version or operating system.
+
+[cstdfloat_example_1]
+[cstdfloat_example_2]
+[cstdfloat_example_3]
+
+For details, see [@../../example/cstdfloat_example.cpp cstdfloat_example.cpp]
+- an extensive example program.
+
+[h3:normal_table Normal distribution table]
+
+This example shows how to print tables of the PDF and CDF of a normal distribution,
+using the `boost::math` implementation of the normal distribution.
+
+A function templated on floating-point type prints a table for a range of standard variate z values.
+
+The example shows use of the specified-width typedefs, either to use a specific width,
+or to use the maximum available on the platform, perhaps as high as 128 bits.
+
+The number of digits displayed is controlled by the precision of the type,
+so there are no spurious insignificant decimal digits:
+
+ float_32_t 0 0.39894228
+ float_128_t 0 0.398942280401432702863218082711682655
+
+Some sample output for two different platforms is appended to the code at
+[@../../example/normal_tables.cpp normal_tables.cpp].
+
+[normal_table_1]
+
+[endsect] [/section:examples examples]
+
+[section:float128_hints Hints on using float128 (and __float128)]
+
+[h5:different_float128 __float128 versus float128]
+* __float128 is the (optional) compiler-supplied hardware type.
+It's a C-ish extension to C++, and there is only
+minimal support for it in normal C++
+(no IO streams or `numeric_limits` support;
+function names in libquadmath all have different names to the
+`std::` ones, etc.).
+So you can program type `__float128` directly, but it's harder work.
+
+* Type `float128` uses __float128 and makes it C++ and generic-code friendly,
+with all the usual standard `iostream`, `numeric_limits`, `complex` in namespace `std::` available,
+so it is strongly recommended for C++ use.
+
+[h5 Hints and tips]
+
+* Make sure you declare variables with the correct type, here `float128`.
+* Make sure that if you pass a variable to a function, it is cast to `float128`.
+* Make sure you declare literals with the correct suffix - otherwise
+they'll be treated as type `double` with catastrophic loss of precision.
+So make sure they have a Q suffix for 128-bit floating-point literals.
+* All the std library functions, cmath functions, plus all the constants, and special
+functions from Boost.Math should then just work.
+* Make sure std lib functions are called [*unqualified] so that the correct
+overload is found via __ADL. So write
+ sqrt(variable)
+and not
+ std::sqrt(variable).
+* In general, try not to reinvent stuff - using constants from
+Boost.Math is probably less error prone than declaring your own,
+likewise the special functions etc.
+
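+A minimal sketch of the literal-suffix pitfall (assuming `float128` from
+`boost/multiprecision/float128.hpp` and a compiler supporting the Q suffix):
+
+    #include <boost/multiprecision/float128.hpp>
+
+    using boost::multiprecision::float128;
+
+    float128 good = 1.5Q; // 128-bit literal: full precision.
+    float128 bad = 0.1;   // double literal: rounded to 53 bits before widening.
+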
+Some examples of what can go horribly and silently wrong are at
+[@../../example/float128_example.cpp float128_example.cpp].
+
+[endsect] [/section:float128_hints Hints on using float128]
+
+[section:float128 Implementation of Float128 type]
+
+Since few compilers implement a true 128-bit floating-point type, and language features like the Q literal suffix
+(which may need an option `-fext-numeric-literals` to enable)
+and C++ Standard library support are as yet missing or incomplete in C++11,
+this Boost.Math implementation wraps the `__float128` type provided by the GCC compiler
+(see [@https://gcc.gnu.org/onlinedocs/gcc/Floating-Types.html GCC floating-point types])
+or the `_Quad` type provided by the Intel compiler.
+
+This is provided in order to demonstrate, and to let users evaluate, the feasibility and benefits of higher-precision floating-point,
+especially to allow use of the full `<cmath>` and Boost.Math library of functions and distributions at high precision.
+
+(It is also possible to use Boost.Math with Boost.Multiprecision decimal and binary types, but since these are entirely software solutions,
+allowing much higher or even arbitrary precision, they are likely to be slower).
+
+We also provide (we believe full) support for `<limits>, <cmath>`, I/O stream operations in `<iostream>`, and `<complex>`.
+
+As a prototype for a future C++ standard, we place all these in `namespace std`.
+This of course contravenes the existing C++ standard, so compiling with any option that enforces strict conformance will fail.
+
+[tip For GCC, compile with `-std=gnu++11` or `-std=gnu++03` and do not use `-std=c++11` or any 'strict' options, as
+these turn off full support for `__float128`. These requirements also apply to the Intel compiler on Linux, for
+Intel on Windows you need to compile with `-Qoption,cpp,--extended_float_type -DBOOST_MATH_USE_FLOAT128` in order to
+activate 128-bit floating point support.]
+
+The `__float128` type is provided by the [@http://gcc.gnu.org/onlinedocs/libquadmath/ libquadmath library] on GCC or
+by Intel's FORTRAN library with Intel C++. They also provide a full set of `<cmath>` functions in `namespace std`.
+
+[h4 Using C __float128 quadmath type]
+
+[quadmath_snprintf_1]
+
+The source code is at [@../../example/quadmath_snprintf.c quadmath_snprintf.c].
+
+[h4 Using C++ `float128` quadmath type]
+
+For C++ programs, you will want to use the C++ type `float128`
+
+See example at [@../../example/cstdfloat_example.cpp cstdfloat_example.cpp].
+
+A typical invocation of the compiler is
+
+ g++ -O3 -std=gnu++11 test.cpp -I/c/modular-boost -lquadmath -o test.exe
+
+[tip If you are trying to use the develop branch of Boost.Math, then make `-I/c/modular-boost/libs/math/include` the [*first] include directory.]
+
+ g++ -O3 -std=gnu++11 test.cpp -I/c/modular-boost/libs/math/include -I/c/modular-boost -lquadmath -o test.exe
+
+[note So far, the only missing detail that we had noted was in trying to use `<typeinfo>`,
+for example for `std::cout << typeid(__float128).name();`.
+``
+Link fails: undefined reference to typeinfo for __float128.
+``
+See [@http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43622 GCC Bug 43622 - no C++ typeinfo for __float128].
+But this is reported (Marc Glisse 2015-04-04 ) fixed in GCC 5 (and above).
+
+For example, with GCC 6.1.1 this works as expected, giving a [*mangled] string name, and output (where possible - not all names are printable).
+``
+const std::type_info& tifu128 = typeid(__float128); // OK.
+//std::cout << tifu128.name() << std::endl; // On GCC, aborts (because not printable string).
+//std::cout << typeid(__float128).name() << std::endl; // Aborts - string name cannot be output.
+
+const std::type_info& tif128 = typeid(float128); // OK.
+std::cout << tif128.name() << std::endl; // OK.
+std::cout << typeid(float128).name() << std::endl; // OK.
+
+const std::type_info& tpi = typeid(pi1); // OK GCC 6.1.1 (from GCC 5 according to http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43622)
+std::cout << tpi.name() << std::endl; // Output mangled name:
+
+// N5boost14multiprecision6numberINS0_8backends16float128_backendELNS0_26expression_template_optionE0EEE
+
+``
+] [/note]
+
+[section:overloading Overloading template functions with float128_t]
+
+An artifact of providing C++ standard library support for
+quadmath may mandate the inclusion of `<boost/cstdfloat.hpp>`
+[*before] the inclusion of other headers.
+
+Consider a function that calls `fabs(x)` and has previously injected `std::fabs()`
+into local scope via a `using` directive:
+
+ template <class T>
+ bool unsigned_compare(T a, T b)
+ {
+ using std::fabs;
+ return fabs(a) == fabs(b);
+ }
+
+In this function, the correct overload of `fabs` may be found via
+[@http://en.wikipedia.org/wiki/Argument-dependent_name_lookup argument-dependent-lookup (ADL)]
+or by calling one of the `std::fabs` overloads. There is a key difference between them
+however: an overload in the same namespace as T and found via ADL need ['[*not be defined at the
+time the function is declared]]. However, all the types declared in `<boost/cstdfloat.hpp>` are
+fundamental types, so for these types we are relying on finding an overload declared in namespace `std`.
+In that case however, ['[*all such overloads must be declared prior to the definition of function
+`unsigned_compare` otherwise they are not considered]].
+
+In the event that `<boost/cstdfloat.hpp>` has been included [*after] the
+definition of the above function, the correct overload of `fabs`, while present, is simply
+not considered as part of the overload set.
+So the compiler tries to downcast the `float128_t` argument first to
+`long double`, then to `double`, then to `float`;
+the compilation fails because the result is ambiguous.
+However, the compiler error message will appear cruelly inscrutable,
+at an apparently irrelevant line number and making no mention of `float128`:
+the word ['ambiguous] is the clue to what is wrong.
+
+Provided you `#include <boost/cstdfloat.hpp>` [*before] the inclusion
+of any header containing generic floating-point code (such as other
+Boost.Math headers), the compiler
+will know about and use the `std::fabs(boost::float128_t)` overload
+that we provide in `<boost/cstdfloat.hpp>`.
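+
+A minimal sketch of a safe include order:
+
+    #include <boost/cstdfloat.hpp>              // First: declares the float128_t overloads.
+    #include <boost/math/special_functions.hpp> // Generic code now sees those overloads.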
+
+[endsect]
+
+[section:exp_function Exponential function]
+
+There was a bug when using any quadmath `expq` function on GCC :
+[@http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60349 GCC bug #60349]
+caused by
+[@http://sourceforge.net/p/mingw-w64/bugs/368/ mingw-64 bug #368].
+
+To work round this defect, an alternative implementation of 128-bit exp
+was temporarily provided by `boost/cstdfloat.hpp`.
+
+The mingw bug was fixed at 2014-03-12 and GCC 6.1.1 now works as expected.
+
+[tip It is essential to link to the quadmath library].
+
+[endsect] [/section:exp_function exp function]
+
+[section:typeinfo `typeinfo`]
+
+For GCC 4.8.1 it was not yet possible to use `typeinfo` for `__float128`:
+see [@http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43622 GCC 43622].
+
+So this code (to display the mangled name)
+failed to link with `undefined reference to typeinfo for __float128`:
+
+ std::cout << typeid(boost::float128_t).name() << std::endl;
+
+This prevented use of the existing tests for Boost.Math distributions
+(unless a few lines were commented out),
+and if the macro BOOST_MATH_INSTRUMENT controlling diagnostics is defined
+then some diagnostic displays in Boost.Math will not work.
+
+However this was only used for display purposes
+and could be commented out until this was fixed in GCC 5.
+
+[tip Not all mangled names can be [*displayed] using `std::cout`.]
+
+[endsect] [/section:typeinfo `typeinfo`]
+
+
+[endsect] [/section:float128 Float128 type]
+
+[/ cstdfloat.qbk
+ Copyright 2014 Christopher Kormanyos, John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+
+
+
diff --git a/doc/cstdfloat/cstdfloat_header.qbk b/doc/cstdfloat/cstdfloat_header.qbk
new file mode 100644
index 0000000..2a4d7e9
--- /dev/null
+++ b/doc/cstdfloat/cstdfloat_header.qbk
@@ -0,0 +1,29 @@
+[article Standardized Floating-Point typedefs for C and C++
+ [id float_t]
+ [quickbook 1.6]
+ [copyright 2014 Christopher Kormanyos, John Maddock, Paul A. Bristow]
+ [license
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ [@http://www.boost.org/LICENSE_1_0.txt])
+ ]
+ [authors [Kormanyos, Christopher], [Maddock, John], [Bristow, Paul A.] ]
+ [/last-revision $Date$]
+ [/version 1.8.3]
+]
+
+[def __IEEE754 [@http://en.wikipedia.org/wiki/IEEE_floating_point IEEE_floating_point]]
+[def __N3626 [@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3626.pdf N3626]]
+[def __N1703 [@http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1703.pdf N1703]]
+
+[note A printer-friendly PDF version of this manual is also available.]
+
+[include cstdfloat.qbk]
+
+[/ cstdfloat_header.qbk
+ Copyright 2014 Christopher Kormanyos, John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/cstdfloat/jamfile.v2 b/doc/cstdfloat/jamfile.v2
new file mode 100644
index 0000000..f0be5d8
--- /dev/null
+++ b/doc/cstdfloat/jamfile.v2
@@ -0,0 +1,109 @@
+# Boost.cstdfloat documentation Jamfile.v2
+#
+# Copyright Paul A. Bristow 2014.
+# Use, modification and distribution is subject to
+# the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+# boost-no-inspect
+
+# This builds a standalone version of the cstdfloat docs
+# using the same cstdfloat.qbk called from cstdfloat_header.qbk
+# The full math only uses cstdfloat.qbk.
+
+import modules ;
+
+path-constant images_location : html ;
+path-constant nav_images : html/images ;
+path-constant here : . ;
+using quickbook ;
+
+xml cstdfloat : cstdfloat_header.qbk ;
+
+using boostbook ;
+
+boostbook standalone
+ :
+ cstdfloat
+ :
+
+ # General settings
+ # =================
+ # Path for links to Boost folder, for example: boost_1_55_0 or boost-trunk, relative to folder /doc/html.
+ <xsl:param>boost.root=../../../../..
+ # Path for libraries index:
+ <xsl:param>boost.libraries=../../../../../../libs/libraries.htm
+
+
+ # Or a local custom stylesheet:
+ #<xsl:param>html.stylesheet=boostbook.css
+ <xsl:param>html.stylesheet=boostbook.css
+
+ #<xsl:param>nav.layout=none # No navigation bar (home, prev, next).
+ # Defining creates a runtime error: Global parameter nav.layout already defined.
+ <xsl:param>nav.layout=horizontal # to get a horizontal navigation bar (you probably DO want this).
+
+ # Path for links to Boost logo.
+ #<xsl:param>boost.image=Boost # options are: none (no logo), Boost (for boost.png), or your own logo, for example, inspired_by_boost.png
+ #<xsl:param>boost.image.src=boost.png #
+ #<xsl:param>boost.image.w=180 # Width of logo in pixels. (JM has W = 162, h = 46)
+ #<xsl:param>boost.image.h=90 # Height of logo in pixels.
+
+ # Some general style settings:
+ <xsl:param>table.footnote.number.format=1
+ <xsl:param>footnote.number.format=1
+
+ # HTML options first:
+ # Use graphics not text for navigation:
+ <xsl:param>navig.graphics=1
+ # How far down we chunk nested sections, basically all of them:
+ <xsl:param>chunk.section.depth=10
+ # Don't put the first section on the same page as the TOC:
+ <xsl:param>chunk.first.sections=1
+ # How far down sections get TOC's
+ <xsl:param>toc.section.depth=10
+ # Max depth in each TOC:
+ <xsl:param>toc.max.depth=4
+ # How far down we go with TOC's
+ <xsl:param>generate.section.toc.level=10
+ # Index on type:
+ <xsl:param>index.on.type=1
+ <xsl:param>boost.noexpand.chapter.toc=1
+
+ #<xsl:param>root.filename="sf_dist_and_tools"
+ #<xsl:param>graphicsize.extension=1
+ #<xsl:param>use.extensions=1
+
+ # PDF Options:
+ # TOC Generation: this is needed for FOP-0.9 and later:
+ <xsl:param>fop1.extensions=0
+ <format>pdf:<xsl:param>xep.extensions=1
+ # TOC generation: this is needed for FOP 0.2, but must not be set to zero for FOP-0.9!
+ <format>pdf:<xsl:param>fop.extensions=0
+ <format>pdf:<xsl:param>fop1.extensions=0
+ # No indent on body text:
+ <format>pdf:<xsl:param>body.start.indent=0pt
+ # Margin size:
+ <format>pdf:<xsl:param>page.margin.inner=0.5in
+ # Margin size:
+ <format>pdf:<xsl:param>page.margin.outer=0.5in
+ # Paper type = A4
+ <format>pdf:<xsl:param>paper.type=A4
+ # Yes, we want graphics for admonishments:
+ <xsl:param>admon.graphics=1
+ # Set this one for PDF generation *only*:
+  # default png graphics are awful in PDF form,
+ # better use SVG's instead:
+ <format>pdf:<xsl:param>admon.graphics.extension=".svg"
+ <format>pdf:<xsl:param>use.role.for.mediaobject=1
+ <format>pdf:<xsl:param>preferred.mediaobject.role=print
+ <format>pdf:<xsl:param>img.src.path=$(images_location)/
+ <format>pdf:<xsl:param>draft.mode="no"
+ <format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/math/doc/html
+ <format>pdf:<xsl:param>index.on.type=1
+ ;
+
+
+install pdf-install : standalone : <install-type>PDF <location>. <name>cstdfloat.pdf ;
+
diff --git a/doc/differentiation/numerical_differentiation.qbk b/doc/differentiation/numerical_differentiation.qbk
new file mode 100644
index 0000000..f04161f
--- /dev/null
+++ b/doc/differentiation/numerical_differentiation.qbk
@@ -0,0 +1,108 @@
+[/
+Copyright (c) 2018 Nick Thompson
+Use, modification and distribution are subject to the
+Boost Software License, Version 1.0. (See accompanying file
+LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+]
+
+[section:diff Numerical Differentiation]
+
+[heading Synopsis]
+
+``
+#include <boost/math/tools/numerical_differentiation.hpp>
+
+
+namespace boost::math::tools {
+
+ template <class F, class Real>
+ Real complex_step_derivative(const F f, Real x);
+
+ template <class F, class Real, size_t order = 6>
+ Real finite_difference_derivative(const F f, Real x, Real* error = nullptr);
+
+} // namespaces
+``
+
+[heading Description]
+
+The function `finite_difference_derivative` calculates a finite-difference approximation to the derivative of a function /f/ at a point /x/.
+A basic usage is
+
+ auto f = [](double x) { return std::exp(x); };
+ double x = 1.7;
+ double dfdx = finite_difference_derivative(f, x);
+
+Finite differencing is complicated, as finite-difference approximations to the derivative are /infinitely/ ill-conditioned.
+In addition, for any function implemented in finite-precision arithmetic, the "true" derivative is /zero/ almost everywhere, and undefined at the representable numbers.
+However, some tricks allow reasonable results to be obtained in many cases.
+
+There are two sources of error in finite differences: first, the truncation error arising from using a finite number of samples to cancel out higher-order terms in the Taylor series;
+second, the roundoff error involved in evaluating the function.
+The truncation error goes to zero as /h/ \u2192 0, but the roundoff error becomes unbounded.
+By balancing these two sources of error, we can choose a value of /h/ that minimizes the maximum total error.
+For this reason boost's `finite_difference_derivative` does not require the user to input a stepsize.
+For more details about the theoretical error analysis involved in finite-difference approximations to the derivative, see [@http://web.archive.org/web/20150420195907/http://www.uio.no/studier/emner/matnat/math/MAT-INF1100/h08/kompendiet/diffint.pdf here].
+
+Despite the effort that has gone into choosing a reasonable value of /h/, the problem is still fundamentally ill-conditioned, and hence an error estimate is essential.
+It can be queried as follows:
+
+ double error_estimate;
+ double d = finite_difference_derivative(f, x, &error_estimate);
+
+N.B.: Producing an error estimate requires additional function evaluations, and as such is slower than simple evaluation of the derivative.
+It also expands the domain over which the function must be differentiable, and requires the function to have two more continuous derivatives.
+The error estimate is computed under the assumption that /f/ is evaluated to 1ULP.
+This might seem an extreme assumption, but it is the only sensible one, as the routine cannot know the function's rounding error.
+If the function cannot be evaluated with very great accuracy, Lanczos's smoothing differentiation is recommended as an alternative.
+
+The default order of accuracy is 6, which reflects the fact that people tend to be interested in functions with many continuous derivatives.
+If your function does not have 7 continuous derivatives, it may be of interest to use a lower order method, which can be achieved via (say)
+
+ double d = finite_difference_derivative<decltype(f), Real, 2>(f, x);
+
+This requests a second-order accurate derivative be computed.
+
+It is emphatically /not/ the case that higher order methods always give higher accuracy for smooth functions.
+Higher order methods require more addition of positive and negative terms, which can lead to catastrophic cancellation.
+A function which is very good at making a mockery of finite-difference differentiation is exp(x)/(cos(x)[super 3] + sin(x)[super 3]).
+Differentiating this function by `finite_difference_derivative` in double precision at /x=5.5/ gives zero correct digits at order 4, 6, and 8, but recovers 5 correct digits at order 2.
+These are dangerous waters; use the error estimates to tread carefully.
+
+For a finite-difference method of order /k/, the error is /C/ \u03B5[super k/k+1].
+In the limit /k/ \u2192 \u221E, we see that the error tends to \u03B5, recovering the full precision for the type.
+However, this ignores the fact that higher-order methods require subtracting more nearly-equal (perhaps noisy) terms, so the constant /C/ grows with /k/.
+Since /C/ grows quickly and \u03B5[super k/k+1] approaches \u03B5 slowly, we can see there is a compromise between high-order accuracy and conditioning of the difference quotient.
+In practice we have found that /k=6/ seems to be a good compromise between the two (and have made this the default), but users are encouraged to examine the error estimates to choose an optimal order of accuracy for the given problem.
+
+[table:id Cost of Finite-Difference Numerical Differentiation
+ [[Order of Accuracy] [Function Evaluations] [Error] [Continuous Derivatives Required for Error Estimate to Hold] [Additional Function Evaluations to Produce Error Estimates]]
+ [[1] [2] [\u03B5[super 1/2]] [2] [1]]
+ [[2] [2] [\u03B5[super 2/3]] [3] [2]]
+ [[4] [4] [\u03B5[super 4/5]] [5] [2]]
+ [[6] [6] [\u03B5[super 6/7]] [7] [2]]
+ [[8] [8] [\u03B5[super 8/9]] [9] [2]]
+]
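+
+As an illustration of using the error estimates to choose an order, the following sketch (using only the interface documented above; the numeric results will vary) compares orders 2 and 6 on the troublesome function mentioned above:
+
+    #include <boost/math/tools/numerical_differentiation.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    using boost::math::tools::finite_difference_derivative;
+
+    int main()
+    {
+        // The function exp(x)/(cos(x)^3 + sin(x)^3) discussed above:
+        auto f = [](double x)
+        { return std::exp(x) / (std::pow(std::cos(x), 3) + std::pow(std::sin(x), 3)); };
+        double x = 5.5;
+        double err2, err6;
+        double d2 = finite_difference_derivative<decltype(f), double, 2>(f, x, &err2);
+        double d6 = finite_difference_derivative<decltype(f), double, 6>(f, x, &err6);
+        // The error estimates, not the order, should guide the choice:
+        std::cout << "order 2: " << d2 << " +/- " << err2 << "\n";
+        std::cout << "order 6: " << d6 << " +/- " << err6 << "\n";
+    }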
+
+
+Given all the caveats which must be kept in mind for successful use of finite-difference differentiation, it is reasonable to try to avoid it if possible.
+Boost provides two possibilities: The Chebyshev transform (see [link math_toolkit.sf_poly.chebyshev here]) and the complex step derivative.
+If your function is the restriction to the real line of a holomorphic function which takes real values at real argument, then the *complex step derivative* can be used.
+The idea is very simple: Since /f/ is complex-differentiable, ['f(x+\u2148 h) = f(x) + \u2148 hf'(x) - h[super 2]f''(x)/2 + [bigo](h[super 3])].
+As long as /f(x)/ \u2208 \u211D, then ['f'(x) = \u2111 f(x+\u2148 h)/h + [bigo](h[super 2])].
+This method requires a single complex function evaluation and is not subject to the catastrophic subtractive cancellation that plagues finite-difference calculations.
+
+An example usage:
+
+    auto f = [](std::complex<double> z) { return std::exp(z); };
+    double x = 7.2;
+    double e_prime = complex_step_derivative(f, x);
+
+References:
+
+Squire, William, and George Trapp. ['Using complex variables to estimate derivatives of real functions.] Siam Review 40.1 (1998): 110-112.
+
+Fornberg, Bengt. ['Generation of finite difference formulas on arbitrarily spaced grids.] Mathematics of computation 51.184 (1988): 699-706.
+
+Corless, Robert M., and Nicolas Fillion. ['A graduate introduction to numerical methods.] AMC 10 (2013): 12.
+
+[endsect]
diff --git a/doc/distexplorer/Jamfile.v2 b/doc/distexplorer/Jamfile.v2
new file mode 100644
index 0000000..f318925
--- /dev/null
+++ b/doc/distexplorer/Jamfile.v2
@@ -0,0 +1,56 @@
+# Copyright Paul A. Bristow 2008
+# Copyright John Maddock 2008
+
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt
+# or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+# Reminder: whitespace MUST terminate variable name!
+# so space BEFORE ; and :
+
+# Distexplorer documentation as html from Quickbook.
+
+# project boost/doc ;
+
+using quickbook ;
+
+#path-constant images_location : html ;
+# location of SVG images referenced by Quickbook.
+# screenshots installed as recommended by Sourceforge.
+
+xml distexplorer
+ :
+ distexplorer.qbk
+ :
+;
+
+# import boostbook : boostbook ;
+
+boostbook standalone
+ :
+ distexplorer
+ :
+ # Path for links to Boost:
+ <xsl:param>boost.root=../../../../..
+
+ # Some general style settings:
+ <xsl:param>table.footnote.number.format=1
+ <xsl:param>footnote.number.format=1
+
+ # HTML options first:
+ # Use graphics not text for navigation:
+ <xsl:param>navig.graphics=1
+ # How far down we chunk nested sections, basically all of them:
+ <xsl:param>chunk.section.depth=10
+ # Don't put the first section on the same page as the TOC:
+ <xsl:param>chunk.first.sections=1
+ # How far down sections get TOC's
+ <xsl:param>toc.section.depth=10
+ # Max depth in each TOC:
+ <xsl:param>toc.max.depth=4
+ # How far down we go with TOC's
+ <xsl:param>generate.section.toc.level=10
+ #<xsl:param>root.filename="distexplorer"
+;
+
+
diff --git a/doc/distexplorer/distexplorer.qbk b/doc/distexplorer/distexplorer.qbk
new file mode 100644
index 0000000..7dbe951
--- /dev/null
+++ b/doc/distexplorer/distexplorer.qbk
@@ -0,0 +1,105 @@
+[article Statistical Distribution Explorer
+ [quickbook 1.4]
+ [copyright 2008 Paul A. Bristow, John Maddock]
+ [license
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ [@http://www.boost.org/LICENSE_1_0.txt])
+ ]
+ [authors [Bristow, Paul A.], [Maddock, John]]
+ [category math]
+ [purpose mathematics]
+ [/last-revision $Date$]
+]
+
+A Windows utility to show the properties of statistical distributions
+using parameters provided interactively by the user.
+
+The distributions provided are:
+
+*bernoulli
+*beta_distribution
+*binomial_distribution
+*cauchy
+*chi_squared
+*exponential
+*extreme_value
+*fisher_f
+*gamma_distribution
+*lognormal_distribution
+*negative_binomial_distribution
+*normal_distribution
+*pareto
+*poisson
+*rayleigh
+*students_t
+*triangular
+*uniform
+*weibull
+
+Properties of distributions computed are:
+
+*mean
+*mode
+*median
+*variance
+*standard deviation
+*coefficient of variation,
+*skewness
+*kurtosis
+*excess
+*range supported
+
+Calculated, from values provided, are:
+
+*probability density (or mass) function (PDF)
+*cumulative distribution function (CDF), and complement
+*Quantiles (percentiles or fractiles) are calculated for typical risk (alpha) probabilities (0.001, 0.01, 0.5, 0.1, 0.333)
+and for additional probabilities provided by the user.
+
+Results can be saved to text files using Save or SaveAs.
+All the values on the four tabs are output to the file chosen,
+and are tab separated to assist input to other programs,
+for example, spreadsheets or text editors.
+
+Note: Excel (for example) only shows 10 decimal digits by default:
+to display the maximum possible precision (about 15 decimal digits),
+it is necessary to format all cells to display this precision.
+Although unusually accurate, not all values computed by Distexplorer will be as accurate as this.
+Values shown as NaN cannot be calculated from the value(s) given,
+most commonly because the value input is outside the range for the distribution.
+
+For more information, including downloads, see
+
+[@http://distexplorer.sourceforge.net/ Distexplorer at Sourceforge]
+
+This Microsoft Windows 32-bit package distribution.exe
+was generated from a C# program
+and uses a boost_math.dll generated using the
+Boost.Math C++ source code from the Boost.Math Toolkit, compiled in CLI mode,
+containing the underlying statistical distribution classes and functions.
+
+All source code is freely available for view and use under the
+[@http://www.boost.org/LICENSE_1_0.txt Boost Open Source License].
+
+[@https://svn.boost.org/svn/boost/sandbox/math_toolkit/libs/math/dot_net_example
+Math Toolkit C++ source code]
+to produce boost_math.dll is in the most recent [@http://www.boost.org Boost] release, initially 1.35.0.
+
+It is distributed as a single Windows Installer package Setupdistex.msi.
+Unzip the distexplorer.zip to a temporary location of your choice and run setup.exe.
+
+(Note that .NET framework 2.0 and VCredist are requirements for this program.
+Most recent and updated Windows environments will already have these,
+but they are quickly, easily and safely installed from the Microsoft site if required.)
+
+(The package cannot be run on other platforms at present but it should be possible
+to build an equivalent utility on any C/C++ platform if anyone would like to undertake this task.)
+
+[/ Distexplorer.qbk
+ Copyright 2008 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distexplorer/html/index.html b/doc/distexplorer/html/index.html
new file mode 100644
index 0000000..3c53799
--- /dev/null
+++ b/doc/distexplorer/html/index.html
@@ -0,0 +1,216 @@
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
+<title>Statistical Distribution Explorer</title>
+<link rel="stylesheet" href="../../../../../doc/src/boostbook.css" type="text/css">
+<meta name="generator" content="DocBook XSL Stylesheets V1.75.2">
+<link rel="home" href="index.html" title="Statistical Distribution Explorer">
+</head>
+<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
+<table cellpadding="2" width="100%"><tr>
+<td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../../boost.png"></td>
+<td align="center"><a href="../../../../../index.html">Home</a></td>
+<td align="center"><a href="../../../../../libs/libraries.htm">Libraries</a></td>
+<td align="center"><a href="http://www.boost.org/users/people.html">People</a></td>
+<td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td>
+<td align="center"><a href="../../../../../more/index.htm">More</a></td>
+</tr></table>
+<hr>
+<div class="spirit-nav"></div>
+<div class="article">
+<div class="titlepage">
+<div>
+<div><h2 class="title">
+<a name="statistical_distribution_explorer"></a>Statistical Distribution Explorer</h2></div>
+<div><div class="authorgroup">
+<div class="author"><h3 class="author">
+<span class="firstname">Paul A.</span> <span class="surname">Bristow</span>
+</h3></div>
+<div class="author"><h3 class="author">
+<span class="firstname">John</span> <span class="surname">Maddock</span>
+</h3></div>
+</div></div>
+<div><p class="copyright">Copyright © 2008 Paul A. Bristow, John Maddock</p></div>
+<div><div class="legalnotice">
+<a name="id759711"></a><p>
+ Distributed under the Boost Software License, Version 1.0. (See accompanying
+ file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
+ </p>
+</div></div>
+</div>
+<hr>
+</div>
+<p>
+ A Windows utility to show the properties of statistical distributions using parameters
+ provided interactively by the user.
+ </p>
+<p>
+ The distributions provided are:
+ </p>
+<div class="itemizedlist"><ul class="itemizedlist" type="disc">
+<li class="listitem">
+ bernoulli
+ </li>
+<li class="listitem">
+ beta_distribution
+ </li>
+<li class="listitem">
+ binomial_distribution
+ </li>
+<li class="listitem">
+ cauchy
+ </li>
+<li class="listitem">
+ chi_squared
+ </li>
+<li class="listitem">
+ exponential
+ </li>
+<li class="listitem">
+ extreme_value
+ </li>
+<li class="listitem">
+ fisher_f
+ </li>
+<li class="listitem">
+ gamma_distribution
+ </li>
+<li class="listitem">
+ lognormal_distribution
+ </li>
+<li class="listitem">
+ negative_binomial_distribution
+ </li>
+<li class="listitem">
+ normal_distribution
+ </li>
+<li class="listitem">
+ pareto
+ </li>
+<li class="listitem">
+ poisson
+ </li>
+<li class="listitem">
+ rayleigh
+ </li>
+<li class="listitem">
+ students_t
+ </li>
+<li class="listitem">
+ triangular
+ </li>
+<li class="listitem">
+ uniform
+ </li>
+<li class="listitem">
+ weibull
+ </li>
+</ul></div>
+<p>
+ Properties of distributions computed are:
+ </p>
+<div class="itemizedlist"><ul class="itemizedlist" type="disc">
+<li class="listitem">
+ mean
+ </li>
+<li class="listitem">
+ mode
+ </li>
+<li class="listitem">
+ median
+ </li>
+<li class="listitem">
+ variance
+ </li>
+<li class="listitem">
+ standard deviation
+ </li>
+<li class="listitem">
+ coefficient of variation,
+ </li>
+<li class="listitem">
+ skewness
+ </li>
+<li class="listitem">
+ kurtosis
+ </li>
+<li class="listitem">
+ excess
+ </li>
+<li class="listitem">
+ range supported
+ </li>
+</ul></div>
+<p>
+ Calculated, from values provided, are:
+ </p>
+<div class="itemizedlist"><ul class="itemizedlist" type="disc">
+<li class="listitem">
+ probability density (or mass) function (PDF)
+ </li>
+<li class="listitem">
+ cumulative distribution function (CDF), and complement
+ </li>
+<li class="listitem">
+ Quantiles (percentiles) are calculated for typical risk (alpha) probabilities
+ (0.001, 0.01, 0.5, 0.1, 0.333) and for additional probabilities provided
+ by the user.
+ </li>
+</ul></div>
+<p>
+ Results can be saved to text files using Save or SaveAs. All the values on the
+ four tabs are output to the file chosen, and are tab separated to assist input
+ to other programs, for example, spreadsheets or text editors.
+ </p>
+<p>
+ Note: Excel (for example), only shows 10 decimal digits, by default: to display
+          the maximum possible precision (about 15 decimal digits), it is necessary to
+ format all cells to display this precision. Although unusually accurate, not
+ all values computed by Distexplorer will be as accurate as this. Values shown
+ as NaN cannot be calculated from the value(s) given, most commonly because the
+ value input is outside the range for the distribution.
+ </p>
+<p>
+ For more information, including downloads, see
+ </p>
+<p>
+ <a href="http://distexplorer.sourceforge.net/" target="_top">Distexplorer at Sourceforge</a>
+ </p>
+<p>
+ This Microsoft Windows 32 package distribution.exe was generated from a C# program
+ and uses a boost_math.dll generated using the Boost.Math C++ source code from
+ the Boost.Math Toolkit, compiled in CLI mode, containing the underlying statistical
+ distribution classes and functions.
+ </p>
+<p>
+ All source code is freely available for view and use under the <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">Boost
+ Open Source License</a>.
+ </p>
+<p>
+      <a href="https://svn.boost.org/svn/boost/sandbox/math_toolkit/libs/math/dot_net_example" target="_top">Math
+ Toolkit C++ source code</a> to produce boost_math.dll is in the most recent
+ <a href="http://www.boost.org" target="_top">Boost</a> release, initially 1.35.0.
+ </p>
+<p>
+ It is distributed as a single Windows Installer package Setupdistex.msi. Unzip
+ the distexplorer.zip to a temporary location of your choice and run setup.exe.
+ </p>
+<p>
+ (Note that .NET framework 2.0 and VCredist are requirements for this program.
+ Most recent and updated Windows environments will already have these, but they
+ are quickly, easily and safely installed from the Microsoft site if required.)
+ </p>
+<p>
+ (The package cannot be run on other platforms at present but it should be possible
+ to build an equivalent utility on any C/C++ platform if anyone would like to
+ undertake this task.)
+ </p>
+</div>
+<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
+<td align="left"><p><small>Last revised: July 01, 2010 at 21:45:23 GMT</small></p></td>
+<td align="right"><div class="copyright-footer"></div></td>
+</tr></table>
+<hr>
+<div class="spirit-nav"></div>
+</body>
+</html>
diff --git a/doc/distributions/arcsine.qbk b/doc/distributions/arcsine.qbk
new file mode 100644
index 0000000..8dc731e
--- /dev/null
+++ b/doc/distributions/arcsine.qbk
@@ -0,0 +1,288 @@
+[section:arcsine_dist Arcsine Distribution]
+
+[import ../../example/arcsine_example.cpp] [/ for arcsine snips below]
+
+
+``#include <boost/math/distributions/arcsine.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class arcsine_distribution;
+
+ typedef arcsine_distribution<double> arcsine; // double precision standard arcsine distribution [0,1].
+
+ template <class RealType, class ``__Policy``>
+ class arcsine_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructor from two range parameters, x_min and x_max:
+ arcsine_distribution(RealType x_min, RealType x_max);
+
+ // Range Parameter accessors:
+ RealType x_min() const;
+ RealType x_max() const;
+ };
+ }} // namespaces
+
+The class type `arcsine_distribution` represents an
+[@http://en.wikipedia.org/wiki/arcsine_distribution arcsine]
+[@http://en.wikipedia.org/wiki/Probability_distribution probability distribution function].
+The arcsine distribution is so named because its CDF involves the inverse sine function, sin[super -1] or arcsine.
+
+This is implemented as a generalized version with support from ['x_min] to ['x_max],
+providing the 'standard arcsine distribution' as the default, with ['x_min = 0] and ['x_max = 1].
+(A few sources make other choices for 'standard'.)
+
+The arcsine distribution is generalized to include any bounded support ['a <= x <= b] by
+[@http://reference.wolfram.com/language/ref/ArcSinDistribution.html Wolfram] and
+[@http://en.wikipedia.org/wiki/arcsine_distribution Wikipedia],
+but also using ['location] and ['scale] parameters by
+[@http://www.math.uah.edu/stat/index.html Virtual Laboratories in Probability and Statistics]
+[@http://www.math.uah.edu/stat/special/Arcsine.html Arcsine distribution].
+The end-point version is simpler and more obvious, so we implement that.
+If desired, [@http://en.wikipedia.org/wiki/arcsine_distribution this]
+outlines how the __beta_distrib can be used to add a shape factor.
+
+The [@http://en.wikipedia.org/wiki/Probability_density_function probability density function PDF]
+for the [@http://en.wikipedia.org/wiki/arcsine_distribution arcsine distribution]
+defined on the interval \[['x_min, x_max]\] is given by:
+
+[figspace] [figspace] f(x; x_min, x_max) = 1 / ([pi][sdot][sqrt]((x - x_min)[sdot](x_max - x)))
+
+For example, __WolframAlpha arcsine distribution, from input of
+
+ N[PDF[arcsinedistribution[0, 1], 0.5], 50]
+
+computes the PDF value
+
+ 0.63661977236758134307553505349005744813783858296183
+
+The Probability Density Functions (PDF) of generalized arcsine distributions are symmetric U-shaped curves,
+centered on ['(x_min + x_max)/2],
+highest (infinite) near the two extrema, and quite flat over the central region.
+
+If the random variate ['x] is ['x_min] or ['x_max], then the PDF is infinite.
+If the random variate ['x] is ['x_min], then the CDF is zero.
+If the random variate ['x] is ['x_max], then the CDF is unity.
+
+The 'Standard' (0, 1) arcsine distribution is shown in blue,
+along with some generalized examples with other ['x] ranges.
+
+[graph arcsine_pdf]
+
+The Cumulative Distribution Function CDF is defined as
+
+[figspace] [figspace] F(x) = 2[sdot]arcsin([sqrt]((x - x_min)/(x_max - x_min))) / [pi]
+
+[graph arcsine_cdf]
+
+[h5 Constructor]
+
+ arcsine_distribution(RealType x_min, RealType x_max);
+
+constructs an arcsine distribution with range parameters ['x_min] and ['x_max].
+
+Requires ['x_min < x_max], otherwise __domain_error is called.
+
+For example:
+
+ arcsine_distribution<> myarcsine(-2, 4);
+
+constructs an arcsine distribution with ['x_min = -2] and ['x_max = 4].
+
+Default values of ['x_min = 0] and ['x_max = 1] and a `typedef arcsine_distribution<double> arcsine;` mean that
+
+ arcsine as;
+
+constructs a 'Standard 01' arcsine distribution.
+
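+As a quick illustrative sketch (not part of this library's example snippets), we can check the standard arcsine PDF at the mid-point: at ['x] = 0.5 the PDF is 1/([pi][sdot][sqrt](0.25)) = 2/[pi], the Wolfram value quoted above:
+
+    #include <boost/math/distributions/arcsine.hpp>
+    #include <iostream>
+
+    int main()
+    {
+        using boost::math::arcsine;
+        arcsine as; // 'Standard 01' arcsine distribution.
+        std::cout << pdf(as, 0.5) << "\n"; // ~0.63661977236758...
+        std::cout << cdf(as, 0.5) << "\n"; // 0.5, by symmetry.
+    }
+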
+[h5 Parameter Accessors]
+
+ RealType x_min() const;
+ RealType x_max() const;
+
+Return the parameter ['x_min] or ['x_max] from which this distribution was constructed.
+
+So, for example:
+
+[arcsine_snip_8]
+
+[h4 Non-member Accessor Functions]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The formulae for calculating these are shown in the table below, and at
+[@http://mathworld.wolfram.com/arcsineDistribution.html Wolfram Mathworld].
+
+[note There are always [*two] values for the [*mode], at ['x_min] and at ['x_max] (default 0 and 1),
+so the mode is not unique; we therefore raise the exception __domain_error.
+At these extrema, the PDFs are infinite, and the CDFs zero or unity.]
+
+[h4 Applications]
+
+The arcsine distribution is useful to describe
+[@http://en.wikipedia.org/wiki/Random_walk random walks] (including drunken walks),
+[@http://en.wikipedia.org/wiki/Brownian_motion Brownian motion],
+[@http://en.wikipedia.org/wiki/Wiener_process Wiener processes],
+[@http://en.wikipedia.org/wiki/Bernoulli_trial Bernoulli trials],
+and their application to stock market and other
+[@http://en.wikipedia.org/wiki/Gambler%27s_ruin ruinous gambling games].
+
+The random variate ['x] is constrained to the range ['x_min] to ['x_max] (for our 'standard' distribution, 0 and 1),
+and is usually some fraction. For any other ['x_min] and ['x_max] a fraction can be obtained from ['x] using
+
+[sixemspace] fraction = (x - x_min) / (x_max - x_min)
+
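+For example, with ['x_min] = -2 and ['x_max] = 4, the variate ['x] = 1 gives fraction = (1 - (-2)) / (4 - (-2)) = 1/2.
+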
+The simplest example is tossing heads and tails with a fair coin and modelling the risk of losing, or winning.
+Walkers (molecules, drunks...) moving left or right of a centre line are another common example.
+
+The random variate ['x] is the fraction of time spent on the 'winning' side.
+If half the time is spent on the 'winning' side (and so the other half on the 'losing' side) then ['x = 1/2].
+
+For large numbers of tosses, this is modelled by the (standard \[0,1\]) arcsine distribution,
+and the PDF can be calculated thus:
+
+[arcsine_snip_2]
+
+From the plot of the PDF, it is clear that ['x] = [frac12] is the [*minimum] of the curve,
+so this is the [*least likely] scenario.
+(This is highly counter-intuitive, considering that fair tosses must [*eventually] become equal.
+It turns out that ['eventually] is not just very long, but [*infinite]!).
+
+The [*most likely] scenarios are towards the extrema where ['x] = 0 or ['x] = 1.
+
+If the fraction of time on the left is [frac14],
+it is only slightly more likely, because the curve is quite flat-bottomed.
+
+[arcsine_snip_3]
+
+If we consider fair coin-tossing games played for 100 days
+(hypothetically continuously, to be 'at-limit'),
+then in a fraction 0.144 of the games the person winning after day 5 will never lose the lead.
+
+We can easily compute this by setting ['x] = 5./100 = 0.05
+
+[arcsine_snip_4]
+
+Similarly, we can compute from a fraction of 0.05/2 = 0.025
+(halved because we are considering both winners and losers)
+that 1 - 0.025, or 97.5%, of the gamblers (walkers, particles...) remain on the [*same side] of the origin
+
+[arcsine_snip_5]
+
+(use of the complement gives a bit more clarity,
+and avoids potential loss of accuracy when ['x] is close to unity, see __why_complements).
+
+[arcsine_snip_6]
+
+or we can reverse the calculation by assuming a fraction of time on one side, say fraction 0.2,
+
+[arcsine_snip_7]
+
+[*Summary]: Every time we toss, the odds are equal,
+so on average we have the same chance of winning and losing.
+
+But this is [*not true] for an individual game, where one will be [*mostly in a bad or good patch].
+
+This is quite counter-intuitive to most people, but the mathematics is clear,
+and gamblers continue to provide proof.
+
+[*Moral]: if you are in a losing patch, leave the game.
+(Because the odds of recovering to a good patch are poor.)
+
+[*Corollary]: Quit while you are ahead?
+
+A working example is at [@../../example/arcsine_example.cpp arcsine_example.cpp],
+including sample output.
+
+[h4 Related distributions]
+
+The arcsine distribution with ['x_min = 0] and ['x_max = 1] is a special case of the
+__beta_distrib with [alpha] = 1/2 and [beta] = 1/2.
+
+[h4 Accuracy]
+
+This distribution is implemented using the sqrt, sin, cos, asin and acos functions,
+which are normally accurate to a few __epsilon.
+But all values suffer from [@http://en.wikipedia.org/wiki/Loss_of_significance loss of significance or cancellation error]
+for values of ['x] close to ['x_max].
+For example, for a standard [0, 1] arcsine distribution ['as], the pdf is symmetric about random variate ['x = 0.5]
+so that one would expect `pdf(as, 0.01) == pdf(as, 0.99)`. But as ['x] nears unity, there is increasing
+[@http://en.wikipedia.org/wiki/Loss_of_significance loss of significance].
+To counteract this, the complement versions of CDF and quantile
+are implemented with alternative expressions using ['cos[super -1]] instead of ['sin[super -1]].
+Users should see __why_complements for guidance on when to avoid loss of accuracy by using complements.
+
+[h4 Testing]
+The results were tested against a few accurate spot values computed by __WolframAlpha, for example:
+
+ N[PDF[arcsinedistribution[0, 1], 0.5], 50]
+ 0.63661977236758134307553505349005744813783858296183
+
+[h4 Implementation]
+
+In the following table ['a] and ['b] are the parameters ['x_min][space] and ['x_max],
+['x] is the random variable, ['p] is the probability and its complement ['q = 1-p].
+
+[table
+[[Function][Implementation Notes]]
+[[support] [x [isin] \[a, b\], default x [isin] \[0, 1\] ]]
+[[pdf] [f(x; a, b) = 1/([pi][sdot][sqrt]((x - a)[sdot](b - x)))]]
+[[cdf] [F(x) = 2[sdot]sin[super -1]([sqrt]((x - a) / (b - a))) / [pi] ]]
+[[cdf of complement] [2[sdot]cos[super -1]([sqrt]((x - a) / (b - a))) / [pi]]]
+[[quantile] [-a[sdot]sin[super 2]([frac12][pi][sdot]p) + a + b[sdot]sin[super 2]([frac12][pi][sdot]p)]]
+[[quantile from the complement] [-a[sdot]cos[super 2]([frac12][pi][sdot]q) + a + b[sdot]cos[super 2]([frac12][pi][sdot]q)]]
+[[mean] [[frac12](a+b)]]
+[[median] [[frac12](a+b)]]
+[[mode] [ x [isin] \[a, b\], so raises domain_error (returning NaN).]]
+[[variance] [(b - a)[super 2] / 8]]
+[[skewness] [0]]
+[[kurtosis excess] [ -3/2 ]]
+[[kurtosis] [kurtosis_excess + 3]]
+]
+
+The quantile was calculated using an expression obtained by using __WolframAlpha
+to invert the formula for the CDF thus
+
+ solve [p - 2/pi sin^-1(sqrt((x-a)/(b-a))) = 0, x]
+
+which was interpreted as
+
+ Solve[p - (2 ArcSin[Sqrt[(-a + x)/(-a + b)]])/Pi == 0, x, MaxExtraConditions -> Automatic]
+
+and produced the resulting expression
+
+ x = -a sin^2((pi p)/2)+a+b sin^2((pi p)/2)
+
+Thanks to Wolfram for providing this facility.
+
+[h4 References]
+
+* [@http://en.wikipedia.org/wiki/arcsine_distribution Wikipedia arcsine distribution]
+* [@http://en.wikipedia.org/wiki/Beta_distribution Wikipedia Beta distribution]
+* [@http://mathworld.wolfram.com/BetaDistribution.html Wolfram MathWorld]
+* [@http://www.wolframalpha.com/ Wolfram Alpha]
+
+[h4 Sources]
+
+*[@http://estebanmoro.org/2009/04/the-probability-of-going-through-a-bad-patch The probability of going through a bad patch] Esteban Moro's Blog.
+*[@http://www.gotohaggstrom.com/What%20do%20schmucks%20and%20the%20arc%20sine%20law%20have%20in%20common.pdf What do schmucks and the arc sine law have in common] Peter Haggstrom.
+*[@http://www.math.uah.edu/stat/special/Arcsine.html arcsine distribution].
+*[@http://reference.wolfram.com/language/ref/ArcSinDistribution.html Wolfram reference arcsine examples].
+*[@http://www.math.harvard.edu/library/sternberg/slides/1180908.pdf Shlomo Sternberg slides].
+
+
+[endsect] [/section:arcsine_dist arcsine]
+
+[/ arcsine.qbk
+ Copyright 2014 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/background.qbk b/doc/distributions/background.qbk
new file mode 100644
index 0000000..09cb043
--- /dev/null
+++ b/doc/distributions/background.qbk
@@ -0,0 +1,84 @@
+[section:variates Random Variates and Distribution Parameters]
+
+[@http://en.wikipedia.org/wiki/Random_variate Random variates]
+and [@http://en.wikipedia.org/wiki/Parameter distribution parameters]
+are conventionally distinguished (for example in Wikipedia and Wolfram MathWorld)
+by placing a semi-colon after the __random_variate (whose value you 'choose'),
+to separate the variate from the parameter(s) that define the shape of the distribution.
+
+For example, the binomial distribution has two parameters:
+n (the number of trials) and p (the probability of success on one trial).
+It also has the __random_variate /k/: the number of successes observed.
+This means the probability density\/mass function (pdf) is written as ['f(k; n, p)].
+
+Translating this into code the `binomial_distribution` constructor
+therefore has two parameters:
+
+ binomial_distribution(RealType n, RealType p);
+
+The function `pdf`, by contrast, has one argument specifying the distribution type
+(which includes its parameters, if any),
+and a second argument for the __random_variate. So taking our binomial distribution
+example, we would write:
+
+ pdf(binomial_distribution<RealType>(n, p), k);
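+
+As a minimal self-contained sketch of this convention (the values of n, p and k here are arbitrary):
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <iostream>
+
+    int main()
+    {
+        using boost::math::binomial_distribution;
+        double n = 10, p = 0.5; // Distribution parameters.
+        double k = 4;           // Random variate.
+        // f(k; n, p): the parameters construct the distribution,
+        // the variate is passed to pdf:
+        std::cout << pdf(binomial_distribution<double>(n, p), k) << "\n";
+    }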
+
+[endsect]
+
+[section:dist_params Discrete Probability Distributions]
+
+Note that the [@http://en.wikipedia.org/wiki/Discrete_probability_distribution
+discrete distributions], including the binomial, negative binomial, Poisson & Bernoulli,
+are all mathematically defined as discrete functions:
+only integral values of the __random_variate are envisaged
+and the functions are only defined at these integral values.
+However because the method of calculation often uses continuous functions,
+it is convenient to treat them as if they were continuous functions,
+and permit non-integral values of their parameters.
+
+To enforce a strict mathematical model,
+users may use floor or ceil functions on the __random_variate,
+prior to calling the distribution function, to enforce integral values.
+
+For similar reasons, in continuous distributions, parameters like degrees of freedom
+that might appear to be integral, are treated as real values
+(and are promoted from integer to floating-point if necessary).
+Note, however, that there are a small number of situations where non-integral
+degrees of freedom do have a genuine meaning.
+
+Generally speaking there is no loss of performance from allowing real-valued
+parameters: the underlying special functions contain optimizations for
+integer-valued arguments when applicable.
+
+[caution
+The quantile function of a discrete distribution will by
+default return an integer result that has been
+/rounded outwards/. That is to say lower quantiles (where the probability is
+less than 0.5) are rounded downward, and upper quantiles (where the probability
+is greater than 0.5) are rounded upwards. This behaviour
+ensures that if an X% quantile is requested, then /at least/ the requested
+coverage will be present in the central region, and /no more than/
+the requested coverage will be present in the tails.
+
+This behaviour can be changed so that the quantile functions are rounded
+differently, or even return a real-valued result using
+[link math_toolkit.pol_overview Policies]. It is strongly
+recommended that you read the tutorial
+[link math_toolkit.pol_tutorial.understand_dis_quant
+Understanding Quantiles of Discrete Distributions] before
+using the quantile function on a discrete distribution. The
+[link math_toolkit.pol_ref.discrete_quant_ref reference docs]
+describe how to change the rounding policy
+for these distributions.
+]
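+
+A small sketch of this outward rounding (the binomial parameters are arbitrary; only the rounding behaviour is the point):
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <iostream>
+
+    int main()
+    {
+        using boost::math::binomial;
+        binomial b(50, 0.5);
+        // The lower quantile (probability < 0.5) is rounded downward,
+        // the upper quantile (probability > 0.5) is rounded upward:
+        std::cout << quantile(b, 0.05) << "\n";
+        std::cout << quantile(b, 0.95) << "\n";
+    }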
+
+[endsect]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+
diff --git a/doc/distributions/bernoulli.qbk b/doc/distributions/bernoulli.qbk
new file mode 100644
index 0000000..ae9c577
--- /dev/null
+++ b/doc/distributions/bernoulli.qbk
@@ -0,0 +1,118 @@
+[section:bernoulli_dist Bernoulli Distribution]
+
+``#include <boost/math/distributions/bernoulli.hpp>``
+
+ namespace boost{ namespace math{
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class bernoulli_distribution;
+
+ typedef bernoulli_distribution<> bernoulli;
+
+ template <class RealType, class ``__Policy``>
+ class bernoulli_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ bernoulli_distribution(RealType p); // Constructor.
+ // Accessor function.
+ RealType success_fraction() const
+ // Probability of success (as a fraction).
+ };
+ }} // namespaces
+
+The Bernoulli distribution is a discrete distribution of the outcome
+of a single trial with only two results, 0 (failure) or 1 (success),
+with a probability of success p.
+
+The Bernoulli distribution is the simplest building block
+on which other discrete distributions of
+sequences of independent Bernoulli trials can be based.
+
+The Bernoulli distribution is the binomial distribution (n = 1, p) with only one trial.
+
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function pdf]
+f(0) = 1 - p, f(1) = p.
+[@http://en.wikipedia.org/wiki/Cumulative_Distribution_Function Cumulative distribution function]
+D(k) = if (k == 0) 1 - p else 1.
+
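+For example, a minimal sketch using the `bernoulli` typedef with p = 0.25:
+
+    #include <boost/math/distributions/bernoulli.hpp>
+    #include <iostream>
+
+    int main()
+    {
+        using boost::math::bernoulli;
+        bernoulli b(0.25);              // Success probability p = 0.25.
+        std::cout << pdf(b, 0) << "\n"; // f(0) = 1 - p = 0.75
+        std::cout << pdf(b, 1) << "\n"; // f(1) = p = 0.25
+        std::cout << cdf(b, 0) << "\n"; // D(0) = 1 - p = 0.75
+    }
+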
+The following graph illustrates how the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function pdf]
+varies with the outcome of the single trial:
+
+[graph bernoulli_pdf]
+
+and the [@http://en.wikipedia.org/wiki/Cumulative_Distribution_Function Cumulative distribution function]
+
+[graph bernoulli_cdf]
+
+[h4 Member Functions]
+
+ bernoulli_distribution(RealType p);
+
+Constructs a [@http://en.wikipedia.org/wiki/bernoulli_distribution
+bernoulli distribution] with success_fraction /p/.
+
+ RealType success_fraction() const
+
+Returns the /success_fraction/ parameter of this distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is 0 and 1,
+and the useful supported range is only 0 or 1.
+
+Outside this range, functions are undefined, or may throw a __domain_error exception
+and make an error message available.
+
+[h4 Accuracy]
+
+The Bernoulli distribution is implemented with simple arithmetic operators
+and so should have errors within an epsilon or two.
+
+[h4 Implementation]
+
+In the following table /p/ is the probability of success and /q = 1-p/.
+/k/ is the random variate, either 0 or 1.
+
+[note The Bernoulli distribution is implemented here as a /strict discrete/ distribution.
+If a generalised version, allowing k to be any real, is required then
+the binomial distribution with a single trial should be used, for example:
+
+`binomial_distribution(1, 0.25)`
+]
+
+[table
+[[Function][Implementation Notes]]
+[[Supported range][{0, 1}]]
+[[pdf][Using the relation: pdf = 1 - p for k = 0, else p ]]
+[[cdf][Using the relation: cdf = 1 - p for k = 0, else 1]]
+[[cdf complement][q = 1 - p]]
+[[quantile][if x <= (1-p) 0 else 1]]
+[[quantile from the complement][if x <= (1-p) 1 else 0]]
+[[mean][p]]
+[[variance][p * (1 - p)]]
+[[mode][if (p < 0.5) 0 else 1]]
+[[skewness][(1 - 2 * p) / sqrt(p * q)]]
+[[kurtosis excess][(6 * p * p - 6 * p + 1) / (p * q)]]
+[[kurtosis][kurtosis excess + 3]]
+]
+
+[h4 References]
+* [@http://en.wikipedia.org/wiki/Bernoulli_distribution Wikipedia Bernoulli distribution]
+* [@http://mathworld.wolfram.com/BernoulliDistribution.html Weisstein, Eric W. "Bernoulli Distribution." From MathWorld--A Wolfram Web Resource.]
+
+[endsect] [/section:bernoulli_dist bernoulli]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/beta.qbk b/doc/distributions/beta.qbk
new file mode 100644
index 0000000..97ca017
--- /dev/null
+++ b/doc/distributions/beta.qbk
@@ -0,0 +1,280 @@
+[section:beta_dist Beta Distribution]
+
+``#include <boost/math/distributions/beta.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class beta_distribution;
+
+ // typedef beta_distribution<double> beta;
+ // Note that this is deliberately NOT provided,
+ // to avoid a clash with the function name beta.
+
+ template <class RealType, class ``__Policy``>
+ class beta_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Constructor from two shape parameters, alpha & beta:
+ beta_distribution(RealType a, RealType b);
+
+ // Parameter accessors:
+ RealType alpha() const;
+ RealType beta() const;
+
+ // Parameter estimators of alpha or beta from mean and variance.
+ static RealType find_alpha(
+ RealType mean, // Expected value of mean.
+ RealType variance); // Expected value of variance.
+
+ static RealType find_beta(
+ RealType mean, // Expected value of mean.
+ RealType variance); // Expected value of variance.
+
+ // Parameter estimators from
+ // either alpha or beta, and x and probability.
+
+ static RealType find_alpha(
+ RealType beta, // from beta.
+ RealType x, // x.
+ RealType probability); // cdf
+
+ static RealType find_beta(
+ RealType alpha, // alpha.
+ RealType x, // probability x.
+ RealType probability); // probability cdf.
+ };
+
+ }} // namespaces
+
+The class type `beta_distribution` represents a
+[@http://en.wikipedia.org/wiki/Beta_distribution beta ]
+[@http://en.wikipedia.org/wiki/Probability_distribution probability distribution function].
+
+The [@http://mathworld.wolfram.com/BetaDistribution.html beta distribution]
+is used as a [@http://en.wikipedia.org/wiki/Prior_distribution prior distribution]
+for binomial proportions in
+[@http://mathworld.wolfram.com/BayesianAnalysis.html Bayesian analysis].
+
+See also:
+[@http://documents.wolfram.com/calculationcenter/v2/Functions/ListsMatrices/Statistics/BetaDistribution.html beta distribution]
+and [@http://en.wikipedia.org/wiki/Bayesian_statistics Bayesian statistics].
+
+How the beta distribution is used for
+[@http://home.uchicago.edu/~grynav/bayes/ABSLec5.ppt
+Bayesian analysis of one parameter models]
+is discussed by Jeff Grynaviski.
+
+The [@http://en.wikipedia.org/wiki/Probability_density_function probability density function PDF]
+for the [@http://en.wikipedia.org/wiki/Beta_distribution beta distribution]
+defined on the interval \[0,1\] is given by:
+
+f(x;[alpha],[beta]) = x[super[alpha] - 1] (1 - x)[super[beta] -1] / B([alpha], [beta])
+
+where B([alpha], [beta]) is the
+[@http://en.wikipedia.org/wiki/Beta_function beta function],
+implemented in this library as __beta. Division by the beta function
+ensures that the pdf is normalized to the range zero to unity.
+
+The following graph illustrates examples of the pdf for various values
+of the shape parameters. Note the [alpha] = [beta] = 2 (blue line)
+is dome-shaped, and might be approximated by a symmetrical triangular
+distribution.
+
+[graph beta_pdf]
+
+If [alpha] = [beta] = 1, then it is a __space
+[@http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 uniform distribution],
+equal to unity in the entire interval x = 0 to 1.
+If [alpha] __space and [beta] __space are < 1, then the pdf is U-shaped.
+If [alpha] != [beta], then the shape is asymmetric
+and could be approximated by a triangle
+whose apex is away from the centre (where x = half).
+
+[h4 Member Functions]
+
+[h5 Constructor]
+
+ beta_distribution(RealType alpha, RealType beta);
+
+Constructs a beta distribution with shape parameters /alpha/ and /beta/.
+
+Requires alpha, beta > 0, otherwise __domain_error is called. Note that
+technically the beta distribution is defined for alpha, beta >= 0, but
+it's not clear whether any program can actually make use of that latitude,
+or how many of the non-member functions can be usefully defined in that case.
+Therefore for now, we regard it as an error if alpha or beta is zero.
+
+For example:
+
+ beta_distribution<> mybeta(2, 5);
+
+Constructs a beta distribution with alpha = 2 and beta = 5 (shown in yellow
+in the graph above).
+
+[h5 Parameter Accessors]
+
+ RealType alpha() const;
+
+Returns the parameter /alpha/ from which this distribution was constructed.
+
+ RealType beta() const;
+
+Returns the parameter /beta/ from which this distribution was constructed.
+
+So for example:
+
+ beta_distribution<> mybeta(2, 5);
+ assert(mybeta.alpha() == 2.); // mybeta.alpha() returns 2
+ assert(mybeta.beta() == 5.); // mybeta.beta() returns 5
+
+[h4 Parameter Estimators]
+
+Two pairs of parameter estimators are provided.
+
+One estimates either [alpha] __space or [beta] __space
+from presumed-known mean and variance.
+
+The other pair estimates either [alpha] __space or [beta] __space from
+the cdf and x.
+
+It is also possible to estimate [alpha] __space and [beta] __space from
+'known' mode & quantile. For example, calculators are provided by the
+[@http://www.ausvet.com.au/pprev/content.php?page=PPscript
+Pooled Prevalence Calculator] and
+[@http://www.epi.ucdavis.edu/diagnostictests/betabuster.html Beta Buster]
+but this is not yet implemented here.
+
+ static RealType find_alpha(
+ RealType mean, // Expected value of mean.
+ RealType variance); // Expected value of variance.
+
+Returns the unique value of [alpha][space] that corresponds to a
+beta distribution with mean /mean/ and variance /variance/.
+
+ static RealType find_beta(
+ RealType mean, // Expected value of mean.
+ RealType variance); // Expected value of variance.
+
+Returns the unique value of [beta][space] that corresponds to a
+beta distribution with mean /mean/ and variance /variance/.
+
+ static RealType find_alpha(
+ RealType beta, // from beta.
+ RealType x, // x.
+ RealType probability); // probability cdf
+
+Returns the value of [alpha][space] that gives:
+`cdf(beta_distribution<RealType>(alpha, beta), x) == probability`.
+
+ static RealType find_beta(
+ RealType alpha, // alpha.
+ RealType x, // probability x.
+ RealType probability); // probability cdf.
+
+Returns the value of [beta][space] that gives:
+`cdf(beta_distribution<RealType>(alpha, beta), x) == probability`.
+
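+For example, a sketch of estimating both shape parameters from a presumed-known mean and variance (the values 0.2 and 0.01 are arbitrary; by the formulae in the implementation table below they yield [alpha] = 3 and [beta] = 12):
+
+    #include <boost/math/distributions/beta.hpp>
+    #include <iostream>
+
+    int main()
+    {
+        using boost::math::beta_distribution;
+        double mean = 0.2, variance = 0.01;
+        double a = beta_distribution<double>::find_alpha(mean, variance); // 3
+        double b = beta_distribution<double>::find_beta(mean, variance);  // 12
+        std::cout << "alpha = " << a << ", beta = " << b << "\n";
+    }
+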
+[h4 Non-member Accessor Functions]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The formulae for calculating these are shown in the table below, and at
+[@http://mathworld.wolfram.com/BetaDistribution.html Wolfram Mathworld].
+
+[h4 Applications]
+
+The beta distribution can be used to model events constrained
+to take place within an interval defined by a minimum and maximum value:
+so it is used in project management systems.
+
+It is also widely used in [@http://en.wikipedia.org/wiki/Bayesian_inference Bayesian statistical inference].
+
+[h4 Related distributions]
+
+The beta distribution with both [alpha] __space and [beta] = 1 follows a
+[@http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 uniform distribution].
+
+The [@http://en.wikipedia.org/wiki/Triangular_distribution triangular]
+is used when less precise information is available.
+
+The [@http://en.wikipedia.org/wiki/Binomial_distribution binomial distribution]
+is closely related when [alpha] __space and [beta] __space are integers.
+
+With integer values of [alpha] __space and [beta] __space the distribution B(i, j) is
+that of the j-th highest of a sample of i + j + 1 independent random variables
+uniformly distributed between 0 and 1.
+The cumulative probability from 0 to x is thus
+the probability that the j-th highest value is less than x.
+Or it is the probability that at least i of the random variables are less than x,
+a probability given by summing over the __binomial_distrib
+with its p parameter set to x.
+
+[h4 Accuracy]
+
+This distribution is implemented using the
+[link math_toolkit.sf_beta.beta_function beta functions] __beta and
+[link math_toolkit.sf_beta.ibeta_function incomplete beta functions] __ibeta and __ibetac;
+please refer to these functions for information on accuracy.
+
+[h4 Implementation]
+
+In the following table /a/ and /b/ are the parameters [alpha][space] and [beta],
+/x/ is the random variable, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf]
+ [f(x;[alpha],[beta]) = x[super[alpha] - 1] (1 - x)[super[beta] -1] / B([alpha], [beta])
+
+ Implemented using __ibeta_derivative(a, b, x).]]
+
+[[cdf][Using the incomplete beta function __ibeta(a, b, x)]]
+[[cdf complement][__ibetac(a, b, x)]]
+[[quantile][Using the inverse incomplete beta function __ibeta_inv(a, b, p)]]
+[[quantile from the complement][__ibetac_inv(a, b, q)]]
+[[mean][`a/(a+b)`]]
+[[variance][`a * b / ((a+b)^2 * (a + b + 1))`]]
+[[mode][`(a-1) / (a + b - 2)`]]
+[[skewness][`2 * (b-a) * sqrt(a+b+1) / ((a+b+2) * sqrt(a * b))`]]
+[[kurtosis excess][ [equation beta_dist_kurtosis] ]]
+[[kurtosis][`kurtosis excess + 3`]]
+[[parameter estimation][ ]]
+[[alpha
+
+ from mean and variance][`mean * (( (mean * (1 - mean)) / variance)- 1)`]]
+[[beta
+
+ from mean and variance][`(1 - mean) * (((mean * (1 - mean)) /variance)-1)`]]
+[[The member functions `find_alpha` and `find_beta`
+
+ from cdf and probability x
+
+ and *either* `alpha` or `beta`]
+ [Implemented in terms of the inverse incomplete beta functions
+
+__ibeta_inva, and __ibeta_invb respectively.]]
+[[`find_alpha`][`ibeta_inva(beta, x, probability)`]]
+[[`find_beta`][`ibeta_invb(alpha, x, probability)`]]
+]
+
+[h4 References]
+
+[@http://en.wikipedia.org/wiki/Beta_distribution Wikipedia Beta distribution]
+
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda366h.htm NIST Exploratory Data Analysis]
+
+[@http://mathworld.wolfram.com/BetaDistribution.html Wolfram MathWorld]
+
+[endsect][/section:beta_dist beta]
+
+[/ beta.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/binomial.qbk b/doc/distributions/binomial.qbk
new file mode 100644
index 0000000..eae3e83
--- /dev/null
+++ b/doc/distributions/binomial.qbk
@@ -0,0 +1,404 @@
+[section:binomial_dist Binomial Distribution]
+
+``#include <boost/math/distributions/binomial.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class binomial_distribution;
+
+ typedef binomial_distribution<> binomial;
+
+ template <class RealType, class ``__Policy``>
+ class binomial_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ static const ``['unspecified-type]`` clopper_pearson_exact_interval;
+ static const ``['unspecified-type]`` jeffreys_prior_interval;
+
+ // construct:
+ binomial_distribution(RealType n, RealType p);
+
+ // parameter access::
+ RealType success_fraction() const;
+ RealType trials() const;
+
+ // Bounds on success fraction:
+ static RealType find_lower_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType probability,
+ ``['unspecified-type]`` method = clopper_pearson_exact_interval);
+ static RealType find_upper_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType probability,
+ ``['unspecified-type]`` method = clopper_pearson_exact_interval);
+
+ // estimate min/max number of trials:
+ static RealType find_minimum_number_of_trials(
+ RealType k, // number of events
+ RealType p, // success fraction
+ RealType alpha); // risk level
+
+ static RealType find_maximum_number_of_trials(
+ RealType k, // number of events
+ RealType p, // success fraction
+ RealType alpha); // risk level
+ };
+
+ }} // namespaces
+
+The class type `binomial_distribution` represents a
+[@http://mathworld.wolfram.com/BinomialDistribution.html binomial distribution]:
+it is used when there are exactly two mutually
+exclusive outcomes of a trial. These outcomes are labelled
+"success" and "failure". The
+__binomial_distrib is used to obtain
+the probability of observing k successes in N trials, with the
+probability of success on a single trial denoted by p. The
+binomial distribution assumes that p is fixed for all trials.
+
+[note The random variable for the binomial distribution is the number of successes,
+(the number of trials is a fixed property of the distribution)
+whereas for the negative binomial,
+the random variable is the number of trials, for a fixed number of successes.]
+
+The PDF for the binomial distribution is given by:
+
+[equation binomial_ref2]
+
+The following two graphs illustrate how the PDF changes depending
+upon the distribution's parameters. First we'll keep the success
+fraction /p/ fixed at 0.5 and vary the sample size:
+
+[graph binomial_pdf_1]
+
+Alternatively, we can keep the sample size fixed at N=20 and
+vary the success fraction /p/:
+
+[graph binomial_pdf_2]
+
+[discrete_quantile_warning Binomial]
+
+[h4 Member Functions]
+
+[h5 Construct]
+
+ binomial_distribution(RealType n, RealType p);
+
+Constructor: /n/ is the total number of trials, /p/ is the
+probability of success of a single trial.
+
+Requires `0 <= p <= 1`, and `n >= 0`, otherwise calls __domain_error.
+
+[h5 Accessors]
+
+ RealType success_fraction() const;
+
+Returns the parameter /p/ from which this distribution was constructed.
+
+ RealType trials() const;
+
+Returns the parameter /n/ from which this distribution was constructed.
+
+[h5 Lower Bound on the Success Fraction]
+
+ static RealType find_lower_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType alpha,
+ ``['unspecified-type]`` method = clopper_pearson_exact_interval);
+
+Returns a lower bound on the success fraction:
+
+[variablelist
+[[trials][The total number of trials conducted.]]
+[[successes][The number of successes that occurred.]]
+[[alpha][The largest acceptable probability that the true value of
+ the success fraction is [*less than] the value returned.]]
+[[method][An optional parameter that specifies the method to be used
+ to compute the interval (See below).]]
+]
+
+For example, if you observe /k/ successes from /n/ trials the
+best estimate for the success fraction is simply ['k/n], but if you
+want to be 95% sure that the true value is [*greater than] some value,
+['p[sub min]], then:
+
+ p``[sub min]`` = binomial_distribution<RealType>::find_lower_bound_on_p(
+ n, k, 0.05);
+
+[link math_toolkit.stat_tut.weg.binom_eg.binom_conf See worked example.]
+
+There are currently two possible values available for the /method/
+optional parameter: /clopper_pearson_exact_interval/
+or /jeffreys_prior_interval/. These constants are both members of
+class template `binomial_distribution`, so usage is for example:
+
+ p = binomial_distribution<RealType>::find_lower_bound_on_p(
+ n, k, 0.05, binomial_distribution<RealType>::jeffreys_prior_interval);
+
+The default method if this parameter is not specified is the Clopper Pearson
+"exact" interval. This produces an interval that guarantees at least
+`100(1-alpha)%` coverage, but which is known to be overly conservative,
+sometimes producing intervals with much greater than the requested coverage.
+
+The alternative calculation method produces a non-informative
+Jeffreys Prior interval. It produces `100(1-alpha)%` coverage only
+['in the average case], though is typically very close to the requested
+coverage level. It is one of the main methods of calculation recommended
+in the review by Brown, Cai and DasGupta.
+
+Please note that the "textbook" calculation method using
+a normal approximation (the Wald interval) is deliberately
+not provided: it is known to produce consistently poor results,
+even when the sample size is surprisingly large.
+Refer to Brown, Cai and DasGupta for a full explanation. Many other methods
+of calculation are available, and may be more appropriate for specific
+situations. Unfortunately there appears to be no consensus amongst
+statisticians as to which is "best": refer to the discussion at the end of
+Brown, Cai and DasGupta for examples.
+
+The two methods provided here were chosen principally because they
+can be used for both one and two sided intervals.
+See also:
+
+Lawrence D. Brown, T. Tony Cai and Anirban DasGupta (2001),
+Interval Estimation for a Binomial Proportion,
+Statistical Science, Vol. 16, No. 2, 101-133.
+
+T. Tony Cai (2005),
+One-sided confidence intervals in discrete distributions,
+Journal of Statistical Planning and Inference 131, 63-88.
+
+Agresti, A. and Coull, B. A. (1998). Approximate is better than
+"exact" for interval estimation of binomial proportions. Amer.
+Statist. 52 119-126.
+
+Clopper, C. J. and Pearson, E. S. (1934). The use of confidence
+or fiducial limits illustrated in the case of the binomial.
+Biometrika 26 404-413.
+
+[h5 Upper Bound on the Success Fraction]
+
+ static RealType find_upper_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType alpha,
+ ``['unspecified-type]`` method = clopper_pearson_exact_interval);
+
+Returns an upper bound on the success fraction:
+
+[variablelist
+[[trials][The total number of trials conducted.]]
+[[successes][The number of successes that occurred.]]
+[[alpha][The largest acceptable probability that the true value of
+ the success fraction is [*greater than] the value returned.]]
+[[method][An optional parameter that specifies the method to be used
+    to compute the interval. Refer to the documentation for
+    `find_lower_bound_on_p` above for the meaning of the
+    method options.]]
+]
+
+For example, if you observe /k/ successes from /n/ trials the
+best estimate for the success fraction is simply ['k/n], but if you
+want to be 95% sure that the true value is [*less than] some value,
+['p[sub max]], then:
+
+ p``[sub max]`` = binomial_distribution<RealType>::find_upper_bound_on_p(
+ n, k, 0.05);
+
+[link math_toolkit.stat_tut.weg.binom_eg.binom_conf See worked example.]
+
+[note
+In order to obtain a two sided bound on the success fraction, you
+call both `find_lower_bound_on_p` *and* `find_upper_bound_on_p`
+each with the same arguments.
+
+If the desired risk level
+that the true success fraction lies outside the bounds is [alpha],
+then you pass [alpha]/2 to these functions.
+
+So for example a two sided 95% confidence interval would be obtained
+by passing [alpha] = 0.025 to each of the functions.
+
+[link math_toolkit.stat_tut.weg.binom_eg.binom_conf See worked example.]
+]
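+
+For example, a sketch of a two-sided 95% interval (the counts n = 100 and k = 37 are arbitrary):
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <iostream>
+
+    int main()
+    {
+        using boost::math::binomial_distribution;
+        double n = 100, k = 37;  // Trials and observed successes.
+        double alpha = 0.05;     // Total two-sided risk level.
+        // Pass alpha/2 to each one-sided bound:
+        double lower = binomial_distribution<double>::find_lower_bound_on_p(n, k, alpha / 2);
+        double upper = binomial_distribution<double>::find_upper_bound_on_p(n, k, alpha / 2);
+        std::cout << "95% interval for p: [" << lower << ", " << upper << "]\n";
+    }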
+
+
+[h5 Estimating the Number of Trials Required for a Certain Number of Successes]
+
+ static RealType find_minimum_number_of_trials(
+ RealType k, // number of events
+ RealType p, // success fraction
+ RealType alpha); // probability threshold
+
+This function estimates the minimum number of trials required to ensure that
+more than k events are observed, with a level of risk /alpha/ that k or
+fewer events occur.
+
+[variablelist
+[[k][The number of successes observed.]]
+[[p][The probability of success for each trial.]]
+[[alpha][The maximum acceptable probability that k events or fewer will be observed.]]
+]
+
+For example:
+
+    binomial_distribution<RealType>::find_minimum_number_of_trials(10, 0.5, 0.05);
+
+Returns the smallest number of trials we must conduct to be 95% sure
+of seeing 10 events that occur with frequency one half.
+
+[h5 Estimating the Maximum Number of Trials to Ensure no more than a Certain Number of Successes]
+
+ static RealType find_maximum_number_of_trials(
+ RealType k, // number of events
+ RealType p, // success fraction
+ RealType alpha); // probability threshold
+
+This function estimates the maximum number of trials we can conduct
+to ensure that k successes or fewer are observed, with a risk /alpha/
+that more than k occur.
+
+[variablelist
+[[k][The number of successes observed.]]
+[[p][The probability of success for each trial.]]
+[[alpha][The maximum acceptable probability that more than k events will be observed.]]
+]
+
+For example:
+
+ binomial_distribution<RealType>::find_maximum_number_of_trials(0, 1e-6, 0.05);
+
+Returns the largest number of trials we can conduct and still be 95% certain
+of not observing any events that occur with one in a million frequency.
+This is typically used in failure analysis.
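+
+As a sketch (rounding down this time, so as to err on the safe side):
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::binomial_distribution;
+       // Largest number of trials with no more than a 5% chance of seeing
+       // any events, when each event occurs with one in a million frequency:
+       double n = binomial_distribution<>::find_maximum_number_of_trials(
+          0, 1e-6, 0.05);
+       std::cout << std::floor(n) << std::endl; // round down to be safe
+    }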
+
+[link math_toolkit.stat_tut.weg.binom_eg.binom_size_eg See Worked Example.]
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain for the random variable /k/ is `0 <= k <= N`, otherwise a
+__domain_error is returned.
+
+It's worth taking a moment to define what these accessors actually mean in
+the context of this distribution:
+
+[table Meaning of the non-member accessors
+[[Function][Meaning]]
+[[__pdf]
+ [The probability of obtaining [*exactly k successes] from n trials
+ with success fraction p. For example:
+
+`pdf(binomial(n, p), k)`]]
+[[__cdf]
+ [The probability of obtaining [*k successes or fewer] from n trials
+ with success fraction p. For example:
+
+`cdf(binomial(n, p), k)`]]
+[[__ccdf]
+ [The probability of obtaining [*more than k successes] from n trials
+ with success fraction p. For example:
+
+`cdf(complement(binomial(n, p), k))`]]
+[[__quantile]
+ [The [*greatest] number of successes that may be observed from n trials
+ with success fraction p, at probability P. Note that the value returned
+ is a real-number, and not an integer. Depending on the use case you may
+ want to take either the floor or ceiling of the result. For example:
+
+`quantile(binomial(n, p), P)`]]
+[[__quantile_c]
+ [The [*smallest] number of successes that may be observed from n trials
+ with success fraction p, at probability P. Note that the value returned
+   is a real number, not an integer. Depending on the use case you may
+ want to take either the floor or ceiling of the result. For example:
+
+`quantile(complement(binomial(n, p), P))`]]
+]
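+
+For example, the following sketch evaluates a real-valued quantile and then
+brackets it with its floor and ceiling. A `discrete_quantile<real>` policy
+is used so that the quantile really is returned as a real value; under the
+default policy the quantiles of discrete distributions are rounded to
+integer results:
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    int main()
+    {
+       using namespace boost::math::policies;
+       // A binomial distribution whose quantiles are returned as real
+       // values rather than rounded to integers:
+       typedef boost::math::binomial_distribution<
+          double, policy<discrete_quantile<real> > > binom_real;
+       double x = quantile(binom_real(50, 0.5), 0.05);
+       std::cout << std::floor(x) << " <= " << x
+          << " <= " << std::ceil(x) << std::endl;
+    }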
+
+[h4 Examples]
+
+Various [link math_toolkit.stat_tut.weg.binom_eg worked examples]
+are available illustrating the use of the binomial distribution.
+
+[h4 Accuracy]
+
+This distribution is implemented using the
+incomplete beta functions __ibeta and __ibetac,
+please refer to these functions for information on accuracy.
+
+[h4 Implementation]
+
+In the following table /p/ is the probability that one trial will
+be successful (the success fraction), /n/ is the number of trials,
+/k/ is the number of successes and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Implementation is in terms of __ibeta_derivative: if [sub n]C[sub k ] is the binomial
+   coefficient of /n/ and /k/, then we have:
+
+[equation binomial_ref1]
+
+This can be evaluated as `ibeta_derivative(k+1, n-k+1, p) / (n+1)`.
+
+The function __ibeta_derivative is used here, since it has already
+ been optimised for the lowest possible error - indeed this is really
+ just a thin wrapper around part of the internals of the incomplete
+ beta function.
+
+There are also various special cases: refer to the code for details.
+ ]]
+[[cdf][Using the relation:
+
+``
+p = I[sub 1-p](n - k, k + 1)
+ = 1 - I[sub p](k + 1, n - k)
+ = __ibetac(k + 1, n - k, p)``
+
+There are also various special cases: refer to the code for details.
+]]
+[[cdf complement][Using the relation: q = __ibeta(k + 1, n - k, p)
+
+There are also various special cases: refer to the code for details. ]]
+[[quantile][Since the cdf is non-linear in variate /k/ none of the inverse
+ incomplete beta functions can be used here. Instead the quantile
+ is found numerically using a derivative free method
+ (__root_finding_TOMS748).]]
+[[quantile from the complement][Found numerically as above.]]
+[[mean][ `p * n` ]]
+[[variance][ `p * n * (1-p)` ]]
+[[mode][`floor(p * (n + 1))`]]
+[[skewness][`(1 - 2 * p) / sqrt(n * p * (1 - p))`]]
+[[kurtosis][`3 - (6 / n) + (1 / (n * p * (1 - p)))`]]
+[[kurtosis excess][`(1 - 6 * p * q) / (n * p * q)`]]
+[[parameter estimation][The member functions `find_upper_bound_on_p`,
+   `find_lower_bound_on_p`, `find_minimum_number_of_trials` and
+   `find_maximum_number_of_trials` are implemented in terms of the
+   inverse incomplete beta functions __ibetac_inv, __ibeta_inv,
+   and __ibetac_invb.]]
+]
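+
+These relations are straightforward to check numerically; for instance, a
+small sketch with arbitrarily chosen values of /n/, /k/ and /p/:
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <boost/math/special_functions/beta.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using namespace boost::math;
+       double n = 10, k = 3, p = 0.25; // arbitrary values
+       // pdf via the incomplete beta derivative, as in the table above:
+       std::cout << pdf(binomial(n, p), k) << " == "
+          << ibeta_derivative(k + 1, n - k + 1, p) / (n + 1) << "\n";
+       // cdf via the complemented incomplete beta function:
+       std::cout << cdf(binomial(n, p), k) << " == "
+          << ibetac(k + 1, n - k, p) << "\n";
+    }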
+
+[h4 References]
+
+* [@http://mathworld.wolfram.com/BinomialDistribution.html Weisstein, Eric W. "Binomial Distribution." From MathWorld--A Wolfram Web Resource].
+* [@http://en.wikipedia.org/wiki/Binomial_distribution Wikipedia binomial distribution].
+* [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda366i.htm NIST Exploratory Data Analysis].
+
+[endsect] [/section:binomial_dist Binomial]
+
+[/ binomial.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/binomial_example.qbk b/doc/distributions/binomial_example.qbk
new file mode 100644
index 0000000..7f03e51
--- /dev/null
+++ b/doc/distributions/binomial_example.qbk
@@ -0,0 +1,332 @@
+[section:binom_eg Binomial Distribution Examples]
+
+See also the reference documentation for the __binomial_distrib.
+
+[section:binomial_coinflip_example Binomial Coin-Flipping Example]
+
+[import ../../example/binomial_coinflip_example.cpp]
+[binomial_coinflip_example1]
+
+See [@../../example/binomial_coinflip_example.cpp binomial_coinflip_example.cpp]
+for the full source code; the program output looks like this:
+
+[binomial_coinflip_example_output]
+
+[endsect] [/section:binomial_coinflip_example Binomial coinflip example]
+
+[section:binomial_quiz_example Binomial Quiz Example]
+
+[import ../../example/binomial_quiz_example.cpp]
+[binomial_quiz_example1]
+[binomial_quiz_example2]
+[discrete_quantile_real]
+
+See [@../../example/binomial_quiz_example.cpp binomial_quiz_example.cpp]
+for full source code and output.
+
+[endsect] [/section:binomial_quiz_example Binomial Quiz Example]
+
+[section:binom_conf Calculating Confidence Limits on the Frequency of Occurrence for a Binomial Distribution]
+
+Imagine you have a process that follows a binomial distribution: for each
+trial conducted, an event either occurs or it does not; the two outcomes
+are referred to as "successes" and "failures". If, by experiment, you want
+to measure the frequency with which successes occur, the best estimate is
+given simply by /k/ \/ /N/, for /k/ successes out of /N/ trials. However, our confidence in that
+estimate will be shaped by how many trials were conducted, and how many successes
+were observed. The static member functions
+`binomial_distribution<>::find_lower_bound_on_p` and
+`binomial_distribution<>::find_upper_bound_on_p` allow you to calculate
+the confidence intervals for your estimate of the occurrence frequency.
+
+The sample program [@../../example/binomial_confidence_limits.cpp
+binomial_confidence_limits.cpp] illustrates their use. It begins by defining
+a procedure that will print a table of confidence limits for various degrees
+of certainty:
+
+ #include <iostream>
+ #include <iomanip>
+ #include <boost/math/distributions/binomial.hpp>
+
+ void confidence_limits_on_frequency(unsigned trials, unsigned successes)
+ {
+ //
+ // trials = Total number of trials.
+ // successes = Total number of observed successes.
+ //
+ // Calculate confidence limits for an observed
+ // frequency of occurrence that follows a binomial
+ // distribution.
+ //
+ using namespace std;
+ using namespace boost::math;
+
+ // Print out general info:
+ cout <<
+ "___________________________________________\n"
+ "2-Sided Confidence Limits For Success Ratio\n"
+ "___________________________________________\n\n";
+ cout << setprecision(7);
+ cout << setw(40) << left << "Number of Observations" << "= " << trials << "\n";
+ cout << setw(40) << left << "Number of successes" << "= " << successes << "\n";
+ cout << setw(40) << left << "Sample frequency of occurrence" << "= " << double(successes) / trials << "\n";
+
+The procedure now defines a table of significance levels: these are the
+probabilities that the true occurrence frequency lies outside the calculated
+interval:
+
+ double alpha[] = { 0.5, 0.25, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001 };
+
+Some pretty printing of the table header follows:
+
+ cout << "\n\n"
+ "_______________________________________________________________________\n"
+ "Confidence Lower CP Upper CP Lower JP Upper JP\n"
+ " Value (%) Limit Limit Limit Limit\n"
+ "_______________________________________________________________________\n";
+
+
+And now for the important part - the intervals themselves - for each
+value of /alpha/, we call `find_lower_bound_on_p` and
+`find_upper_bound_on_p` to obtain lower and upper bounds
+respectively. Note that since we are calculating a two-sided interval,
+we must divide the value of alpha in two.
+
+Please note that calculating two separate /single sided bounds/, each with
+risk level [alpha], is not the same thing as calculating a two sided interval.
+Had we calculated two single-sided intervals, each with a risk [alpha]
+that the true value is outside the interval, then:
+
+* The risk that it is less than the lower bound is [alpha].
+
+and
+
+* The risk that it is greater than the upper bound is also [alpha].
+
+So the risk that it is outside *either the upper or the lower bound* is
+*twice* [alpha], and the probability that it is inside the bounds is
+therefore not nearly as high as one might have thought. This is why
+[alpha]/2 must be used in the calculations below.
+
+In contrast, had we been calculating a
+single-sided interval, for example: ['"Calculate a lower bound so that we are P%
+sure that the true occurrence frequency is greater than some value"]
+then we would *not* have divided by two.
+
+Finally note that `binomial_distribution` provides a choice of two
+methods for the calculation; we print out the results from both
+methods in this example:
+
+ for(unsigned i = 0; i < sizeof(alpha)/sizeof(alpha[0]); ++i)
+ {
+ // Confidence value:
+ cout << fixed << setprecision(3) << setw(10) << right << 100 * (1-alpha[i]);
+ // Calculate Clopper Pearson bounds:
+ double l = binomial_distribution<>::find_lower_bound_on_p(
+ trials, successes, alpha[i]/2);
+ double u = binomial_distribution<>::find_upper_bound_on_p(
+ trials, successes, alpha[i]/2);
+ // Print Clopper Pearson Limits:
+ cout << fixed << setprecision(5) << setw(15) << right << l;
+ cout << fixed << setprecision(5) << setw(15) << right << u;
+ // Calculate Jeffreys Prior Bounds:
+ l = binomial_distribution<>::find_lower_bound_on_p(
+ trials, successes, alpha[i]/2,
+ binomial_distribution<>::jeffreys_prior_interval);
+ u = binomial_distribution<>::find_upper_bound_on_p(
+ trials, successes, alpha[i]/2,
+ binomial_distribution<>::jeffreys_prior_interval);
+ // Print Jeffreys Prior Limits:
+ cout << fixed << setprecision(5) << setw(15) << right << l;
+ cout << fixed << setprecision(5) << setw(15) << right << u << std::endl;
+ }
+ cout << endl;
+ }
+
+And that's all there is to it. Let's see some sample output for a 2 in 10
+success ratio, first for 20 trials:
+
+[pre'''___________________________________________
+2-Sided Confidence Limits For Success Ratio
+___________________________________________
+
+Number of Observations = 20
+Number of successes = 4
+Sample frequency of occurrence = 0.2
+
+
+_______________________________________________________________________
+Confidence Lower CP Upper CP Lower JP Upper JP
+ Value (%) Limit Limit Limit Limit
+_______________________________________________________________________
+ 50.000 0.12840 0.29588 0.14974 0.26916
+ 75.000 0.09775 0.34633 0.11653 0.31861
+ 90.000 0.07135 0.40103 0.08734 0.37274
+ 95.000 0.05733 0.43661 0.07152 0.40823
+ 99.000 0.03576 0.50661 0.04655 0.47859
+ 99.900 0.01905 0.58632 0.02634 0.55960
+ 99.990 0.01042 0.64997 0.01530 0.62495
+ 99.999 0.00577 0.70216 0.00901 0.67897
+''']
+
+As you can see, even at the 95% confidence level the bounds are
+really quite wide (this example is chosen to be easily compared to the one
+in the __handbook
+[@http://www.itl.nist.gov/div898/handbook/prc/section2/prc241.htm
+here]). Note also that the Clopper-Pearson calculation method (CP above)
+produces quite noticeably more pessimistic estimates than the Jeffreys Prior
+method (JP above).
+
+
+Compare that with the program output for
+2000 trials:
+
+[pre'''___________________________________________
+2-Sided Confidence Limits For Success Ratio
+___________________________________________
+
+Number of Observations = 2000
+Number of successes = 400
+Sample frequency of occurrence = 0.2000000
+
+
+_______________________________________________________________________
+Confidence Lower CP Upper CP Lower JP Upper JP
+ Value (%) Limit Limit Limit Limit
+_______________________________________________________________________
+ 50.000 0.19382 0.20638 0.19406 0.20613
+ 75.000 0.18965 0.21072 0.18990 0.21047
+ 90.000 0.18537 0.21528 0.18561 0.21503
+ 95.000 0.18267 0.21821 0.18291 0.21796
+ 99.000 0.17745 0.22400 0.17769 0.22374
+ 99.900 0.17150 0.23079 0.17173 0.23053
+ 99.990 0.16658 0.23657 0.16681 0.23631
+ 99.999 0.16233 0.24169 0.16256 0.24143
+''']
+
+Now even when the confidence level is very high, the limits are really
+quite close to the experimentally calculated value of 0.2. Furthermore
+the difference between the two calculation methods is now really quite small.
+
+[endsect]
+
+[section:binom_size_eg Estimating Sample Sizes for a Binomial Distribution.]
+
+Imagine you have a critical component that you know will fail in 1 in
+N "uses" (for some suitable definition of "use"). You may want to schedule
+routine replacement of the component so that its chance of failure between
+routine replacements is less than P%. If the failures follow a binomial
+distribution (each time the component is "used" it either fails or does not)
+then the static member function `binomial_distribution<>::find_maximum_number_of_trials`
+can be used to estimate the maximum number of "uses" of that component for some
+acceptable risk level /alpha/.
+
+The example program
+[@../../example/binomial_sample_sizes.cpp binomial_sample_sizes.cpp]
+demonstrates its usage. It centres on a routine that prints out
+a table of maximum sample sizes for various probability thresholds:
+
+ void find_max_sample_size(
+ double p, // success ratio.
+ unsigned successes) // Total number of observed successes permitted.
+ {
+
+The routine then declares a table of probability thresholds: these are the
+maximum acceptable probability that /successes/ or fewer events will be
+observed. In our example, /successes/ will always be zero, since we want
+no component failures, but in other situations non-zero values may well
+make sense.
+
+ double alpha[] = { 0.5, 0.25, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001 };
+
+Much of the rest of the program is pretty-printing, the important part
+is in the calculation of maximum number of permitted trials for each
+value of alpha:
+
+ for(unsigned i = 0; i < sizeof(alpha)/sizeof(alpha[0]); ++i)
+ {
+ // Confidence value:
+ cout << fixed << setprecision(3) << setw(10) << right << 100 * (1-alpha[i]);
+ // calculate trials:
+ double t = binomial::find_maximum_number_of_trials(
+ successes, p, alpha[i]);
+ t = floor(t);
+ // Print Trials:
+ cout << fixed << setprecision(5) << setw(15) << right << t << endl;
+ }
+
+Note that since we're
+calculating the maximum number of trials permitted, we'll err on the safe
+side and take the floor of the result. Had we been calculating the
+/minimum/ number of trials required to observe a certain number of /successes/
+using `find_minimum_number_of_trials` we would have taken the ceiling instead.
+
+We'll finish off by looking at some sample output, firstly for
+a 1 in 1000 chance of component failure with each use:
+
+[pre
+'''________________________
+Maximum Number of Trials
+________________________
+
+Success ratio = 0.001
+Maximum Number of "successes" permitted = 0
+
+
+____________________________
+Confidence Max Number
+ Value (%) Of Trials
+____________________________
+ 50.000 692
+ 75.000 287
+ 90.000 105
+ 95.000 51
+ 99.000 10
+ 99.900 0
+ 99.990 0
+ 99.999 0'''
+]
+
+So 51 "uses" of the component would yield a 95% chance that no
+component failures would be observed.
+
+Compare that with a 1 in 1 million chance of component failure:
+
+[pre'''
+________________________
+Maximum Number of Trials
+________________________
+
+Success ratio = 0.0000010
+Maximum Number of "successes" permitted = 0
+
+
+____________________________
+Confidence Max Number
+ Value (%) Of Trials
+____________________________
+ 50.000 693146
+ 75.000 287681
+ 90.000 105360
+ 95.000 51293
+ 99.000 10050
+ 99.900 1000
+ 99.990 100
+ 99.999 10'''
+]
+
+In this case, even 1000 uses of the component would still yield a
+less than 1 in 1000 chance of observing a component failure
+(i.e. a 99.9% chance of no failure).
+
+[endsect] [/section:binom_size_eg Estimating Sample Sizes for a Binomial Distribution.]
+
+[endsect][/section:binom_eg Binomial Distribution]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/c_sharp.qbk b/doc/distributions/c_sharp.qbk
new file mode 100644
index 0000000..6764f25
--- /dev/null
+++ b/doc/distributions/c_sharp.qbk
@@ -0,0 +1,20 @@
+[section:c_sharp Using the Distributions from Within C#]
+
+The distributions in this library can be used from the C# programming language
+when they are built using Microsoft's Common Language Runtime (CLR) option.
+
+An example of this kind of usage is given in the
+[@../distexplorer/html/index.html Distribution Explorer]
+example. See =boost-root/libs/math/dot_net_example=
+for the source code: the application consists of a C++ .dll that contains the
+actual distributions, and a C# GUI that allows you to explore their properties.
+
+[endsect] [/section:c_sharp]
+
+[/
+ Copyright 2006, 2013 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/cauchy.qbk b/doc/distributions/cauchy.qbk
new file mode 100644
index 0000000..e91ebe2
--- /dev/null
+++ b/doc/distributions/cauchy.qbk
@@ -0,0 +1,154 @@
+[section:cauchy_dist Cauchy-Lorentz Distribution]
+
+``#include <boost/math/distributions/cauchy.hpp>``
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class cauchy_distribution;
+
+ typedef cauchy_distribution<> cauchy;
+
+ template <class RealType, class ``__Policy``>
+ class cauchy_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ cauchy_distribution(RealType location = 0, RealType scale = 1);
+
+ RealType location()const;
+ RealType scale()const;
+ };
+
+The [@http://en.wikipedia.org/wiki/Cauchy_distribution Cauchy-Lorentz distribution]
+is named after Augustin Cauchy and Hendrik Lorentz.
+It is a [@http://en.wikipedia.org/wiki/Probability_distribution continuous probability distribution]
+with [@http://en.wikipedia.org/wiki/Probability_density_function probability density function PDF]
+given by:
+
+[equation cauchy_ref1]
+
+The location parameter x[sub 0][space] is the location of the
+peak of the distribution (the mode of the distribution),
+while the scale parameter [gamma][space] specifies half the width
+of the PDF at half the maximum height. If the location is
+zero, and the scale 1, then the result is a standard Cauchy
+distribution.
+
+The distribution is important in physics as it is the solution
+to the differential equation describing forced resonance,
+while in spectroscopy it is the description of the line shape
+of spectral lines.
+
+The following graph shows how the distribution moves as the
+location parameter changes:
+
+[graph cauchy_pdf1]
+
+While the following graph shows how the shape (scale) parameter alters
+the distribution:
+
+[graph cauchy_pdf2]
+
+[h4 Member Functions]
+
+ cauchy_distribution(RealType location = 0, RealType scale = 1);
+
+Constructs a Cauchy distribution, with location parameter /location/
+and scale parameter /scale/. When these parameters take their default
+values (location = 0, scale = 1)
+then the result is a Standard Cauchy Distribution.
+
+Requires scale > 0, otherwise calls __domain_error.
+
+ RealType location()const;
+
+Returns the location parameter of the distribution.
+
+ RealType scale()const;
+
+Returns the scale parameter of the distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+Note however that the Cauchy distribution does not have a mean,
+standard deviation, etc. See __math_undefined
+[/link math_toolkit.pol_ref.assert_undefined mathematically undefined function]
+to control whether these should fail to compile with a BOOST_STATIC_ASSERTION_FAILURE,
+which is the default.
+
+Alternatively, the functions __mean, __sd,
+__variance, __skewness, __kurtosis and __kurtosis_excess will all
+return a __domain_error if called.
+
+The domain of the random variable is \[-[max_value], +[max_value]\].
+
+[h4 Accuracy]
+
+The Cauchy distribution is implemented in terms of the
+standard library `tan` and `atan` functions,
+and as such should have very low error rates.
+
+[h4 Implementation]
+
+[def __x0 x[sub 0 ]]
+
+In the following table __x0 is the location parameter of the distribution,
+[gamma][space] is its scale parameter,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = 1 / ([pi] * [gamma] * (1 + ((x - __x0) / [gamma])[super 2])) ]]
+[[cdf and its complement][
+The cdf is normally given by:
+
+p = 0.5 + atan(x)/[pi]
+
+But that suffers from cancellation error as x -> -[infin].
+So recall that for `x < 0`:
+
+atan(x) = -[pi]/2 - atan(1/x)
+
+Substituting into the above we get:
+
+p = -atan(1/x) / [pi] ; x < 0
+
+So the procedure is to calculate the cdf for -fabs(x)
+using the above formula. Note that to factor in the location and scale
+parameters you must substitute (x - __x0) / [gamma][space] for x in the above.
+
+This procedure yields the smaller of /p/ and /q/, so the result
+may need subtracting from 1 depending on whether we want the complement
+or not, and whether /x/ is less than __x0 or not.
+]]
+[[quantile][The same procedure is used irrespective of whether we're starting
+ from the probability or its complement. First the argument /p/ is
+ reduced to the range \[-0.5, 0.5\], then the relation
+
+x = __x0 [plusminus] [gamma][space] / tan([pi] * p)
+
+is used to obtain the result. Whether we're adding
+ or subtracting from __x0 is determined by whether we're
+ starting from the complement or not.]]
+[[mode][The location parameter.]]
+]
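+
+To make the cdf procedure concrete, here is a simplified sketch of the
+scheme described above; the library's own implementation adds error
+handling and further special cases:
+
+    #include <cmath>
+
+    // x0 and gamma are the location and scale parameters:
+    double cauchy_cdf(double x, double x0, double gamma, bool complement)
+    {
+       const double pi = 3.14159265358979323846;
+       double z = (x - x0) / gamma; // standardise the variate
+       // Tail probability at -fabs(z): the smaller of p and q, computed
+       // without cancellation (for z == 0, 1/0 -> inf and atan -> pi/2):
+       double tail = std::atan(1 / std::fabs(z)) / pi;
+       // Subtract from 1 only when the larger tail is wanted:
+       bool smaller_tail = complement ? (z > 0) : (z <= 0);
+       return smaller_tail ? tail : 1 - tail;
+    }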
+
+[h4 References]
+
+* [@http://en.wikipedia.org/wiki/Cauchy_distribution Cauchy-Lorentz distribution]
+* [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm NIST Exploratory Data Analysis]
+* [@http://mathworld.wolfram.com/CauchyDistribution.html Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A Wolfram Web Resource.]
+
+[endsect][/section:cauchy_dist Cauchy]
+
+[/ cauchy.qbk
+ Copyright 2006, 2007 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/chi_squared.qbk b/doc/distributions/chi_squared.qbk
new file mode 100644
index 0000000..77237e0
--- /dev/null
+++ b/doc/distributions/chi_squared.qbk
@@ -0,0 +1,161 @@
+[section:chi_squared_dist Chi Squared Distribution]
+
+``#include <boost/math/distributions/chi_squared.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class chi_squared_distribution;
+
+ typedef chi_squared_distribution<> chi_squared;
+
+ template <class RealType, class ``__Policy``>
+ class chi_squared_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructor:
+      chi_squared_distribution(RealType v);
+
+ // Accessor to parameter:
+ RealType degrees_of_freedom()const;
+
+ // Parameter estimation:
+ static RealType find_degrees_of_freedom(
+         RealType difference_from_variance,
+ RealType alpha,
+ RealType beta,
+         RealType variance,
+ RealType hint = 100);
+ };
+
+ }} // namespaces
+
+The Chi-Squared distribution is one of the most widely used distributions
+in statistical tests. If [chi][sub i][space] are [nu][space]
+independent, normally distributed
+random variables with means [mu][sub i][space] and variances [sigma][sub i][super 2],
+then the random variable:
+
+[equation chi_squ_ref1]
+
+is distributed according to the Chi-Squared distribution.
+
+The Chi-Squared distribution is a special case of the gamma distribution
+and has a single parameter [nu][space] that specifies the number of degrees of
+freedom. The following graph illustrates how the distribution changes
+for different values of [nu]:
+
+[graph chi_squared_pdf]
+
+[h4 Member Functions]
+
+ chi_squared_distribution(RealType v);
+
+Constructs a Chi-Squared distribution with /v/ degrees of freedom.
+
+Requires v > 0, otherwise calls __domain_error.
+
+ RealType degrees_of_freedom()const;
+
+Returns the parameter /v/ from which this object was constructed.
+
+ static RealType find_degrees_of_freedom(
+ RealType difference_from_variance,
+ RealType alpha,
+ RealType beta,
+ RealType variance,
+ RealType hint = 100);
+
+Estimates the sample size required to detect a difference from a nominal
+variance in a Chi-Squared test for equal standard deviations.
+
+[variablelist
+[[difference_from_variance][The difference from the assumed nominal variance
+ that is to be detected: Note that the sign of this value is critical, see below.]]
+[[alpha][The maximum acceptable risk of rejecting the null hypothesis when it is
+ in fact true.]]
+[[beta][The maximum acceptable risk of falsely failing to reject the null hypothesis.]]
+[[variance][The nominal variance being tested against.]]
+[[hint][An optional hint on where to start looking for a result: the current sample
+ size would be a good choice.]]
+]
+
+Note that this calculation works with /variances/ and not /standard deviations/.
+
+The sign of the parameter /difference_from_variance/ is important: the Chi
+Squared distribution is asymmetric, and the caller must decide in advance
+whether they are testing for a variance greater than a nominal value (positive
+/difference_from_variance/) or testing for a variance less than a nominal value
+(negative /difference_from_variance/). If the latter, then obviously it is
+a requirement that `variance + difference_from_variance > 0`, since no sample
+can have a negative variance!
+
+This procedure uses the method in Diamond, W. J. (1989).
+Practical Experiment Designs, Van-Nostrand Reinhold, New York.
+
+See also section on Sample sizes required in
+[@http://www.itl.nist.gov/div898/handbook/prc/section2/prc232.htm the NIST Engineering Statistics Handbook, Section 7.2.3.2].
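+
+A minimal usage sketch follows; the variance, difference and risk levels
+are purely hypothetical, and since the result is a number of degrees of
+freedom, one is added to convert it to a sample size:
+
+    #include <boost/math/distributions/chi_squared.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::chi_squared;
+       // Hypothetical values: nominal variance 100, detect an increase
+       // of 50, with 5% risk of both Type I and Type II errors:
+       double df = chi_squared::find_degrees_of_freedom(
+          50.0,   // difference_from_variance (positive: above nominal)
+          0.05,   // alpha
+          0.05,   // beta
+          100.0); // nominal variance
+       std::cout << "sample size = " << std::ceil(df) + 1 << std::endl;
+    }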
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+(We have followed the usual restriction of the mode to degrees of freedom >= 2,
+but note that the maximum of the pdf is actually at zero for degrees of freedom
+from 2 down to 0; an extended definition that avoids a discontinuity in the mode
+is provided as alternative code in a comment.)
+
+The domain of the random variable is \[0, +[infin]\].
+
+[h4 Examples]
+
+Various [link math_toolkit.stat_tut.weg.cs_eg worked examples]
+are available illustrating the use of the Chi Squared Distribution.
+
+[h4 Accuracy]
+
+The Chi-Squared distribution is implemented in terms of the
+[link math_toolkit.sf_gamma.igamma incomplete gamma functions]:
+please refer to the accuracy data for those functions.
+
+[h4 Implementation]
+
+In the following table /v/ is the number of degrees of freedom of the distribution,
+/x/ is the random variate, /p/ is the probability, and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = __gamma_p_derivative(v / 2, x / 2) / 2 ]]
+[[cdf][Using the relation: p = __gamma_p(v / 2, x / 2) ]]
+[[cdf complement][Using the relation: q = __gamma_q(v / 2, x / 2) ]]
+[[quantile][Using the relation: x = 2 * __gamma_p_inv(v / 2, p) ]]
+[[quantile from the complement][Using the relation: x = 2 * __gamma_q_inv(v / 2, p) ]]
+[[mean][v]]
+[[variance][2v]]
+[[mode][v - 2 (if v >= 2)]]
+[[skewness][2 * sqrt(2 / v) == sqrt(8 / v)]]
+[[kurtosis][3 + 12 / v]]
+[[kurtosis excess][12 / v]]
+]
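+
+As elsewhere, these relations are easy to verify numerically; a small
+sketch with arbitrary values:
+
+    #include <boost/math/distributions/chi_squared.hpp>
+    #include <boost/math/special_functions/gamma.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using namespace boost::math;
+       double v = 5, x = 3.5; // arbitrary values
+       // The cdf agrees with its implementation in terms of the
+       // regularised lower incomplete gamma function:
+       std::cout << cdf(chi_squared(v), x) << " == "
+          << gamma_p(v / 2, x / 2) << std::endl;
+    }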
+
+[h4 References]
+
+* [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm NIST Exploratory Data Analysis]
+* [@http://en.wikipedia.org/wiki/Chi-square_distribution Chi-square distribution]
+* [@http://mathworld.wolfram.com/Chi-SquaredDistribution.html Weisstein, Eric W. "Chi-Squared Distribution." From MathWorld--A Wolfram Web Resource.]
+
+
+[endsect][/section:chi_squared_dist Chi Squared]
+
+[/ chi_squared.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/chi_squared_examples.qbk b/doc/distributions/chi_squared_examples.qbk
new file mode 100644
index 0000000..8f01585
--- /dev/null
+++ b/doc/distributions/chi_squared_examples.qbk
@@ -0,0 +1,500 @@
+
+[section:cs_eg Chi Squared Distribution Examples]
+
+[section:chi_sq_intervals Confidence Intervals on the Standard Deviation]
+
+Once you have calculated the standard deviation for your data, a legitimate
+question to ask is "How reliable is the calculated standard deviation?".
+For this situation the Chi Squared distribution can be used to calculate
+confidence intervals for the standard deviation.
+
+The full example code & sample output is in
+[@../../example/chi_square_std_dev_test.cpp chi_square_std_dev_test.cpp].
+
+We'll begin by defining the procedure that will calculate and print out the
+confidence intervals:
+
+ void confidence_limits_on_std_deviation(
+ double Sd, // Sample Standard Deviation
+ unsigned N) // Sample size
+ {
+
+We'll begin by printing out some general information:
+
+ cout <<
+ "________________________________________________\n"
+ "2-Sided Confidence Limits For Standard Deviation\n"
+ "________________________________________________\n\n";
+ cout << setprecision(7);
+ cout << setw(40) << left << "Number of Observations" << "= " << N << "\n";
+ cout << setw(40) << left << "Standard Deviation" << "= " << Sd << "\n";
+
+and then define a table of significance levels for which we'll calculate
+intervals:
+
+ double alpha[] = { 0.5, 0.25, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001 };
+
+The distribution we'll need to calculate the confidence intervals is a
+Chi Squared distribution, with N-1 degrees of freedom:
+
+ chi_squared dist(N - 1);
+
+For each value of alpha, the formula for the confidence interval is given by:
+
+[equation chi_squ_tut1]
+
+Where [equation chi_squ_tut2] is the upper critical value, and
+[equation chi_squ_tut3] is the lower critical value of the
+Chi Squared distribution.
+
+In code we begin by printing out a table header:
+
+ cout << "\n\n"
+ "_____________________________________________\n"
+ "Confidence Lower Upper\n"
+ " Value (%) Limit Limit\n"
+ "_____________________________________________\n";
+
+and then loop over the values of alpha and calculate the intervals
+for each: remember that the lower critical value is the same as the
+quantile, and the upper critical value is the same as the quantile
+from the complement of the probability:
+
+ for(unsigned i = 0; i < sizeof(alpha)/sizeof(alpha[0]); ++i)
+ {
+ // Confidence value:
+ cout << fixed << setprecision(3) << setw(10) << right << 100 * (1-alpha[i]);
+ // Calculate limits:
+ double lower_limit = sqrt((N - 1) * Sd * Sd / quantile(complement(dist, alpha[i] / 2)));
+ double upper_limit = sqrt((N - 1) * Sd * Sd / quantile(dist, alpha[i] / 2));
+ // Print Limits:
+ cout << fixed << setprecision(5) << setw(15) << right << lower_limit;
+ cout << fixed << setprecision(5) << setw(15) << right << upper_limit << endl;
+ }
+ cout << endl;
+
+To see some example output we'll use the
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
+gear data] from the __handbook.
+The data represents measurements of gear diameter from a manufacturing
+process.
+
+[pre'''
+________________________________________________
+2-Sided Confidence Limits For Standard Deviation
+________________________________________________
+
+Number of Observations = 100
+Standard Deviation = 0.006278908
+
+
+_____________________________________________
+Confidence Lower Upper
+ Value (%) Limit Limit
+_____________________________________________
+ 50.000 0.00601 0.00662
+ 75.000 0.00582 0.00685
+ 90.000 0.00563 0.00712
+ 95.000 0.00551 0.00729
+ 99.000 0.00530 0.00766
+ 99.900 0.00507 0.00812
+ 99.990 0.00489 0.00855
+ 99.999 0.00474 0.00895
+''']
+
+So at the 95% confidence level we conclude that the standard deviation
+is between 0.00551 and 0.00729.
+
+[h4 Confidence intervals as a function of the number of observations]
+
+Similarly, we can also list the confidence intervals for the standard deviation
+for the common confidence level of 95%, for increasing numbers of observations.
+
+The standard deviation used to compute these values is unity,
+so the limits listed are *multipliers* for any particular standard deviation.
+For example, given a standard deviation of 0.0062789 as in the example
+above; for 100 observations the multiplier is 0.8780
+giving the lower confidence limit of 0.8780 * 0.0062789 = 0.00551.
+
+[pre'''
+____________________________________________________
+Confidence level (two-sided) = 0.0500000
+Standard Deviation = 1.0000000
+________________________________________
+Observations Lower Upper
+ Limit Limit
+________________________________________
+ 2 0.4461 31.9102
+ 3 0.5207 6.2847
+ 4 0.5665 3.7285
+ 5 0.5991 2.8736
+ 6 0.6242 2.4526
+ 7 0.6444 2.2021
+ 8 0.6612 2.0353
+ 9 0.6755 1.9158
+ 10 0.6878 1.8256
+ 15 0.7321 1.5771
+ 20 0.7605 1.4606
+ 30 0.7964 1.3443
+ 40 0.8192 1.2840
+ 50 0.8353 1.2461
+ 60 0.8476 1.2197
+ 100 0.8780 1.1617
+ 120 0.8875 1.1454
+ 1000 0.9580 1.0459
+ 10000 0.9863 1.0141
+ 50000 0.9938 1.0062
+ 100000 0.9956 1.0044
+ 1000000 0.9986 1.0014
+''']
+
+With just 2 observations the limits are from *0.446* up to *31.9*,
+so the standard deviation might be about *half*
+the observed value up to [*30 times] the observed value!
+
+Estimating a standard deviation with just a handful of values leaves very great uncertainty,
+especially at the upper limit.
+Note especially how far the upper limit is skewed from the most likely standard deviation.
+
+Even for 10 observations, normally considered a reasonable number,
+the range is still from 0.69 to 1.83 - roughly 0.7 to 2 -
+and is still highly skewed with an upper limit *twice* the median.
+
+When we have 1000 observations, the estimate of the standard deviation is starting to look convincing,
+with a range from 0.95 to 1.05 - now near symmetrical, but still about + or - 5%.
+
+Only when we have 10000 or more repeated observations can we start to be reasonably confident
+(provided we are sure that other factors like drift are not creeping in).
+
+For 10000 observations, the interval is 0.99 to 1.01 - finally a really convincing + or - 1% confidence.
+
+[endsect] [/section:chi_sq_intervals Confidence Intervals on the Standard Deviation]
+
+[section:chi_sq_test Chi-Square Test for the Standard Deviation]
+
+We use this test to determine whether the standard deviation of a sample
+differs from a specified value. Typically this occurs in process change
+situations where we wish to compare the standard deviation of a new
+process to an established one.
+
+The code for this example is contained in
+[@../../example/chi_square_std_dev_test.cpp chi_square_std_dev_test.cpp], and
+we'll begin by defining the procedure that will print out the test
+statistics:
+
+ void chi_squared_test(
+ double Sd, // Sample std deviation
+ double D, // True std deviation
+ unsigned N, // Sample size
+ double alpha) // Significance level
+ {
+
+The procedure begins by printing a summary of the input data:
+
+ using namespace std;
+ using namespace boost::math;
+
+ // Print header:
+ cout <<
+ "______________________________________________\n"
+ "Chi Squared test for sample standard deviation\n"
+ "______________________________________________\n\n";
+ cout << setprecision(5);
+ cout << setw(55) << left << "Number of Observations" << "= " << N << "\n";
+ cout << setw(55) << left << "Sample Standard Deviation" << "= " << Sd << "\n";
+ cout << setw(55) << left << "Expected True Standard Deviation" << "= " << D << "\n\n";
+
+The test statistic (T) is simply the ratio of the sample and "true" standard
+deviations squared, multiplied by the number of degrees of freedom (the
+sample size less one):
+
+ double t_stat = (N - 1) * (Sd / D) * (Sd / D);
+ cout << setw(55) << left << "Test Statistic" << "= " << t_stat << "\n";
+
+The distribution we need to use is a Chi Squared distribution with N-1
+degrees of freedom:
+
+ chi_squared dist(N - 1);
+
+The various hypotheses that can be tested are summarised in the following table:
+
+[table
+[[Hypothesis][Test]]
+[[The null-hypothesis: there is no difference in standard deviation from the specified value]
+ [ Reject if T < [chi][super 2][sub (1-alpha/2; N-1)] or T > [chi][super 2][sub (alpha/2; N-1)] ]]
+[[The alternative hypothesis: there is a difference in standard deviation from the specified value]
+  [ Reject if [chi][super 2][sub (1-alpha/2; N-1)] <= T <= [chi][super 2][sub (alpha/2; N-1)] ]]
+[[The alternative hypothesis: the standard deviation is less than the specified value]
+ [ Reject if [chi][super 2][sub (1-alpha; N-1)] <= T ]]
+[[The alternative hypothesis: the standard deviation is greater than the specified value]
+ [ Reject if [chi][super 2][sub (alpha; N-1)] >= T ]]
+]
+
+Where [chi][super 2][sub (alpha; N-1)] is the upper critical value of the
+Chi Squared distribution, and [chi][super 2][sub (1-alpha; N-1)] is the
+lower critical value.
+
+Recall that the lower critical value is the same
+as the quantile, and the upper critical value is the same as the quantile
+from the complement of the probability; that gives us the following code
+to calculate the critical values:
+
+ double ucv = quantile(complement(dist, alpha));
+ double ucv2 = quantile(complement(dist, alpha / 2));
+ double lcv = quantile(dist, alpha);
+ double lcv2 = quantile(dist, alpha / 2);
+ cout << setw(55) << left << "Upper Critical Value at alpha: " << "= "
+ << setprecision(3) << scientific << ucv << "\n";
+ cout << setw(55) << left << "Upper Critical Value at alpha/2: " << "= "
+ << setprecision(3) << scientific << ucv2 << "\n";
+ cout << setw(55) << left << "Lower Critical Value at alpha: " << "= "
+ << setprecision(3) << scientific << lcv << "\n";
+ cout << setw(55) << left << "Lower Critical Value at alpha/2: " << "= "
+ << setprecision(3) << scientific << lcv2 << "\n\n";
+
+Now that we have the critical values, we can compare these to our test
+statistic, and print out the result of each hypothesis and test:
+
+ cout << setw(55) << left <<
+ "Results for Alternative Hypothesis and alpha" << "= "
+ << setprecision(4) << fixed << alpha << "\n\n";
+ cout << "Alternative Hypothesis Conclusion\n";
+
+ cout << "Standard Deviation != " << setprecision(3) << fixed << D << " ";
+ if((ucv2 < t_stat) || (lcv2 > t_stat))
+ cout << "ACCEPTED\n";
+ else
+ cout << "REJECTED\n";
+
+ cout << "Standard Deviation < " << setprecision(3) << fixed << D << " ";
+ if(lcv > t_stat)
+ cout << "ACCEPTED\n";
+ else
+ cout << "REJECTED\n";
+
+ cout << "Standard Deviation > " << setprecision(3) << fixed << D << " ";
+ if(ucv < t_stat)
+ cout << "ACCEPTED\n";
+ else
+ cout << "REJECTED\n";
+ cout << endl << endl;
+
+To see some example output we'll use the
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
+gear data] from the __handbook.
+The data represents measurements of gear diameter from a manufacturing
+process. The program output is deliberately designed to mirror
+the DATAPLOT output shown in the
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda358.htm
+NIST Handbook Example].
+
+[pre'''
+______________________________________________
+Chi Squared test for sample standard deviation
+______________________________________________
+
+Number of Observations = 100
+Sample Standard Deviation = 0.00628
+Expected True Standard Deviation = 0.10000
+
+Test Statistic = 0.39030
+CDF of test statistic: = 1.438e-099
+Upper Critical Value at alpha: = 1.232e+002
+Upper Critical Value at alpha/2: = 1.284e+002
+Lower Critical Value at alpha: = 7.705e+001
+Lower Critical Value at alpha/2: = 7.336e+001
+
+Results for Alternative Hypothesis and alpha = 0.0500
+
+Alternative Hypothesis Conclusion'''
+Standard Deviation != 0.100 ACCEPTED
+Standard Deviation < 0.100 ACCEPTED
+Standard Deviation > 0.100 REJECTED
+]
+
+In this case we are testing whether the sample standard deviation is 0.1,
+and the null-hypothesis is rejected, so we conclude that the standard
+deviation ['is not] 0.1.
+
+For an alternative example, consider the
+[@http://www.itl.nist.gov/div898/handbook/prc/section2/prc23.htm
+silicon wafer data] again from the __handbook.
+In this scenario a supplier of 100 ohm.cm silicon wafers claims
+that his fabrication process can produce wafers with sufficient
+consistency so that the standard deviation of resistivity for
+the lot does not exceed 10 ohm.cm. A sample of N = 10 wafers taken
+from the lot has a standard deviation of 13.97 ohm.cm, and the question
+we ask ourselves is "Is the supplier's claim correct?".
+
+The program output now looks like this:
+
+[pre'''
+______________________________________________
+Chi Squared test for sample standard deviation
+______________________________________________
+
+Number of Observations = 10
+Sample Standard Deviation = 13.97000
+Expected True Standard Deviation = 10.00000
+
+Test Statistic = 17.56448
+CDF of test statistic: = 9.594e-001
+Upper Critical Value at alpha: = 1.692e+001
+Upper Critical Value at alpha/2: = 1.902e+001
+Lower Critical Value at alpha: = 3.325e+000
+Lower Critical Value at alpha/2: = 2.700e+000
+
+Results for Alternative Hypothesis and alpha = 0.0500
+
+Alternative Hypothesis Conclusion'''
+Standard Deviation != 10.000 REJECTED
+Standard Deviation < 10.000 REJECTED
+Standard Deviation > 10.000 ACCEPTED
+]
+
+In this case, our null-hypothesis is that the standard deviation of
+the sample is less than 10: this hypothesis is rejected in the analysis
+above, and so we reject the manufacturer's claim.
+
+[endsect] [/section:chi_sq_test Chi-Square Test for the Standard Deviation]
+
+[section:chi_sq_size Estimating the Required Sample Sizes for a Chi-Square Test for the Standard Deviation]
+
+Suppose we conduct a Chi Squared test for standard deviation and the result
+is borderline; a legitimate question to ask is "How large would the sample size
+have to be in order to produce a definitive result?"
+
+The class template [link math_toolkit.dist_ref.dists.chi_squared_dist
+chi_squared_distribution] has a static method
+`find_degrees_of_freedom` that will calculate this value for
+some acceptable risk of type I failure /alpha/, type II failure
+/beta/, and difference from the standard deviation /diff/. Please
+note that the method used works on variance, and not standard deviation
+as is usual for the Chi Squared Test.
+
+The code for this example is located in
+[@../../example/chi_square_std_dev_test.cpp chi_square_std_dev_test.cpp].
+
+We begin by defining a procedure to print out the sample sizes required
+for various risk levels:
+
+ void chi_squared_sample_sized(
+ double diff, // difference from variance to detect
+ double variance) // true variance
+ {
+
+The procedure begins by printing out the input data:
+
+ using namespace std;
+ using namespace boost::math;
+
+ // Print out general info:
+ cout <<
+ "_____________________________________________________________\n"
+ "Estimated sample sizes required for various confidence levels\n"
+ "_____________________________________________________________\n\n";
+ cout << setprecision(5);
+ cout << setw(40) << left << "True Variance" << "= " << variance << "\n";
+ cout << setw(40) << left << "Difference to detect" << "= " << diff << "\n";
+
+And defines a table of significance levels for which we'll calculate sample sizes:
+
+ double alpha[] = { 0.5, 0.25, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001 };
+
+For each value of alpha we can calculate two sample sizes: one where the
+sample variance is less than the true value by /diff/ and one
+where it is greater than the true value by /diff/. Thanks to the
+asymmetric nature of the Chi Squared distribution these two values will
+not be the same; their calculation differs only in the
+sign of /diff/ that's passed to `find_degrees_of_freedom`. Finally,
+in this example we'll simplify things, and let the risk level /beta/ be the
+same as /alpha/:
+
+ cout << "\n\n"
+ "_______________________________________________________________\n"
+ "Confidence Estimated Estimated\n"
+ " Value (%) Sample Size Sample Size\n"
+ " (lower one (upper one\n"
+ " sided test) sided test)\n"
+ "_______________________________________________________________\n";
+ //
+ // Now print out the data for the table rows.
+ //
+ for(unsigned i = 0; i < sizeof(alpha)/sizeof(alpha[0]); ++i)
+ {
+ // Confidence value:
+ cout << fixed << setprecision(3) << setw(10) << right << 100 * (1-alpha[i]);
+ // calculate df for a lower single sided test:
+ double df = chi_squared::find_degrees_of_freedom(
+ -diff, alpha[i], alpha[i], variance);
+ // convert to sample size:
+ double size = ceil(df) + 1;
+ // Print size:
+ cout << fixed << setprecision(0) << setw(16) << right << size;
+ // calculate df for an upper single sided test:
+ df = chi_squared::find_degrees_of_freedom(
+ diff, alpha[i], alpha[i], variance);
+ // convert to sample size:
+ size = ceil(df) + 1;
+ // Print size:
+ cout << fixed << setprecision(0) << setw(16) << right << size << endl;
+ }
+ cout << endl;
+
+For some example output, consider the
+[@http://www.itl.nist.gov/div898/handbook/prc/section2/prc23.htm
+silicon wafer data] from the __handbook.
+In this scenario a supplier of 100 ohm.cm silicon wafers claims
+that his fabrication process can produce wafers with sufficient
+consistency so that the standard deviation of resistivity for
+the lot does not exceed 10 ohm.cm. A sample of N = 10 wafers taken
+from the lot has a standard deviation of 13.97 ohm.cm, and the question
+we ask ourselves is "How large would our sample have to be to reliably
+detect this difference?".
+
+To use our procedure above, we have to convert the
+standard deviations to variance (square them),
+after which the program output looks like this:
+
+[pre'''
+_____________________________________________________________
+Estimated sample sizes required for various confidence levels
+_____________________________________________________________
+
+True Variance = 100.00000
+Difference to detect = 95.16090
+
+
+_______________________________________________________________
+Confidence Estimated Estimated
+ Value (%) Sample Size Sample Size
+ (lower one (upper one
+ sided test) sided test)
+_______________________________________________________________
+ 50.000 2 2
+ 75.000 2 10
+ 90.000 4 32
+ 95.000 5 51
+ 99.000 7 99
+ 99.900 11 174
+ 99.990 15 251
+ 99.999 20 330'''
+]
+
+In this case we are interested in an upper single-sided test.
+So for example, if the maximum acceptable risk of falsely rejecting
+the null-hypothesis is 0.05 (Type I error), and the maximum acceptable
+risk of falsely failing to reject the null-hypothesis is also 0.05
+(Type II error), we estimate that we would need a sample size of 51.
+
+[endsect] [/section:chi_sq_size Estimating the Required Sample Sizes for a Chi-Square Test for the Standard Deviation]
+
+[endsect] [/section:cs_eg Chi Squared Distribution]
+
+[/
+ Copyright 2006, 2013 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/dist_algorithms.qbk b/doc/distributions/dist_algorithms.qbk
new file mode 100644
index 0000000..a29daee
--- /dev/null
+++ b/doc/distributions/dist_algorithms.qbk
@@ -0,0 +1,78 @@
+[section:dist_algorithms Distribution Algorithms]
+
+[h4 Finding the Location and Scale for Normal and similar distributions]
+
+Two functions aid in finding the location and scale of a random variable z
+that give probability p (given the other parameter, scale or location).
+They apply only to distributions such as the normal, lognormal, extreme value,
+Cauchy (and symmetrical triangular), which have scale and location properties.
+
+These functions are useful to predict the mean and/or standard deviation that will be needed to meet a specified minimum weight or maximum dose.
+
+Complement versions are also provided, both with explicit and implicit (default) policy.
+
+ using boost::math::policies::policy; // May be needed by users defining their own policies.
+ using boost::math::complement; // Will be needed by users who want to use complements.
+
+[h4 find_location function]
+
+``#include <boost/math/distributions/find_location.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class Dist, class ``__Policy``> // explicit error handling policy
+ typename Dist::value_type find_location( // For example, normal mean.
+ typename Dist::value_type z, // location of random variable z to give probability, P(X > z) == p.
+ // For example, a nominal minimum acceptable z, so that p * 100 % are > z
+ typename Dist::value_type p, // probability value desired at x, say 0.95 for 95% > z.
+ typename Dist::value_type scale, // scale parameter, for example, normal standard deviation.
+ const ``__Policy``& pol);
+
+ template <class Dist> // with default policy.
+ typename Dist::value_type find_location( // For example, normal mean.
+ typename Dist::value_type z, // location of random variable z to give probability, P(X > z) == p.
+ // For example, a nominal minimum acceptable z, so that p * 100 % are > z
+ typename Dist::value_type p, // probability value desired at x, say 0.95 for 95% > z.
+ typename Dist::value_type scale); // scale parameter, for example, normal standard deviation.
+
+ }} // namespaces
+
+[h4 find_scale function]
+
+``#include <boost/math/distributions/find_scale.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class Dist, class ``__Policy``>
+ typename Dist::value_type find_scale( // For example, normal mean.
+ typename Dist::value_type z, // location of random variable z to give probability, P(X > z) == p.
+ // For example, a nominal minimum acceptable weight z, so that p * 100 % are > z
+ typename Dist::value_type p, // probability value desired at x, say 0.95 for 95% > z.
+ typename Dist::value_type location, // location parameter, for example, normal distribution mean.
+ const ``__Policy``& pol);
+
+ template <class Dist> // with default policy.
+ typename Dist::value_type find_scale( // For example, normal mean.
+ typename Dist::value_type z, // location of random variable z to give probability, P(X > z) == p.
+ // For example, a nominal minimum acceptable z, so that p * 100 % are > z
+ typename Dist::value_type p, // probability value desired at x, say 0.95 for 95% > z.
+      typename Dist::value_type location); // location parameter, for example, normal distribution mean.
+ }} // namespaces
+
+All arguments must be finite, otherwise __domain_error is called.
+
+Probability arguments must be in [0, 1], otherwise __domain_error is called.
+
+If the choice of arguments would give a negative scale, __domain_error is called, unless the policy is to ignore the error, in which case the negative (impossible) scale value is returned.
+
+[link math_toolkit.stat_tut.weg.find_eg Find Mean and standard deviation examples]
+gives simple examples of use of both find_scale and find_location, and a longer example finding means and standard deviations of normally distributed weights to meet a specification.
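+
+A minimal usage sketch is given below; the numeric values are hypothetical,
+and the worked examples linked above spell out the exact probability
+conventions in detail:
+
+    #include <boost/math/distributions/normal.hpp>
+    #include <boost/math/distributions/find_location.hpp>
+    #include <boost/math/distributions/find_scale.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using namespace boost::math;
+       // Hypothetical specification: z = 2.9, probability 0.05, with a
+       // known standard deviation of 0.6 (for find_location) or a known
+       // mean of 3.1 (for find_scale):
+       double mean = find_location<normal>(2.9, 0.05, 0.6);
+       double sd = find_scale<normal>(2.9, 0.05, 3.1);
+       std::cout << mean << ' ' << sd << std::endl;
+    }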
+
+[endsect] [/section:dist_algorithms dist_algorithms]
+
+[/ dist_algorithms.qbk
+ Copyright 2007 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/dist_reference.qbk b/doc/distributions/dist_reference.qbk
new file mode 100644
index 0000000..3d5d82f
--- /dev/null
+++ b/doc/distributions/dist_reference.qbk
@@ -0,0 +1,147 @@
+[section:dist_ref Statistical Distributions Reference]
+
+[include non_members.qbk]
+
+[section:dists Distributions]
+
+[include arcsine.qbk]
+[include bernoulli.qbk]
+[include beta.qbk]
+[include binomial.qbk]
+[include cauchy.qbk]
+[include chi_squared.qbk]
+[include exponential.qbk]
+[include extreme_value.qbk]
+[include fisher.qbk]
+[include gamma.qbk]
+[include geometric.qbk]
+[include hyperexponential.qbk]
+[include hypergeometric.qbk]
+[include inverse_chi_squared.qbk]
+[include inverse_gamma.qbk]
+[include inverse_gaussian.qbk]
+[include laplace.qbk]
+[include logistic.qbk]
+[include lognormal.qbk]
+[include negative_binomial.qbk]
+[include nc_beta.qbk]
+[include nc_chi_squared.qbk]
+[include nc_f.qbk]
+[include nc_t.qbk]
+[include normal.qbk]
+[include pareto.qbk]
+[include poisson.qbk]
+[include rayleigh.qbk]
+[include skew_normal.qbk]
+[include students_t.qbk]
+[include triangular.qbk]
+[include uniform.qbk]
+[include weibull.qbk]
+
+[endsect] [/section:dists Distributions]
+
+[include dist_algorithms.qbk]
+
+[endsect] [/section:dist_ref Statistical Distributions and Functions Reference]
+
+
+[section:future Extras/Future Directions]
+
+[h4 Adding Additional Location and Scale Parameters]
+
+In some modelling applications we require a distribution
+with a specific location and scale:
+often this equates to a specific mean and standard deviation, although for many
+distributions the relationship between these properties and the location and
+scale parameters is non-trivial. See
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda364.htm http://www.itl.nist.gov/div898/handbook/eda/section3/eda364.htm]
+for more information.
+
+The obvious way to handle this is via an adapter template:
+
+ template <class Dist>
+ class scaled_distribution
+ {
+ scaled_distribution(
+ const Dist dist,
+ typename Dist::value_type location,
+         typename Dist::value_type scale = 1);
+ };
+
+This would then have its own set of overloads for the non-member accessor functions.
+
+[h4 An "any_distribution" class]
+
+It is easy to add a distribution object that virtualises
+the actual type of the distribution, and can therefore hold "any" object
+that conforms to the conceptual requirements of a distribution:
+
+ template <class RealType>
+ class any_distribution
+ {
+ public:
+ template <class Distribution>
+ any_distribution(const Distribution& d);
+ };
+
+ // Get the cdf of the underlying distribution:
+ template <class RealType>
+ RealType cdf(const any_distribution<RealType>& d, RealType x);
+ // etc....
+
+Such a class would facilitate the writing of non-template code that can
+function with any distribution type.
+
+The [@http://sourceforge.net/projects/distexplorer/ Statistical Distribution Explorer]
+utility for Windows is a usage example.
+
+It's not clear yet whether there is a compelling use case though.
+Possibly tests for goodness of fit might
+provide such a use case: this needs more investigation.
+
+[h4 Higher Level Hypothesis Tests]
+
+Higher-level tests roughly corresponding to the
+[@http://documents.wolfram.com/mathematica/Add-onsLinks/StandardPackages/Statistics/HypothesisTests.html Mathematica Hypothesis Tests]
+package could be added reasonably easily, for example:
+
+ template <class InputIterator>
+ typename std::iterator_traits<InputIterator>::value_type
+ test_equal_mean(
+ InputIterator a,
+ InputIterator b,
+ typename std::iterator_traits<InputIterator>::value_type expected_mean);
+
+Returns the probability that the data in the sequence \[a,b) has the mean
+/expected_mean/.
+
+[h4 Integration With Statistical Accumulators]
+
+[@http://boost-sandbox.sourceforge.net/libs/accumulators/doc/html/index.html
+Eric Niebler's accumulator framework] - also work in progress - provides the means
+to calculate various statistical properties from experimental data. There is an
+opportunity to integrate the statistical tests with this framework at some later date:
+
+ // Define an accumulator, all required statistics to calculate the test
+ // are calculated automatically:
+ accumulator_set<double, features<tag::test_expected_mean> > acc(expected_mean=4);
+ // Pass our data to the accumulator:
+ acc = std::for_each(mydata.begin(), mydata.end(), acc);
+ // Extract the result:
+ double p = probability(acc);
+
+[endsect] [/section:future Extras Future Directions]
+
+[/ dist_reference.qbk
+ Copyright 2006, 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+
+
+
+
+
+
diff --git a/doc/distributions/dist_tutorial.qbk b/doc/distributions/dist_tutorial.qbk
new file mode 100644
index 0000000..0d0e0df
--- /dev/null
+++ b/doc/distributions/dist_tutorial.qbk
@@ -0,0 +1,459 @@
+[/ def names all end in distrib to avoid clashes with names of functions]
+
+[def __binomial_distrib [link math_toolkit.dist_ref.dists.binomial_dist Binomial Distribution]]
+[def __chi_squared_distrib [link math_toolkit.dist_ref.dists.chi_squared_dist Chi Squared Distribution]]
+[def __normal_distrib [link math_toolkit.dist_ref.dists.normal_dist Normal Distribution]]
+[def __F_distrib [link math_toolkit.dist_ref.dists.f_dist Fisher F Distribution]]
+[def __students_t_distrib [link math_toolkit.dist_ref.dists.students_t_dist Students t Distribution]]
+
+[def __handbook [@http://www.itl.nist.gov/div898/handbook/
+NIST/SEMATECH e-Handbook of Statistical Methods.]]
+
+[section:stat_tut Statistical Distributions Tutorial]
+This library is centred around statistical distributions. This tutorial
+gives you an overview of what they are and how they can be used, and
+provides a few worked examples of applying the library to statistical tests.
+
+[section:overview Overview of Distributions]
+
+[section:headers Headers and Namespaces]
+
+All the code in this library is inside namespace boost::math.
+
+In order to use a distribution /my_distribution/ you will need to include
+either the header <boost/math/distributions/my_distribution.hpp> or
+the "include all the distributions" header: <boost/math/distributions.hpp>.
+
+For example, to use the Students-t distribution include either
+<boost/math/distributions/students_t.hpp> or
+<boost/math/distributions.hpp>.
+
+You also need to bring distribution names into scope,
+perhaps with a `using namespace boost::math;` declaration,
+or with specific `using` declarations like `using boost::math::normal;` (*recommended*).
+
+[caution Some math function names are also used in namespace std so including <random> could cause ambiguity!]
+
+[endsect] [/ section:headers Headers and Namespaces]
+
+[section:objects Distributions are Objects]
+
+Each kind of distribution in this library is a class type - an object.
+
+[link policy Policies] provide fine-grained control
+of the behaviour of these classes, allowing the user to customise
+behaviour such as how errors are handled, or how the quantiles
+of discrete distributions behave.
+
+[tip If you are familiar with statistics libraries using functions,
+and 'Distributions as Objects' seems alien, see
+[link math_toolkit.stat_tut.weg.nag_library the comparison to
+other statistics libraries.]
+] [/tip]
+
+Making distributions class types does two things:
+
+* It encapsulates the kind of distribution in the C++ type system;
+so, for example, Students-t distributions are always a different C++ type from
+Chi-Squared distributions.
+* The distribution objects store any parameters associated with the
+distribution: for example, the Students-t distribution has a
+['degrees of freedom] parameter that controls the shape of the distribution.
+This ['degrees of freedom] parameter has to be provided
+to the Students-t object when it is constructed.
+
+Although the distribution classes in this library are templates, there
+are typedefs on type /double/ that mostly take the usual name of the
+distribution
+(except where there is a clash with a function of the same name: beta and gamma,
+in which case using the default template arguments - `RealType = double` -
+is nearly as convenient).
+Probably 95% of uses are covered by these typedefs:
+
+ // using namespace boost::math; // Avoid potential ambiguity with names in std <random>
+ // Safer to declare specific functions with using statement(s):
+
+ using boost::math::beta_distribution;
+ using boost::math::binomial_distribution;
+ using boost::math::students_t;
+
+ // Construct a students_t distribution with 4 degrees of freedom:
+ students_t d1(4);
+
+ // Construct a double-precision beta distribution
+ // with parameters a = 10, b = 20
+ beta_distribution<> d2(10, 20); // Note: _distribution<> suffix !
+
+If you need to use the distributions with a type other than `double`,
+then you can instantiate the template directly: the names of the
+templates are the same as the `double` typedef but with `_distribution`
+appended, for example: __students_t_distrib or __binomial_distrib:
+
+ // Construct a students_t distribution, of float type,
+ // with 4 degrees of freedom:
+ students_t_distribution<float> d3(4);
+
+ // Construct a binomial distribution, of long double type,
+ // with probability of success 0.3
+ // and 20 trials in total:
+ binomial_distribution<long double> d4(20, 0.3);
+
+The parameters passed to the distributions can be accessed via getter member
+functions:
+
+ d1.degrees_of_freedom(); // returns 4.0
+
+This is all well and good, but not very useful so far. What we often want
+is to be able to calculate the /cumulative distribution functions/ and
+/quantiles/ etc for these distributions.
+
+[endsect] [/section:objects Distributions are Objects]
+
+
+[section:generic Generic operations common to all distributions are non-member functions]
+
+Want to calculate the PDF (Probability Density Function) of a distribution?
+No problem, just use:
+
+ pdf(my_dist, x); // Returns PDF (density) at point x of distribution my_dist.
+
+Or how about the CDF (Cumulative Distribution Function):
+
+ cdf(my_dist, x); // Returns CDF (integral from -infinity to point x)
+ // of distribution my_dist.
+
+And quantiles are just the same:
+
+ quantile(my_dist, p); // Returns the value of the random variable x
+ // such that cdf(my_dist, x) == p.
+
+If you're wondering why these aren't member functions, it's to
+make the library more easily extensible: if you want to add additional
+generic operations - let's say the /n'th moment/ - then all you have to
+do is add the appropriate non-member functions, overloaded for each
+implemented distribution type.
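+
+For example, a minimal sketch (not one of the library's own examples)
+putting the three functions together with the Students-t distribution:
+
+    #include <boost/math/distributions/students_t.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::students_t;
+       students_t dist(4); // 4 degrees of freedom.
+       std::cout << pdf(dist, 1.0) << "\n";        // density at t = 1
+       std::cout << cdf(dist, 1.0) << "\n";        // P(T <= 1)
+       std::cout << quantile(dist, 0.975) << "\n"; // t such that cdf(dist, t) == 0.975
+    }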
+
+[tip
+
+[*Random numbers that approximate Quantiles of Distributions]
+
+If you want random numbers that are distributed in a specific way,
+for example following a uniform, normal or triangular distribution,
+see [@http://www.boost.org/libs/random/ Boost.Random].
+
+Whilst in principle there's nothing to prevent you from using the
+quantile function to convert a uniformly distributed random
+number to another distribution, in practice there are much more
+efficient algorithms available that are specific to random number generation.
+] [/tip Random numbers that approximate Quantiles of Distributions]
+
+For example, the binomial distribution has two parameters:
+n (the number of trials) and p (the probability of success on any one trial).
+
+The `binomial_distribution` constructor therefore has two parameters:
+
+`binomial_distribution(RealType n, RealType p);`
+
+For this distribution the __random_variate is k: the number of successes observed.
+The probability density\/mass function (pdf) is therefore written as ['f(k; n, p)].
+
+[note
+
+[*Random Variates and Distribution Parameters]
+
+The concept of a __random_variable is closely linked to the term __random_variate:
+a random variate is a particular value (outcome) of a random variable,
+and [@http://en.wikipedia.org/wiki/Parameter distribution parameters]
+are conventionally distinguished (for example in Wikipedia and Wolfram MathWorld)
+by placing a semi-colon or vertical bar
+/after/ the __random_variable (whose value you 'choose'),
+to separate the variate from the parameter(s) that define the shape of the distribution.[br]
+For example, the binomial distribution probability distribution function (PDF) is written as
+['f(k| n, p)] = Pr(K = k|n, p) = the probability of observing k successes out of n trials.
+K is the __random_variable, k is the __random_variate, and
+the parameters are n (trials) and p (probability).
+] [/note Random Variates and Distribution Parameters]
+
+[note By convention, a __random_variate is written in lower case, usually k if integral, x if real, and
+a __random_variable in upper case, K if integral, X if real. But this implementation treats
+all as floating-point values `RealType`, so if you really want an integral result,
+you must round: see the note on Discrete Probability Distributions below for details.]
+
+As noted above, the non-member function `pdf` has one parameter for the distribution object,
+and a second for the random variate. So taking our binomial distribution
+example, we would write:
+
+`pdf(binomial_distribution<RealType>(n, p), k);`
+
+The ranges of __random_variate values that are permitted and that are supported can be
+tested by using the two functions `range` and `support`.
+
+The distribution (effectively the __random_variate) is said to be 'supported'
+over a range that is
+[@http://en.wikipedia.org/wiki/Probability_distribution
+ "the smallest closed set whose complement has probability zero"].
+MathWorld uses the word 'defined' for this range.
+Non-mathematicians might say it means the smallest 'interesting' range
+of random variate x, over which the cdf goes from zero to unity.
+Outside it are uninteresting zones where the pdf is zero and the cdf is zero or unity.
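+
+For example (a small illustrative sketch), both functions return a
+`std::pair` holding the lower and upper limits:
+
+    #include <boost/math/distributions/weibull.hpp>
+    #include <iostream>
+    #include <utility>
+
+    int main()
+    {
+       using boost::math::weibull;
+       weibull w(1, 1); // shape = 1, scale = 1.
+       std::pair<double, double> r = range(w);   // permitted variate values
+       std::pair<double, double> s = support(w); // the supported sub-range
+       std::cout << r.first << " to " << r.second << "\n";
+       std::cout << s.first << " to " << s.second << "\n";
+    }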
+
+For most distributions, with probability distribution functions one might describe
+as 'well-behaved', we have decided that it is most useful for the supported range
+to *exclude* random variate values like exact zero *if the end point is discontinuous*.
+For example, the Weibull (scale 1, shape 1) distribution smoothly heads for unity
+as the random variate x declines towards zero.
+But at x = zero, the value of the pdf is suddenly exactly zero, by definition.
+If you are plotting the PDF, or otherwise calculating,
+zero is not the most useful value for the lower limit of the supported range, as we discovered.
+So for this, and similar distributions,
+we have decided it is most numerically useful to use
+the closest representable value to zero, `min_value`, for the lower limit of the supported range.
+(The `range` still starts at zero, so you will still get `pdf(weibull, 0) == 0`.)
+(The exponential and gamma distributions have similarly discontinuous functions.)
+
+Mathematically, the functions may make sense with an (+ or -) infinite value,
+but except for a few special cases (in the Normal and Cauchy distributions)
+this implementation limits random variates to finite values between the
+`min` and `max` for the `RealType`.
+(See [link math_toolkit.sf_implementation.handling_of_floating_point_infin
+Handling of Floating-Point Infinity] for rationale).
+
+
+[note
+
+[*Discrete Probability Distributions]
+
+Note that the [@http://en.wikipedia.org/wiki/Discrete_probability_distribution
+discrete distributions], including the binomial, negative binomial, Poisson & Bernoulli,
+are all mathematically defined as discrete functions:
+that is to say the functions `cdf` and `pdf` are only defined for integral values
+of the random variate.
+
+However, because the method of calculation often uses continuous functions
+it is convenient to treat them as if they were continuous functions,
+and permit non-integral values of their parameters.
+
+Users wanting to enforce a strict mathematical model may use `floor`
+or `ceil` functions on the random variate prior to calling the distribution
+function.
+
+The quantile functions for these distributions are hard to specify
+in a manner that will satisfy everyone all of the time. The default
+behaviour is to return an integer result that has been rounded
+/outwards/: that is to say, lower quantiles - where the probability
+is less than 0.5 - are rounded down, while upper quantiles - where
+the probability is greater than 0.5 - are rounded up. This behaviour
+ensures that if an X% quantile is requested, then /at least/ the requested
+coverage will be present in the central region, and /no more than/
+the requested coverage will be present in the tails.
+
+This behaviour can be changed so that the quantile functions are rounded
+differently, or return a real-valued result using
+[link math_toolkit.pol_overview Policies]. It is strongly
+recommended that you read the tutorial
+[link math_toolkit.pol_tutorial.understand_dis_quant
+Understanding Quantiles of Discrete Distributions] before
+using the quantile function on a discrete distribution. The
+[link math_toolkit.pol_ref.discrete_quant_ref reference docs]
+describe how to change the rounding policy
+for these distributions.
+
+For similar reasons, continuous distributions with parameters like
+"degrees of freedom"
+that might appear to be integral are treated as real values
+(and are promoted from integer to floating-point if necessary).
+In this case, however, there are a small number of situations where non-integral
+degrees of freedom do have a genuine meaning.
+]
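+
+As a brief sketch of the points in this note (illustrative only):
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <cmath>    // for std::floor
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::binomial;
+       binomial b(100, 0.25);
+       // Enforce a strictly-discrete model by flooring the variate:
+       std::cout << pdf(b, std::floor(17.7)) << "\n";
+       // Default policy: quantiles of discrete distributions return
+       // integer results, rounded outwards:
+       std::cout << quantile(b, 0.05) << "\n";             // rounded down
+       std::cout << quantile(complement(b, 0.05)) << "\n"; // rounded up
+    }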
+
+[endsect] [/ section:generic Generic operations common to all distributions are non-member functions]
+
+[section:complements Complements are supported too - and when to use them]
+
+Often you don't want the value of the CDF, but its complement, which is
+to say `1-p` rather than `p`. It is tempting to calculate the CDF and subtract
+it from `1`, but if `p` is very close to `1` then cancellation error
+will cause you to lose accuracy, perhaps totally.
+
+[link why_complements See below ['"Why and when to use complements?"]]
+
+In this library, whenever you want to receive a complement, just wrap
+all the function arguments in a call to `complement(...)`, for example:
+
+ students_t dist(5);
+ cout << "CDF at t = 1 is " << cdf(dist, 1.0) << endl;
+ cout << "Complement of CDF at t = 1 is " << cdf(complement(dist, 1.0)) << endl;
+
+But wait, now that we have a complement, we have to be able to use it as well.
+Any function that accepts a probability as an argument can also accept a complement
+by wrapping all of its arguments in a call to `complement(...)`, for example:
+
+ students_t dist(5);
+
+ for(double i = 10; i < 1e10; i *= 10)
+ {
+ // Calculate the quantile for a 1 in i chance:
+ double t = quantile(complement(dist, 1/i));
+ // Print it out:
+ cout << "Quantile of students-t with 5 degrees of freedom\n"
+ "for a 1 in " << i << " chance is " << t << endl;
+ }
+
+[tip
+
+[*Critical values are just quantiles]
+
+Some texts talk about quantiles, percentiles or fractiles,
+others about critical values; the basic rule is:
+
+['Lower critical values] are the same as the quantile.
+
+['Upper critical values] are the same as the quantile from the complement
+of the probability.
+
+For example, suppose we have a Bernoulli process, giving rise to a binomial
+distribution with success ratio 0.1 and 100 trials in total. The
+['lower critical value] for a probability of 0.05 is given by:
+
+`quantile(binomial(100, 0.1), 0.05)`
+
+and the ['upper critical value] is given by:
+
+`quantile(complement(binomial(100, 0.1), 0.05))`
+
+which return 4.82 and 14.63 respectively.
+]
+
+[#why_complements]
+[tip
+
+[*Why bother with complements anyway?]
+
+It's very tempting to dispense with complements, and simply subtract
+the probability from 1 when required. However, consider what happens when
+the probability is very close to 1: let's say the probability expressed at
+float precision is `0.999999940f`, then `1 - 0.999999940f = 5.96046448e-008`,
+but the result is actually accurate to just ['one single bit]: the only
+bit that didn't cancel out!
+
+Or to look at this another way: consider that we want the risk of falsely
+rejecting the null-hypothesis in the Student's t test to be 1 in 1 billion,
+for a sample size of 10,000.
+This gives a probability of 1 - 10[super -9], which is exactly 1 when
+calculated at float precision. In this case calculating the quantile from
+the complement neatly solves the problem, so for example:
+
+`quantile(complement(students_t(10000), 1e-9))`
+
+returns the expected t-statistic `6.00336`, whereas:
+
+`quantile(students_t(10000), 1-1e-9f)`
+
+raises an overflow error, since it is the same as:
+
+`quantile(students_t(10000), 1)`
+
+which has no finite result.
+
+With all distributions, even for more reasonable probabilities
+(unless the value of p can be represented exactly in the floating-point type),
+the loss of accuracy quickly becomes significant if you simply calculate the probability from 1 - p
+(because the result will be mostly garbage digits for p ~ 1).
+
+So always avoid, for example, using a probability near to unity like 0.99999:
+
+`quantile(my_distribution, 0.99999)`
+
+and instead use
+
+`quantile(complement(my_distribution, 0.00001))`
+
+since 1 - 0.99999 is not exactly equal to 0.00001 when using floating-point arithmetic.
+
+This assumes that the 0.00001 value is either a constant,
+or can be computed in some manner other than subtracting 0.99999 from 1.
+
+] [/ tip *Why bother with complements anyway?]
+
+[endsect] [/ section:complements Complements are supported too - and when to use them]
+
+[section:parameters Parameters can be calculated]
+
+Sometimes it's the parameters that define the distribution that you
+need to find. Suppose, for example, you have conducted a Students-t test
+for equal means and the result is borderline. Maybe your two samples
+differ from each other, or maybe they don't; based on the result
+of the test you can't be sure. A legitimate question to ask then is
+"How many more measurements would I have to take before I would get
+an X% probability that the difference is real?" Parameter finders
+can answer questions like this, and are necessarily different for
+each distribution. They are implemented as static member functions
+of the distributions, for example:
+
+ students_t::find_degrees_of_freedom(
+ 1.3, // difference from true mean to detect
+ 0.05, // maximum risk of falsely rejecting the null-hypothesis.
+ 0.1, // maximum risk of falsely failing to reject the null-hypothesis.
+ 0.13); // sample standard deviation
+
+This returns the number of degrees of freedom required to obtain a 95%
+probability that the observed difference in means is not due to
+chance alone. In the case that a borderline Students-t test result
+was previously obtained, this can be used to estimate how large the sample size
+would have to become before the observed difference was considered
+significant. It assumes, of course, that the sample mean and standard
+deviation are invariant with sample size.
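+
+Put together (a sketch using the same inputs as above), remembering that
+the result is real-valued and needs rounding up to give a usable sample size:
+
+    #include <boost/math/distributions/students_t.hpp>
+    #include <cmath>    // for std::ceil
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::students_t;
+       double df = students_t::find_degrees_of_freedom(
+          1.3,   // difference from true mean to detect
+          0.05,  // maximum risk of falsely rejecting the null-hypothesis
+          0.1,   // maximum risk of falsely failing to reject it
+          0.13); // sample standard deviation
+       // degrees of freedom = sample size - 1:
+       std::cout << "Estimated sample size: " << std::ceil(df) + 1 << std::endl;
+    }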
+
+[endsect] [/ section:parameters Parameters can be calculated]
+
+[section:summary Summary]
+
+* Distributions are objects, which are constructed from whatever
+parameters the distribution may have.
+* Member functions allow you to retrieve the parameters of a distribution.
+* Generic non-member functions provide access to the properties that
+are common to all the distributions (PDF, CDF, quantile etc).
+* Complements of probabilities are calculated by wrapping the function's
+arguments in a call to `complement(...)`.
+* Functions that accept a probability can accept a complement of the
+probability as well, by wrapping the function's
+arguments in a call to `complement(...)`.
+* Static member functions allow the parameters of a distribution
+to be found from other information.
+
+Now that you have the basics, the next section looks at some worked examples.
+
+[endsect] [/section:summary Summary]
+[endsect] [/section:overview Overview]
+
+[section:weg Worked Examples]
+[include distribution_construction.qbk]
+[include students_t_examples.qbk]
+[include chi_squared_examples.qbk]
+[include f_dist_example.qbk]
+[include binomial_example.qbk]
+[include geometric_example.qbk]
+[include negative_binomial_example.qbk]
+[include normal_example.qbk]
+[/include inverse_gamma_example.qbk]
+[/include inverse_gaussian_example.qbk]
+[include inverse_chi_squared_eg.qbk]
+[include nc_chi_squared_example.qbk]
+[include error_handling_example.qbk]
+[include find_location_and_scale.qbk]
+[include nag_library.qbk]
+[include c_sharp.qbk]
+[endsect] [/section:weg Worked Examples]
+
+[include background.qbk]
+
+[endsect] [/ section:stat_tut Statistical Distributions Tutorial]
+
+[/ dist_tutorial.qbk
+ Copyright 2006, 2010, 2011 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/distribution_construction.qbk b/doc/distributions/distribution_construction.qbk
new file mode 100644
index 0000000..169af6b
--- /dev/null
+++ b/doc/distributions/distribution_construction.qbk
@@ -0,0 +1,17 @@
+[section:dist_construct_eg Distribution Construction Examples]
+
+[import ../../example/distribution_construction.cpp]
+[distribution_construction_1]
+[distribution_construction_2]
+
+See [@../../example/distribution_construction.cpp distribution_construction.cpp] for full source code.
+
+[endsect] [/section:dist_construct_eg Distribution Construction Examples]
+
+[/
+ Copyright 2006, 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/error_handling_example.qbk b/doc/distributions/error_handling_example.qbk
new file mode 100644
index 0000000..0a68607
--- /dev/null
+++ b/doc/distributions/error_handling_example.qbk
@@ -0,0 +1,35 @@
+[section:error_eg Error Handling Example]
+
+See [link math_toolkit.error_handling error handling documentation]
+for a detailed explanation of the mechanism of handling errors,
+including the common "bad" arguments to distributions and functions,
+and how to use __policy_section to control it.
+
+But, by default, *exceptions will be raised* for domain errors,
+pole errors, numeric overflow, and internal evaluation errors.
+To prevent exceptions from being thrown and instead have
+an appropriate value returned, usually a NaN (for domain errors,
+pole errors or internal errors) or an infinity (from overflow),
+you need to change the policy.
+
+[import ../../example/error_handling_example.cpp]
+
+[error_handling_example]
+
+[caution If throwing of exceptions is enabled (the default) but
+you do *not* have a try & catch block,
+then the program will terminate with an uncaught exception and probably abort.
+
+Therefore, to get the benefit of helpful error messages, enabling *all exceptions
+and using try & catch* is recommended for most applications.
+
+However, for simplicity, this is not done in most of the examples.]
+
+[endsect] [/section:error_eg Error Handling Example]
+[/
+ Copyright 2007 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/exponential.qbk b/doc/distributions/exponential.qbk
new file mode 100644
index 0000000..af83a29
--- /dev/null
+++ b/doc/distributions/exponential.qbk
@@ -0,0 +1,108 @@
+[section:exp_dist Exponential Distribution]
+
+``#include <boost/math/distributions/exponential.hpp>``
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class exponential_distribution;
+
+ typedef exponential_distribution<> exponential;
+
+ template <class RealType, class ``__Policy``>
+ class exponential_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ exponential_distribution(RealType lambda = 1);
+
+ RealType lambda()const;
+ };
+
+
+The [@http://en.wikipedia.org/wiki/Exponential_distribution exponential distribution]
+is a [@http://en.wikipedia.org/wiki/Probability_distribution continuous probability distribution]
+with PDF:
+
+[equation exponential_dist_ref1]
+
+It is often used to model the time between independent
+events that happen at a constant average rate.
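+
+For example (an illustrative sketch, not part of the reference), if events
+arrive at an average rate [lambda] per unit time, the probability that the
+wait for the next event exceeds /t/ is best computed via the complement of
+the CDF:
+
+    #include <boost/math/distributions/exponential.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::exponential;
+       exponential dist(0.5); // lambda = 0.5 events per unit time.
+       double t = 3;
+       // P(waiting time > t) == exp(-lambda * t):
+       std::cout << cdf(complement(dist, t)) << std::endl;
+    }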
+
+The following graph shows how the distribution changes for different
+values of the rate parameter lambda:
+
+[graph exponential_pdf]
+
+[h4 Member Functions]
+
+ exponential_distribution(RealType lambda = 1);
+
+Constructs an
+[@http://en.wikipedia.org/wiki/Exponential_distribution Exponential distribution]
+with parameter /lambda/.
+Lambda is defined as the reciprocal of the scale parameter.
+
+Requires lambda > 0, otherwise calls __domain_error.
+
+ RealType lambda()const;
+
+Accessor function returns the lambda parameter of the distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0, +[infin]\].
+
+[h4 Accuracy]
+
+The exponential distribution is implemented in terms of the
+standard library functions `exp`, `log`, `log1p` and `expm1`
+and as such should have very low error rates.
+
+[h4 Implementation]
+
+In the following table [lambda] is the parameter lambda of the distribution,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = [lambda] * exp(-[lambda] * x) ]]
+[[cdf][Using the relation: p = 1 - exp(-x * [lambda]) = -expm1(-x * [lambda]) ]]
+[[cdf complement][Using the relation: q = exp(-x * [lambda]) ]]
+[[quantile][Using the relation: x = -log(1-p) / [lambda] = -log1p(-p) / [lambda]]]
+[[quantile from the complement][Using the relation: x = -log(q) / [lambda]]]
+[[mean][1/[lambda]]]
+[[standard deviation][1/[lambda]]]
+[[mode][0]]
+[[skewness][2]]
+[[kurtosis][9]]
+[[kurtosis excess][6]]
+]
+
+[h4 References]
+
+* [@http://mathworld.wolfram.com/ExponentialDistribution.html Weisstein, Eric W. "Exponential Distribution." From MathWorld--A Wolfram Web Resource]
+* [@http://documents.wolfram.com/calccenter/Functions/ListsMatrices/Statistics/ExponentialDistribution.html Wolfram Mathematica calculator]
+* [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3667.htm NIST Exploratory Data Analysis]
+* [@http://en.wikipedia.org/wiki/Exponential_distribution Wikipedia Exponential distribution]
+
+(See also the reference documentation for the related __extreme_distrib.)
+
+* [@http://www.worldscibooks.com/mathematics/p191.html Extreme Value Distributions, Theory and Applications,
+Samuel Kotz & Saralees Nadarajah] discusses the relationship between the types of extreme value distributions.
+
+[endsect][/section:exp_dist Exponential]
+
+[/ exponential.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/extreme_value.qbk b/doc/distributions/extreme_value.qbk
new file mode 100644
index 0000000..affc509
--- /dev/null
+++ b/doc/distributions/extreme_value.qbk
@@ -0,0 +1,119 @@
+[section:extreme_dist Extreme Value Distribution]
+
+``#include <boost/math/distributions/extreme_value.hpp>``
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class extreme_value_distribution;
+
+ typedef extreme_value_distribution<> extreme_value;
+
+ template <class RealType, class ``__Policy``>
+ class extreme_value_distribution
+ {
+ public:
+ typedef RealType value_type;
+
+ extreme_value_distribution(RealType location = 0, RealType scale = 1);
+
+ RealType scale()const;
+ RealType location()const;
+ };
+
+There are various
+[@http://mathworld.wolfram.com/ExtremeValueDistribution.html extreme value distributions]:
+this implementation represents the maximum case,
+and is variously known as a Fisher-Tippett distribution,
+a log-Weibull distribution or a Gumbel distribution.
+
+Extreme value theory is important for assessing risk for highly unusual events,
+such as 100-year floods.
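+
+For example (an illustrative sketch with made-up parameters), the quantile
+from the complement gives the level that is exceeded with a chosen small
+probability:
+
+    #include <boost/math/distributions/extreme_value.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::extreme_value;
+       // Hypothetical annual-maximum model: location = 10, scale = 2.
+       extreme_value dist(10, 2);
+       // Level exceeded on average once in 100 years (upper tail probability 0.01):
+       std::cout << quantile(complement(dist, 0.01)) << std::endl;
+    }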
+
+More information can be found on the
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda366g.htm NIST],
+[@http://en.wikipedia.org/wiki/Extreme_value_distribution Wikipedia],
+[@http://mathworld.wolfram.com/ExtremeValueDistribution.html Mathworld],
+and [@http://en.wikipedia.org/wiki/Extreme_value_theory Extreme value theory]
+websites.
+
+The relationship of the types of extreme value distributions, of which this is but one, is
+discussed by
+[@http://www.worldscibooks.com/mathematics/p191.html Extreme Value Distributions, Theory and Applications
+Samuel Kotz & Saralees Nadarajah].
+
+The distribution has a PDF given by:
+
+f(x) = (1/scale) e[super -(x-location)/scale] e[super -e[super -(x-location)/scale]]
+
+Which in the standard case (scale = 1, location = 0) reduces to:
+
+f(x) = e[super -x]e[super -e[super -x]]
+
+The following graph illustrates how the PDF varies with the location parameter:
+
+[graph extreme_value_pdf1]
+
+And this graph illustrates how the PDF varies with the scale parameter:
+
+[graph extreme_value_pdf2]
+
+[h4 Member Functions]
+
+ extreme_value_distribution(RealType location = 0, RealType scale = 1);
+
+Constructs an Extreme Value distribution with the specified location and scale
+parameters.
+
+Requires `scale > 0`, otherwise calls __domain_error.
+
+ RealType location()const;
+
+Returns the location parameter of the distribution.
+
+ RealType scale()const;
+
+Returns the scale parameter of the distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[-[infin], +[infin]\].
+
+[h4 Accuracy]
+
+The extreme value distribution is implemented in terms of the
+standard library `exp` and `log` functions and as such should have very low
+error rates.
+
+[h4 Implementation]
+
+In the following table:
+/a/ is the location parameter, /b/ is the scale parameter,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = exp((a-x)/b) * exp(-exp((a-x)/b)) / b ]]
+[[cdf][Using the relation: p = exp(-exp((a-x)/b)) ]]
+[[cdf complement][Using the relation: q = -expm1(-exp((a-x)/b)) ]]
+[[quantile][Using the relation: a - log(-log(p)) * b]]
+[[quantile from the complement][Using the relation: a - log(-log1p(-q)) * b]]
+[[mean][a + [@http://en.wikipedia.org/wiki/Euler-Mascheroni_constant Euler-Mascheroni constant] * b]]
+[[standard deviation][pi * b / sqrt(6)]]
+[[mode][The same as the location parameter /a/.]]
+[[skewness][12 * sqrt(6) * zeta(3) / pi[super 3] ]]
+[[kurtosis][27 / 5]]
+[[kurtosis excess][kurtosis - 3 or 12 / 5]]
+]
+
+[endsect][/section:extreme_dist Extreme Value]
+
+[/ extreme_value.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/f_dist_example.qbk b/doc/distributions/f_dist_example.qbk
new file mode 100644
index 0000000..1601199
--- /dev/null
+++ b/doc/distributions/f_dist_example.qbk
@@ -0,0 +1,220 @@
+[section:f_eg F Distribution Examples]
+
+Imagine that you want to compare the standard deviations of two
+samples to determine if they differ in any significant way. In this
+situation you use the F distribution and perform an F-test. This
+situation commonly occurs when conducting a process change comparison:
+"is the new process more consistent than the old one?".
+
+In this example we'll be using the data for ceramic strength from
+[@http://www.itl.nist.gov/div898/handbook/eda/section4/eda42a1.htm
+http://www.itl.nist.gov/div898/handbook/eda/section4/eda42a1.htm].
+The data for this case study were collected by Said Jahanmir of the
+NIST Ceramics Division in 1996 in connection with a NIST/industry
+ceramics consortium for the strength optimization of ceramics.
+
+The example program is [@../../example/f_test.cpp f_test.cpp],
+program output has been deliberately made as similar as possible
+to the DATAPLOT output in the corresponding
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda359.htm
+NIST Engineering Statistics Handbook example].
+
+We'll begin by defining the procedure to conduct the test:
+
+ void f_test(
+ double sd1, // Sample 1 std deviation
+ double sd2, // Sample 2 std deviation
+ double N1, // Sample 1 size
+ double N2, // Sample 2 size
+ double alpha) // Significance level
+ {
+
+The procedure begins by printing out a summary of our input data:
+
+ using namespace std;
+ using namespace boost::math;
+
+ // Print header:
+ cout <<
+ "____________________________________\n"
+ "F test for equal standard deviations\n"
+ "____________________________________\n\n";
+ cout << setprecision(5);
+ cout << "Sample 1:\n";
+ cout << setw(55) << left << "Number of Observations" << "= " << N1 << "\n";
+ cout << setw(55) << left << "Sample Standard Deviation" << "= " << sd1 << "\n\n";
+ cout << "Sample 2:\n";
+ cout << setw(55) << left << "Number of Observations" << "= " << N2 << "\n";
+ cout << setw(55) << left << "Sample Standard Deviation" << "= " << sd2 << "\n\n";
+
+The test statistic for an F-test is simply the ratio of the squares of
+the two standard deviations:
+
+F = s[sub 1][super 2] / s[sub 2][super 2]
+
+where s[sub 1] is the standard deviation of the first sample and s[sub 2]
+is the standard deviation of the second sample. Or in code:
+
+ double F = (sd1 / sd2);
+ F *= F;
+ cout << setw(55) << left << "Test Statistic" << "= " << F << "\n\n";
+
+At this point a word of caution: the F distribution is asymmetric,
+so we have to be careful how we compute the tests; the following table
+summarises the options available:
+
+[table
+[[Hypothesis][Test]]
+[[The null-hypothesis: there is no difference in standard deviations (two sided test)]
+ [Reject if F <= F[sub (1-alpha/2; N1-1, N2-1)] or F >= F[sub (alpha/2; N1-1, N2-1)] ]]
+[[The alternative hypothesis: there is a difference in standard deviations (two sided test)]
+ [Reject if F[sub (1-alpha/2; N1-1, N2-1)] <= F <= F[sub (alpha/2; N1-1, N2-1)] ]]
+[[The alternative hypothesis: Standard deviation of sample 1 is greater
+than that of sample 2]
+ [Reject if F < F[sub (alpha; N1-1, N2-1)] ]]
+[[The alternative hypothesis: Standard deviation of sample 1 is less
+than that of sample 2]
+ [Reject if F > F[sub (1-alpha; N1-1, N2-1)] ]]
+]
+
+Here F[sub (1-alpha; N1-1, N2-1)] is the lower critical value of the F distribution
+with degrees of freedom N1-1 and N2-1, and F[sub (alpha; N1-1, N2-1)] is the upper
+critical value of the F distribution with degrees of freedom N1-1 and N2-1.
+
+The upper and lower critical values can be computed using the quantile function:
+
+F[sub (1-alpha; N1-1, N2-1)] = `quantile(fisher_f(N1-1, N2-1), alpha)`
+
+F[sub (alpha; N1-1, N2-1)] = `quantile(complement(fisher_f(N1-1, N2-1), alpha))`
+
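+The full example also reports the CDF of the test statistic (see the
+output below); a sketch consistent with that output, where `dist` is the
+F distribution with the matching degrees of freedom (it is defined in the
+full source, though not shown in this walkthrough):
+
+    fisher_f dist(N1 - 1, N2 - 1);
+    cout << setw(55) << left << "CDF of test statistic: " << "= "
+        << setprecision(3) << scientific << cdf(dist, F) << "\n";
+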
+In our example program we need both upper and lower critical values for alpha
+and for alpha/2:
+
+ double ucv = quantile(complement(dist, alpha));
+ double ucv2 = quantile(complement(dist, alpha / 2));
+ double lcv = quantile(dist, alpha);
+ double lcv2 = quantile(dist, alpha / 2);
+ cout << setw(55) << left << "Upper Critical Value at alpha: " << "= "
+ << setprecision(3) << scientific << ucv << "\n";
+ cout << setw(55) << left << "Upper Critical Value at alpha/2: " << "= "
+ << setprecision(3) << scientific << ucv2 << "\n";
+ cout << setw(55) << left << "Lower Critical Value at alpha: " << "= "
+ << setprecision(3) << scientific << lcv << "\n";
+ cout << setw(55) << left << "Lower Critical Value at alpha/2: " << "= "
+ << setprecision(3) << scientific << lcv2 << "\n\n";
+
+The final step is to perform the comparisons given above, and print
+out whether the hypothesis is rejected or not:
+
+ cout << setw(55) << left <<
+ "Results for Alternative Hypothesis and alpha" << "= "
+ << setprecision(4) << fixed << alpha << "\n\n";
+ cout << "Alternative Hypothesis Conclusion\n";
+
+ cout << "Standard deviations are unequal (two sided test) ";
+ if((ucv2 < F) || (lcv2 > F))
+ cout << "ACCEPTED\n";
+ else
+ cout << "REJECTED\n";
+
+ cout << "Standard deviation 1 is less than standard deviation 2 ";
+ if(lcv > F)
+ cout << "ACCEPTED\n";
+ else
+ cout << "REJECTED\n";
+
+ cout << "Standard deviation 1 is greater than standard deviation 2 ";
+ if(ucv < F)
+ cout << "ACCEPTED\n";
+ else
+ cout << "REJECTED\n";
+ cout << endl << endl;
+
+Using the ceramic strength data as an example we get the following
+output:
+
+[pre
+'''____________________________________
+F test for equal standard deviations
+____________________________________
+
+Sample 1:
+Number of Observations = 240
+Sample Standard Deviation = 65.549
+
+Sample 2:
+Number of Observations = 240
+Sample Standard Deviation = 61.854
+
+Test Statistic = 1.123
+
+CDF of test statistic: = 8.148e-001
+Upper Critical Value at alpha: = 1.238e+000
+Upper Critical Value at alpha/2: = 1.289e+000
+Lower Critical Value at alpha: = 8.080e-001
+Lower Critical Value at alpha/2: = 7.756e-001
+
+Results for Alternative Hypothesis and alpha = 0.0500
+
+Alternative Hypothesis Conclusion
+Standard deviations are unequal (two sided test) REJECTED
+Standard deviation 1 is less than standard deviation 2 REJECTED
+Standard deviation 1 is greater than standard deviation 2 REJECTED'''
+]
+
+In this case we are unable to reject the null-hypothesis, and must instead
+reject the alternative hypothesis.
+
+By contrast, let's see what happens when we use some different
+[@http://www.itl.nist.gov/div898/handbook/prc/section3/prc32.htm
+sample data], once again from the NIST Engineering Statistics Handbook:
+A new procedure to assemble a device is introduced and tested for
+possible improvement in time of assembly. The question being addressed
+is whether the standard deviation of the new assembly process (sample 2) is
+better (i.e., smaller) than the standard deviation for the old assembly
+process (sample 1).
+
+[pre
+'''____________________________________
+F test for equal standard deviations
+____________________________________
+
+Sample 1:
+Number of Observations = 11.00000
+Sample Standard Deviation = 4.90820
+
+Sample 2:
+Number of Observations = 9.00000
+Sample Standard Deviation = 2.58740
+
+Test Statistic = 3.59847
+
+CDF of test statistic: = 9.589e-001
+Upper Critical Value at alpha: = 3.347e+000
+Upper Critical Value at alpha/2: = 4.295e+000
+Lower Critical Value at alpha: = 3.256e-001
+Lower Critical Value at alpha/2: = 2.594e-001
+
+Results for Alternative Hypothesis and alpha = 0.0500
+
+Alternative Hypothesis Conclusion
+Standard deviations are unequal (two sided test) REJECTED
+Standard deviation 1 is less than standard deviation 2 REJECTED
+Standard deviation 1 is greater than standard deviation 2 ACCEPTED'''
+]
+
+In this case we take our null hypothesis as "standard deviation 1 is
+less than or equal to standard deviation 2", since this represents the "no change"
+situation. So we want to compare the upper critical value at /alpha/
+(a one sided test) with the test statistic, and since 3.35 < 3.6 this
+hypothesis must be rejected. We therefore conclude that there is a change
+for the better in our standard deviation.
+
+[endsect][/section:f_eg F Distribution]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/find_location_and_scale.qbk b/doc/distributions/find_location_and_scale.qbk
new file mode 100644
index 0000000..b722aec
--- /dev/null
+++ b/doc/distributions/find_location_and_scale.qbk
@@ -0,0 +1,39 @@
+[section:find_eg Find Location and Scale Examples]
+
+[section:find_location_eg Find Location (Mean) Example]
+[import ../../example/find_location_example.cpp]
+[find_location1]
+[find_location2]
+See [@../../example/find_location_example.cpp find_location_example.cpp]
+for full source code: the program output looks like this:
+[find_location_example_output]
+[endsect] [/section:find_location_eg Find Location (Mean) Example]
+
+[section:find_scale_eg Find Scale (Standard Deviation) Example]
+[import ../../example/find_scale_example.cpp]
+[find_scale1]
+[find_scale2]
+See [@../../example/find_scale_example.cpp find_scale_example.cpp]
+for full source code: the program output looks like this:
+[find_scale_example_output]
+[endsect] [/section:find_scale_eg Scale (Standard Deviation) Example]
+[section:find_mean_and_sd_eg Find mean and standard deviation example]
+
+[import ../../example/find_mean_and_sd_normal.cpp]
+[normal_std]
+[normal_find_location_and_scale_eg]
+See [@../../example/find_mean_and_sd_normal.cpp find_mean_and_sd_normal.cpp]
+for full source code & appended program output.
+[endsect] [/find_mean_and_sd_eg Find mean and standard deviation example]
+
+[endsect] [/section:find_eg Find Location and Scale Examples]
+
+
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/fisher.qbk b/doc/distributions/fisher.qbk
new file mode 100644
index 0000000..f09388d
--- /dev/null
+++ b/doc/distributions/fisher.qbk
@@ -0,0 +1,190 @@
+[section:f_dist F Distribution]
+
+``#include <boost/math/distributions/fisher_f.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class fisher_f_distribution;
+
+ typedef fisher_f_distribution<> fisher_f;
+
+ template <class RealType, class ``__Policy``>
+ class fisher_f_distribution
+ {
+ public:
+ typedef RealType value_type;
+
+ // Construct:
+ fisher_f_distribution(const RealType& i, const RealType& j);
+
+ // Accessors:
+ RealType degrees_of_freedom1()const;
+ RealType degrees_of_freedom2()const;
+ };
+
+ }} //namespaces
+
+The F distribution is a continuous distribution that arises when testing
+whether two samples have the same variance. If [chi][super 2][sub m][space] and
+[chi][super 2][sub n][space] are independent variates each distributed as
+Chi-Squared with /m/ and /n/ degrees of freedom, then the test statistic:
+
+F[sub n,m][space] = ([chi][super 2][sub n][space] / n) / ([chi][super 2][sub m][space] / m)
+
+is distributed over the range \[0, [infin]\] with an F distribution, and
+has the PDF:
+
+[equation fisher_pdf]
+
+The following graph illustrates how the PDF varies depending on the
+two degrees of freedom parameters.
+
+[graph fisher_f_pdf]
+
+
+[h4 Member Functions]
+
+ fisher_f_distribution(const RealType& df1, const RealType& df2);
+
+Constructs an F-distribution with numerator degrees of freedom /df1/
+and denominator degrees of freedom /df2/.
+
+Requires that /df1/ and /df2/ are both greater than zero, otherwise __domain_error
+is called.
+
+ RealType degrees_of_freedom1()const;
+
+Returns the numerator degrees of freedom parameter of the distribution.
+
+ RealType degrees_of_freedom2()const;
+
+Returns the denominator degrees of freedom parameter of the distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0, +[infin]\].
+
+[h4 Examples]
+
+Various [link math_toolkit.stat_tut.weg.f_eg worked examples] are
+available illustrating the use of the F Distribution.
+
+[h4 Accuracy]
+
+The F distribution is implemented in terms of the
+[link math_toolkit.sf_beta.ibeta_function incomplete beta function]
+and its [link math_toolkit.sf_beta.ibeta_inv_function inverses],
+refer to those functions for accuracy data.
+
+[h4 Implementation]
+
+In the following table /v1/ and /v2/ are the first and second
+degrees of freedom parameters of the distribution,
+/x/ is the random variate, /p/ is the probability, and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][The usual form of the PDF is given by:
+
+[equation fisher_pdf]
+
+However, that form is hard to evaluate directly without incurring problems with
+either accuracy or numeric overflow.
+
+Direct differentiation of the CDF expressed in terms of the incomplete beta function
+led to the following two formulas:
+
+f[sub v1,v2](x) = y * __ibeta_derivative(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))
+
+with y = (v2 * v1) \/ ((v2 + v1 * x) * (v2 + v1 * x))
+
+and
+
+f[sub v1,v2](x) = y * __ibeta_derivative(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))
+
+with y = (z * v1 - x * v1 * v1) \/ z[super 2]
+
+and z = v2 + v1 * x
+
+The first of these is used for v1 * x > v2, otherwise the second is used.
+
+The aim is to keep the /x/ argument to __ibeta_derivative away from 1 to avoid
+rounding error. ]]
+[[cdf][Using the relations:
+
+p = __ibeta(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))
+
+and
+
+p = __ibetac(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))
+
+The first is used for v1 * x > v2, otherwise the second is used.
+
+The aim is to keep the /x/ argument to __ibeta well away from 1 to
+avoid rounding error. ]]
+
+[[cdf complement][Using the relations:
+
+p = __ibetac(v1 \/ 2, v2 \/ 2, v1 * x \/ (v2 + v1 * x))
+
+and
+
+p = __ibeta(v2 \/ 2, v1 \/ 2, v2 \/ (v2 + v1 * x))
+
+The first is used for v1 * x < v2, otherwise the second is used.
+
+The aim is to keep the /x/ argument to __ibeta well away from 1 to
+avoid rounding error. ]]
+[[quantile][Using the relation:
+
+x = v2 * a \/ (v1 * b)
+
+where:
+
+a = __ibeta_inv(v1 \/ 2, v2 \/ 2, p)
+
+and
+
+b = 1 - a
+
+Quantities /a/ and /b/ are both computed by __ibeta_inv without the
+subtraction implied above.]]
+[[quantile
+
+from the complement][Using the relation:
+
+x = v2 * a \/ (v1 * b)
+
+where
+
+a = __ibetac_inv(v1 \/ 2, v2 \/ 2, p)
+
+and
+
+b = 1 - a
+
+Quantities /a/ and /b/ are both computed by __ibetac_inv without the
+subtraction implied above.]]
+[[mean][v2 \/ (v2 - 2)]]
+[[variance][2 * v2[super 2 ] * (v1 + v2 - 2) \/ (v1 * (v2 - 2) * (v2 - 2) * (v2 - 4))]]
+[[mode][v2 * (v1 - 2) \/ (v1 * (v2 + 2))]]
+[[skewness][2 * (v2 + 2 * v1 - 2) * sqrt((2 * v2 - 8) \/ (v1 * (v2 + v1 - 2))) \/ (v2 - 6)]]
+[[kurtosis and kurtosis excess]
+ [Refer to, [@http://mathworld.wolfram.com/F-Distribution.html
+ Weisstein, Eric W. "F-Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+]
+
+[endsect][/section:f_dist F distribution]
+
+[/ fisher.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/gamma.qbk b/doc/distributions/gamma.qbk
new file mode 100644
index 0000000..9d5faab
--- /dev/null
+++ b/doc/distributions/gamma.qbk
@@ -0,0 +1,139 @@
+[section:gamma_dist Gamma (and Erlang) Distribution]
+
+``#include <boost/math/distributions/gamma.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class gamma_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+      gamma_distribution(RealType shape, RealType scale = 1);
+
+ RealType shape()const;
+ RealType scale()const;
+ };
+
+ }} // namespaces
+
+The gamma distribution is a continuous probability distribution.
+It is closely related to the Poisson
+and Chi Squared Distributions.
+
+When the shape parameter has an integer value, the distribution is known as the
+[@http://en.wikipedia.org/wiki/Erlang_distribution Erlang distribution].
+Since this can be produced by ensuring that the shape parameter has an
+integer value > 0, the Erlang distribution is not separately implemented.
+
+[note
+To avoid potential confusion with the gamma functions, this
+distribution does not provide the typedef:
+
+``typedef gamma_distribution<double> gamma;``
+
+Instead if you want a double precision gamma distribution you can write
+
+``boost::math::gamma_distribution<> my_gamma(1, 1);``
+]
+
+For shape parameter /k/ and scale parameter [theta][space] it is defined by the
+probability density function:
+
+[equation gamma_dist_ref1]
+
+Sometimes an alternative formulation is used: given parameters
+[alpha][space]= k and [beta][space]= 1 / [theta], then the
+distribution can be defined by the PDF:
+
+[equation gamma_dist_ref2]
+
+In this form the inverse scale parameter is called a /rate parameter/.
+
+Both forms are in common usage: this library uses the first definition
+throughout. Therefore to construct a Gamma Distribution from a ['rate
+parameter], you should pass the reciprocal of the rate as the scale parameter.
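+
+For example (a brief sketch), given a rate parameter [beta]:
+
+    #include <boost/math/distributions/gamma.hpp>
+
+    double rate = 2.0; // beta, the inverse scale.
+    // A gamma distribution with shape k = 3, expressed via its rate:
+    boost::math::gamma_distribution<> g(3.0, 1 / rate); // scale = 1 / rate.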
+
+The following two graphs illustrate how the PDF of the gamma distribution
+varies as the parameters vary:
+
+[graph gamma1_pdf]
+
+[graph gamma2_pdf]
+
+The [*Erlang Distribution] is the same as the Gamma, but with the shape parameter
+an integer. It is often expressed using a /rate/ rather than a /scale/ as the
+second parameter (remember that the rate is the reciprocal of the scale).
+
+Internally the functions used to implement the Gamma Distribution are
+already optimised for small-integer arguments, so in general there should
+be no great loss of performance from using a Gamma Distribution rather than
+a dedicated Erlang Distribution.
+
+[h4 Member Functions]
+
+ gamma_distribution(RealType shape, RealType scale = 1);
+
+Constructs a gamma distribution with shape /shape/ and
+scale /scale/.
+
+Requires that the shape and scale parameters are greater than zero, otherwise calls
+__domain_error.
+
+ RealType shape()const;
+
+Returns the /shape/ parameter of this distribution.
+
+ RealType scale()const;
+
+Returns the /scale/ parameter of this distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0,+[infin]\].
+
+[h4 Accuracy]
+
+The gamma distribution is implemented in terms of the
+incomplete gamma functions __gamma_p and __gamma_q and their
+inverses __gamma_p_inv and __gamma_q_inv: refer to the accuracy
+data for those functions for more information.
+
+[h4 Implementation]
+
+In the following table /k/ is the shape parameter of the distribution,
+[theta][space] is its scale parameter, /x/ is the random variate, /p/ is the probability
+and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = __gamma_p_derivative(k, x / [theta]) / [theta] ]]
+[[cdf][Using the relation: p = __gamma_p(k, x / [theta]) ]]
+[[cdf complement][Using the relation: q = __gamma_q(k, x / [theta]) ]]
+[[quantile][Using the relation: x = [theta][space]* __gamma_p_inv(k, p) ]]
+[[quantile from the complement][Using the relation: x = [theta][space]* __gamma_q_inv(k, p) ]]
+[[mean][k[theta] ]]
+[[variance][k[theta][super 2] ]]
+[[mode][(k-1)[theta][space] for ['k>1] otherwise a __domain_error ]]
+[[skewness][2 / sqrt(k) ]]
+[[kurtosis][3 + 6 / k]]
+[[kurtosis excess][6 / k ]]
+]
+
+[endsect][/section:gamma_dist Gamma (and Erlang) Distribution]
+
+
+[/
+ Copyright 2006, 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/geometric.qbk b/doc/distributions/geometric.qbk
new file mode 100644
index 0000000..5129b45
--- /dev/null
+++ b/doc/distributions/geometric.qbk
@@ -0,0 +1,350 @@
+[section:geometric_dist Geometric Distribution]
+
+``#include <boost/math/distributions/geometric.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class geometric_distribution;
+
+ typedef geometric_distribution<> geometric;
+
+ template <class RealType, class ``__Policy``>
+ class geometric_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Constructor from success_fraction:
+ geometric_distribution(RealType p);
+
+ // Parameter accessors:
+ RealType success_fraction() const;
+ RealType successes() const;
+
+ // Bounds on success fraction:
+ static RealType find_lower_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType probability); // alpha
+ static RealType find_upper_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType probability); // alpha
+
+ // Estimate min/max number of trials:
+ static RealType find_minimum_number_of_trials(
+ RealType k, // Number of failures.
+ RealType p, // Success fraction.
+ RealType probability); // Probability threshold alpha.
+ static RealType find_maximum_number_of_trials(
+ RealType k, // Number of failures.
+ RealType p, // Success fraction.
+ RealType probability); // Probability threshold alpha.
+ };
+
+ }} // namespaces
+
+The class type `geometric_distribution` represents a
+[@http://en.wikipedia.org/wiki/geometric_distribution geometric distribution]:
+it is used when there are exactly two mutually exclusive outcomes of a
+[@http://en.wikipedia.org/wiki/Bernoulli_trial Bernoulli trial]:
+these outcomes are labelled "success" and "failure".
+
+For [@http://en.wikipedia.org/wiki/Bernoulli_trial Bernoulli trials]
+each with success fraction /p/, the geometric distribution gives
+the probability of observing /k/ trials (failures, events, occurrences, or arrivals)
+before the first success.
+
+[note For this implementation, the set of trials *includes zero*
+(unlike another definition where the set of trials starts at one, sometimes named /shifted/).]
+The geometric distribution assumes that success_fraction /p/ is fixed for all /k/ trials.
+
+The probability that there are /k/ failures before the first success is
+
+__spaces Pr(Y=/k/) = (1-/p/)[super /k/]/p/
+
+For example, when throwing a six-sided die the success probability /p/ = 1/6 = 0.1666[recur][space].
+Throwing repeatedly until a /three/ appears,
+the probability distribution of the number of times /not-a-three/ is thrown
+is geometric.
+
+The geometric distribution has the Probability Density Function (PDF):
+
+__spaces (1-/p/)[super /k/]/p/
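+
+In code, the dice example above looks like this (a small sketch):
+
+    #include <boost/math/distributions/geometric.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::geometric;
+       geometric dist(1.0 / 6); // success fraction p for one face of the die.
+       // Probability of exactly 3 not-a-three throws before the first three:
+       std::cout << pdf(dist, 3) << "\n"; // (1 - 1/6)^3 * (1/6)
+       // Probability of at most 10 failures before the first three:
+       std::cout << cdf(dist, 10) << "\n";
+    }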
+
+The following graphs illustrate how the PDF and CDF vary for three examples
+of the success fraction /p/
+(when considering the geometric distribution as a continuous function),
+
+[graph geometric_pdf_2]
+
+[graph geometric_cdf_2]
+
+and as discrete.
+
+[graph geometric_pdf_discrete]
+
+[graph geometric_cdf_discrete]
+
+
+[h4 Related Distributions]
+
+The geometric distribution is a special case of
+the __negative_binomial_distrib with successes parameter /r/ = 1,
+so only a single (first and only) success is required: thus by definition
+__spaces `geometric(p) == negative_binomial(1, p)`
+
+ negative_binomial_distribution(RealType r, RealType success_fraction);
+ negative_binomial nb(1, success_fraction);
+ geometric g(success_fraction);
+ ASSERT(pdf(nb, 1) == pdf(g, 1));
+
+This implementation uses real numbers for the computation throughout
+(because it uses the *real-valued* power and exponential functions).
+So to obtain a conventional strictly-discrete geometric distribution
+you must ensure that an integer value is provided for the number of trials
+(the random variate) /k/,
+and take integer values (floor or ceil functions) from functions that return
+a number of successes.
+
+[discrete_quantile_warning geometric]
+
+[h4 Member Functions]
+
+[h5 Constructor]
+
+ geometric_distribution(RealType p);
+
+Constructor: /p/ or success_fraction is the probability of success of a single trial.
+
+Requires: `0 <= p <= 1`.
+
+[h5 Accessors]
+
+ RealType success_fraction() const; // successes / trials (0 <= p <= 1)
+
+Returns the success_fraction parameter /p/ from which this distribution was constructed.
+
+ RealType successes() const; // required successes always one,
+ // included for compatibility with negative binomial distribution
+ // with successes r == 1.
+
+Returns unity.
+
+The following functions are equivalent to those provided for the negative binomial,
+with successes = 1, but are provided here for completeness.
+
+The best method of calculation for the following functions is disputed:
+see __binomial_distrib and __negative_binomial_distrib for more discussion.
+
+[h5 Lower Bound on success_fraction Parameter ['p]]
+
+ static RealType find_lower_bound_on_p(
+ RealType failures,
+ RealType probability) // (0 <= alpha <= 1), 0.05 equivalent to 95% confidence.
+
+Returns a *lower bound* on the success fraction:
+
+[variablelist
+[[failures][The total number of failures before the 1st success.]]
+[[alpha][The largest acceptable probability that the true value of
+ the success fraction is [*less than] the value returned.]]
+]
+
+For example, if you observe /k/ failures from /n/ trials
+the best estimate for the success fraction is simply 1/['n], but if you
+want to be 95% sure that the true value is [*greater than] some value,
+['p[sub min]], then:
+
+ p``[sub min]`` = geometric_distribution<RealType>::
+ find_lower_bound_on_p(failures, 0.05);
+
+[link math_toolkit.stat_tut.weg.neg_binom_eg.neg_binom_conf See negative_binomial confidence interval example.]
+
+This function uses the Clopper-Pearson method of computing the lower bound on the
+success fraction. Whilst many texts refer to this method as giving an "exact"
+result, in practice it produces an interval that guarantees ['at least] the
+coverage required, and may produce pessimistic estimates for some combinations
+of /failures/ and /successes/. See:
+
+[@http://www.ucs.louisiana.edu/~kxk4695/Discrete_new.pdf
+Yong Cai and K. Krishnamoorthy, A Simple Improved Inferential Method for Some Discrete Distributions.
+Computational statistics and data analysis, 2005, vol. 48, no3, 605-621].
+
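+As a compilable sketch of the above (the observed failure count is an illustrative assumption):
+
+    #include <boost/math/distributions/geometric.hpp>
+
+    using boost::math::geometric_distribution;
+
+    double failures = 20; // Failures observed before the first success.
+    double p_min = geometric_distribution<double>::
+      find_lower_bound_on_p(failures, 0.05); // 95% confident that p > p_min.
+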
+[h5 Upper Bound on success_fraction Parameter p]
+
+ static RealType find_upper_bound_on_p(
+ RealType trials,
+ RealType alpha); // (0 <= alpha <= 1), 0.05 equivalent to 95% confidence.
+
+Returns an *upper bound* on the success fraction:
+
+[variablelist
+[[trials][The total number of trials conducted.]]
+[[alpha][The largest acceptable probability that the true value of
+ the success fraction is [*greater than] the value returned.]]
+]
+
+For example, if you observe /k/ successes from /n/ trials the
+best estimate for the success fraction is simply ['k/n], but if you
+want to be 95% sure that the true value is [*less than] some value,
+['p[sub max]], then:
+
+ p``[sub max]`` = geometric_distribution<RealType>::find_upper_bound_on_p(
+ k, 0.05);
+
+[link math_toolkit.stat_tut.weg.neg_binom_eg.neg_binom_conf See negative binomial confidence interval example.]
+
+This function uses the Clopper-Pearson method of computing the upper bound on the
+success fraction. Whilst many texts refer to this method as giving an "exact"
+result, in practice it produces an interval that guarantees ['at least] the
+coverage required, and may produce pessimistic estimates for some combinations
+of /failures/ and /successes/. See:
+
+[@http://www.ucs.louisiana.edu/~kxk4695/Discrete_new.pdf
+Yong Cai and K. Krishnamoorthy, A Simple Improved Inferential Method for Some Discrete Distributions.
+Computational statistics and data analysis, 2005, vol. 48, no3, 605-621].
+
+[h5 Estimating Number of Trials to Ensure at Least a Certain Number of Failures]
+
+ static RealType find_minimum_number_of_trials(
+ RealType k, // number of failures.
+ RealType p, // success fraction.
+ RealType alpha); // probability threshold (0.05 equivalent to 95%).
+
+This function estimates the number of trials required to achieve a certain
+probability that [*more than ['k] failures will be observed].
+
+[variablelist
+[[k][The target number of failures to be observed.]]
+[[p][The probability of ['success] for each trial.]]
+[[alpha][The maximum acceptable ['risk] that only ['k] failures or fewer will be observed.]]
+]
+
+For example:
+
+ geometric_distribution<RealType>::find_minimum_number_of_trials(10, 0.5, 0.05);
+
+Returns the smallest number of trials we must conduct to be 95% (1-0.05) sure
+of seeing 10 failures that occur with frequency one half.
+
+[link math_toolkit.stat_tut.weg.neg_binom_eg.neg_binom_size_eg Worked Example.]
+
+This function uses numeric inversion of the geometric distribution
+to obtain the result: another interpretation of the result is that it finds
+the number of trials (failures) that will lead to an /alpha/ probability
+of observing /k/ failures or fewer.
+
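+A minimal sketch of this call, rounding the real-valued result up to a whole
+number of trials (the parameter values repeat the example above):
+
+    #include <boost/math/distributions/geometric.hpp>
+    #include <cmath> // for std::ceil
+
+    using boost::math::geometric_distribution;
+
+    double t = geometric_distribution<double>::
+      find_minimum_number_of_trials(10, 0.5, 0.05); // k = 10, p = 0.5, alpha = 0.05.
+    double trials = std::ceil(t); // Whole trials are needed, so round up.
+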
+[h5 Estimating Number of Trials to Ensure a Maximum Number of Failures or Less]
+
+ static RealType find_maximum_number_of_trials(
+ RealType k, // number of failures.
+ RealType p, // success fraction.
+ RealType alpha); // probability threshold (0.05 equivalent to 95%).
+
+This function estimates the maximum number of trials we can conduct and achieve
+a certain probability that [*k failures or fewer will be observed].
+
+[variablelist
+[[k][The maximum number of failures to be observed.]]
+[[p][The probability of ['success] for each trial.]]
+[[alpha][The maximum acceptable ['risk] that more than ['k] failures will be observed.]]
+]
+
+For example:
+
+ geometric_distribution<RealType>::find_maximum_number_of_trials(0, 1.0-1.0/1000000, 0.05);
+
+Returns the largest number of trials we can conduct and still be 95% sure
+of seeing no failures that occur with frequency one in one million.
+
+This function uses numeric inversion of the geometric distribution
+to obtain the result: another interpretation of the result is that it finds
+the number of trials that will lead to an /alpha/ probability
+of observing more than /k/ failures.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+However it's worth taking a moment to define what these actually mean in
+the context of this distribution:
+
+[table Meaning of the non-member accessors.
+[[Function][Meaning]]
+[[__pdf]
+  [The probability of obtaining [*exactly k failures] before the first success,
+   with success fraction p. For example:
+
+``pdf(geometric(p), k)``]]
+[[__cdf]
+  [The probability of obtaining [*k failures or fewer] before the first success,
+   with success fraction p. For example:
+
+``cdf(geometric(p), k)``]]
+[[__ccdf]
+  [The probability of obtaining [*more than k failures] before the first success,
+   with success fraction p. For example:
+
+``cdf(complement(geometric(p), k))``]]
+[[__quantile]
+  [The [*greatest] number of failures /k/ expected to be observed before the
+   first success, with success fraction /p/, at probability /P/. Note that the value returned
+ is a real-number, and not an integer. Depending on the use case you may
+ want to take either the floor or ceiling of the real result. For example:
+``quantile(geometric(p), P)``]]
+[[__quantile_c]
+  [The [*smallest] number of failures /k/ expected to be observed before the
+   first success, with success fraction /p/, at probability /P/. Note that the value returned
+ is a real-number, and not an integer. Depending on the use case you may
+ want to take either the floor or ceiling of the real result. For example:
+ ``quantile(complement(geometric(p), P))``]]
+]
+
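+The following sketch exercises each of these accessors (the parameter values
+are illustrative only):
+
+    #include <boost/math/distributions/geometric.hpp>
+
+    using boost::math::geometric;
+    using boost::math::complement;
+
+    geometric g(0.25);                     // Success fraction p = 1/4.
+    double d = pdf(g, 3);                  // P(exactly 3 failures).
+    double c = cdf(g, 3);                  // P(3 failures or fewer).
+    double cc = cdf(complement(g, 3));     // P(more than 3 failures).
+    double k = quantile(g, 0.95);          // Greatest k at probability 0.95.
+    double kc = quantile(complement(g, 0.95)); // Smallest k, from the complement.
+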
+[h4 Accuracy]
+
+This distribution is implemented using the pow and exp functions, so most results
+are accurate within a few epsilon for the RealType.
+For extreme values of `double` /p/, for example 0.9999999999,
+accuracy can fall significantly, for example to 10 decimal digits (from 16).
+
+[h4 Implementation]
+
+In the following table, /p/ is the probability that any one trial will
+be successful (the success fraction), /k/ is the number of failures,
+/q = 1-p/,
+and /x/ is the given probability used by the quantile to estimate
+the expected number of failures.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][pdf = p * pow(q, k)]]
+[[cdf][cdf = 1 - q[super k+1]]]
+[[cdf complement][exp(log1p(-p) * (k+1))]]
+[[quantile][k = log1p(-x) / log1p(-p) - 1]]
+[[quantile from the complement][k = log(x) / log1p(-p) - 1]]
+[[mean][(1-p)/p]]
+[[variance][(1-p)/p[pow2]]]
+[[mode][0]]
+[[skewness][(2-p)/[sqrt]q]]
+[[kurtosis][9 + p[pow2]/q]]
+[[kurtosis excess][6 + p[pow2]/q]]
+[[parameter estimation member functions][See __negative_binomial_distrib]]
+[[`find_lower_bound_on_p`][See __negative_binomial_distrib]]
+[[`find_upper_bound_on_p`][See __negative_binomial_distrib]]
+[[`find_minimum_number_of_trials`][See __negative_binomial_distrib]]
+[[`find_maximum_number_of_trials`][See __negative_binomial_distrib]]
+]
+
+[endsect][/section:geometric_dist geometric]
+
+[/ geometric.qbk
+ Copyright 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/geometric_example.qbk b/doc/distributions/geometric_example.qbk
new file mode 100644
index 0000000..9405648
--- /dev/null
+++ b/doc/distributions/geometric_example.qbk
@@ -0,0 +1,20 @@
+[section:geometric_eg Geometric Distribution Examples]
+
+[import ../../example/geometric_examples.cpp]
+[geometric_eg1_1]
+[geometric_eg1_2]
+
+See full source C++ of this example at
+[@../../example/geometric_examples.cpp geometric_examples.cpp]
+
+[link math_toolkit.stat_tut.weg.neg_binom_eg.neg_binom_conf See negative_binomial confidence interval example.]
+
+[endsect] [/section:geometric_eg Geometric Distribution Examples]
+
+[/ geometric.qbk
+ Copyright 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/hyperexponential.qbk b/doc/distributions/hyperexponential.qbk
new file mode 100644
index 0000000..d2b2a1d
--- /dev/null
+++ b/doc/distributions/hyperexponential.qbk
@@ -0,0 +1,506 @@
+[section:hyperexponential_dist Hyperexponential Distribution]
+
+[import ../../example/hyperexponential_snips.cpp]
+[import ../../example/hyperexponential_more_snips.cpp]
+
+``#include <boost/math/distributions/hyperexponential.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <typename RealType = double,
+ typename ``__Policy`` = ``__policy_class`` >
+ class hyperexponential_distribution;
+
+ typedef hyperexponential_distribution<> hyperexponential;
+
+ template <typename RealType, typename ``__Policy``>
+ class hyperexponential_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructors:
+ hyperexponential_distribution(); // Default.
+
+ template <typename RateIterT, typename RateIterT2>
+ hyperexponential_distribution( // Default equal probabilities.
+ RateIterT const& rate_first,
+ RateIterT2 const& rate_last); // Rates using Iterators.
+
+ template <typename ProbIterT, typename RateIterT>
+ hyperexponential_distribution(ProbIterT prob_first, ProbIterT prob_last,
+ RateIterT rate_first, RateIterT rate_last); // Iterators.
+
+ template <typename ProbRangeT, typename RateRangeT>
+ hyperexponential_distribution(ProbRangeT const& prob_range,
+ RateRangeT const& rate_range); // Ranges.
+
+ template <typename RateRangeT>
+ hyperexponential_distribution(RateRangeT const& rate_range);
+
+ #if !defined(BOOST_NO_CXX11_HDR_INITIALIZER_LIST) // C++11 initializer lists supported.
+ hyperexponential_distribution(std::initializer_list<RealType> l1, std::initializer_list<RealType> l2);
+ hyperexponential_distribution(std::initializer_list<RealType> l1);
+ #endif
+
+ // Accessors:
+ std::size_t num_phases() const;
+ std::vector<RealType> probabilities() const;
+ std::vector<RealType> rates() const;
+ };
+
+ }} // namespaces
+
+[note An implementation-defined mechanism is provided to avoid
+ambiguity between constructors accepting ranges, iterators and constants as parameters.
+This should be transparent to the user.
+See below and the header file hyperexponential.hpp for details and explanatory comments.]
+
+The class type `hyperexponential_distribution` represents a [@http://en.wikipedia.org/wiki/Hyperexponential_distribution hyperexponential distribution].
+
+A /k/-phase hyperexponential distribution is a [@http://en.wikipedia.org/wiki/Continuous_probability_distribution continuous probability distribution] obtained as a mixture of /k/ [link math_toolkit.dist_ref.dists.exp_dist Exponential Distribution]s.
+It is also referred to as /mixed exponential distribution/ or parallel /k-phase exponential distribution/.
+
+A /k/-phase hyperexponential distribution is characterized by two parameters, namely a /phase probability vector/ ['[*[alpha]]=([alpha][sub 1],...,[alpha][sub k])] and a /rate vector/ ['[*[lambda]]=([lambda][sub 1],...,[lambda][sub k])].
+
+The [@http://en.wikipedia.org/wiki/Probability_density_function probability density function] for random variate /x/ in a hyperexponential distribution is given by:
+
+[equation hyperexponential_pdf]
+
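+The formula can be transcribed directly, as in the following sketch (shown for
+exposition only; the library's internal implementation may differ in detail):
+
+    #include <cmath>
+    #include <cstddef>
+    #include <vector>
+
+    // PDF of a k-phase hyperexponential distribution:
+    // the weighted sum of the k component exponential PDFs.
+    double hyperexponential_pdf(std::vector<double> const& alpha,
+                                std::vector<double> const& lambda, double x)
+    {
+       double sum = 0;
+       for (std::size_t i = 0; i < alpha.size(); ++i)
+          sum += alpha[i] * lambda[i] * std::exp(-lambda[i] * x);
+       return sum;
+    }
+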
+The following graph illustrates the PDF of the hyperexponential distribution with five different parameters, namely:
+
+# ['[*[alpha]]=(1.0)] and ['[*[lambda]]=(1.0)] (which degenerates to a simple exponential distribution),
+# ['[*[alpha]]=(0.1, 0.9)] and ['[*[lambda]]=(0.5, 1.5)],
+# ['[*[alpha]]=(0.9, 0.1)] and ['[*[lambda]]=(0.5, 1.5)],
+# ['[*[alpha]]=(0.2, 0.3, 0.5)] and ['[*[lambda]]=(0.5, 1.0, 1.5)],
+# ['[*[alpha]]=(0.5, 0.3, 0.2)] and ['[*[lambda]]=(0.5, 1.0, 1.5)].
+
+[graph hyperexponential_pdf]
+
+Also, the following graph illustrates the PDF of the hyperexponential distribution (solid lines) where only the /phase probability vector/ changes together with the PDF of the two limiting exponential distributions (dashed lines):
+
+# ['[*[alpha]]=(0.1, 0.9)] and ['[*[lambda]]=(0.5, 1.5)],
+# ['[*[alpha]]=(0.6, 0.4)] and ['[*[lambda]]=(0.5, 1.5)],
+# ['[*[alpha]]=(0.9, 0.1)] and ['[*[lambda]]=(0.5, 1.5)],
+# Exponential distribution with parameter ['[lambda]=0.5],
+# Exponential distribution with parameter ['[lambda]=1.5].
+
+As expected, as the first element ['[alpha][sub 1]] of the /phase probability vector/ approaches /1/ (or, equivalently, ['[alpha][sub 2]] approaches /0/), the resulting hyperexponential distribution nears the exponential distribution with parameter ['[lambda]=0.5].
+Conversely, as the second element ['[alpha][sub 2]] of the /phase probability vector/ approaches /1/ (or, equivalently, ['[alpha][sub 1]] approaches /0/), the resulting hyperexponential distribution nears the exponential distribution with parameter ['[lambda]=1.5].
+
+[graph hyperexponential_pdf_samerate]
+
+Finally, the following graph compares the PDF of hyperexponential distributions with different numbers of phases but with the same mean value equal to /2/:
+
+# ['[*[alpha]]=(1.0)] and ['[*[lambda]]=(2.0)] (which degenerates to a simple exponential distribution),
+# ['[*[alpha]]=(0.5, 0.5)] and ['[*[lambda]]=(0.3, 1.5)],
+# ['[*[alpha]]=(1.0/3.0, 1.0/3.0, 1.0/3.0)] and ['[*[lambda]]=(0.2, 1.5, 3.0)].
+
+[graph hyperexponential_pdf_samemean]
+
+As can be seen, even though the three distributions have the same mean value, the two hyperexponential distributions have a /longer/ tail than the exponential distribution.
+Indeed, the hyperexponential distribution has a larger variability than the exponential distribution, resulting in a [@http://en.wikipedia.org/wiki/Coefficient_of_variation Coefficient of Variation] greater than /1/ (whereas that of the exponential distribution is exactly /1/).
+
+[h3 Applications]
+
+A /k/-phase hyperexponential distribution is frequently used in [@http://en.wikipedia.org/wiki/Queueing_theory queueing theory] to model the distribution of the superposition of /k/ independent events, like, for instance, the service time distribution of a queueing station with /k/ servers in parallel where the /i/-th server is chosen with probability ['[alpha][sub i]] and its service time distribution is an exponential distribution with rate ['[lambda][sub i]] (Allen,1990; Papadopolous et al.,1993; Trivedi,2002).
+
+For instance, the CPU service-time distribution in a computing system has often been observed to possess such a distribution (Rosin,1965).
+Also, the arrival of different types of customer to a single queueing station is often modeled as a hyperexponential distribution (Papadopolous et al.,1993).
+Similarly, if a product is manufactured in several parallel assembly lines and the outputs are merged, the failure density of the overall product is likely to be hyperexponential (Trivedi,2002).
+
+Finally, since the hyperexponential distribution exhibits a high Coefficient of Variation (CoV), that is, a CoV > 1, it is especially suited to fit empirical data with large CoV (Feitelson,2014; Wolski et al.,2013) and to approximate [@http://en.wikipedia.org/wiki/Long_tail long-tail probability distributions] (Feldmann et al.,1998).
+
+[h3 Related distributions]
+
+* When the number of phases /k/ is equal to `1`, the hyperexponential distribution is simply an __exp_distrib.
+* When the /k/ rates are all equal to ['[lambda]], the hyperexponential distribution is simply an __exp_distrib with rate ['[lambda]].
+
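+The first equivalence can be checked directly, as in this sketch (which assumes
+C++11 initializer-list support; the evaluation point 1.5 is arbitrary):
+
+    #include <boost/math/distributions/hyperexponential.hpp>
+    #include <boost/math/distributions/exponential.hpp>
+
+    using namespace boost::math;
+
+    hyperexponential he({1.0}, {2.0}); // One phase: probability 1, rate 2.
+    exponential e(2.0);
+    double d1 = pdf(he, 1.5); // Should equal...
+    double d2 = pdf(e, 1.5);  // ...this.
+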
+[h3 Examples]
+
+[h4 Lifetime of Appliances]
+
+Suppose a customer is buying an appliance and is choosing at random between an appliance with average lifetime of 10 years and an appliance with average lifetime of 12 years.
+Assuming the lifetime of this appliance follows an exponential distribution, the lifetime distribution of the purchased appliance can be modeled as a hyperexponential distribution with
+phase probability vector ['[*[alpha]]=(1/2,1/2)] and rate vector ['[*[lambda]]=(1/10,1/12)] (Wolfram,2014).
+
+In the rest of this section, we provide an example C++ implementation for computing the average lifetime and the probability that the appliance will work for more than 15 years.
+
+[hyperexponential_snip1]
+
+The resulting output is:
+
+ Average lifetime: 11 years
+ Probability that the appliance will work for more than 15 years: 0.254817
+
+
+[h4 Workloads of Private Cloud Computing Systems]
+
+[@http://en.wikipedia.org/wiki/Cloud_computing Cloud computing] has become a popular metaphor for dynamic and secure self-service access to computational and storage capabilities.
+In (Wolski et al.,2013), the authors analyze and model workloads gathered from enterprise-operated commercial [@http://en.wikipedia.org/wiki/Cloud_computing#Private_cloud private clouds] and show that 3-phase hyperexponential distributions (fitted using the [@http://en.wikipedia.org/wiki/Expectation%E2%80%93maximization_algorithm Expectation Maximization algorithm]) capture workload attributes accurately.
+
+In this type of computing system, user requests consist of demands for the provisioning of one or more [@http://en.wikipedia.org/wiki/Virtual_machine Virtual Machines] (VMs).
+In particular, in (Wolski et al.,2013) the workload experienced by each cloud system is a function of four distributions, one for each of the following workload attributes:
+
+* /Request Interarrival Time/: the amount of time until the next request,
+* /VM Lifetime/: the time duration over which a VM is provisioned to a physical machine,
+* /Request Size/: the number of VMs in the request, and
+* /Core Count/: the CPU core count requested for each VM.
+
+The authors assume that all VMs in a request have the same core count, but request sizes and core counts can vary from request to request.
+Moreover, all VMs within a request are assumed to have the same lifetime.
+Given these assumptions, the authors build a statistical model for the request interarrival time and VM lifetime attributes by fitting their respective data to a 3-phase hyperexponential distribution.
+
+In the following table, we show the sample mean and standard deviation (SD), in seconds, of the request interarrival time and of the VM lifetime distributions of the three datasets collected by the authors:
+
+[table
+[[Dataset][Mean Request Interarrival Time (SD)][Mean Multi-core VM Lifetime (SD)][Mean Single-core VM Lifetime (SD)]]
+[[DS1][2202.1 (2.2e+04)][257173 (4.6e+05)][28754.4 (1.6e+05)]]
+[[DS2][41285.7 (1.1e+05)][144669.0 (7.9e+05)][599815.0 (1.7e+06)]]
+[[DS3][11238.8 (3.0e+04)][30739.2 (1.6e+05)][44447.8 (2.2e+05)]]
+]
+
+The following table shows the hyperexponential distribution parameters resulting from the fit:
+
+[table
+[[Dataset][Request Interarrival Time][Multi-core VM Lifetime][Single-core VM Lifetime]]
+[[DS1][['[*[alpha]]=(0.34561,0.08648,0.56791), [*[lambda]]=(0.008,0.00005,0.02894)]][['[*[alpha]]=(0.24667,0.37948,0.37385), [*[lambda]]=(0.00004,0.000002,0.00059)]][['[*[alpha]]=(0.09325,0.22251,0.68424), [*[lambda]]=(0.000003,0.00109,0.00109)]]]
+[[DS2][['[*[alpha]]=(0.38881,0.18227,0.42892), [*[lambda]]=(0.000006,0.05228,0.00081)]][['[*[alpha]]=(0.42093,0.43960,0.13947), [*[lambda]]=(0.00186,0.00008,0.0000008)]][['[*[alpha]]=(0.44885,0.30675,0.2444), [*[lambda]]=(0.00143,0.00005,0.0000004)]]]
+[[DS3][['[*[alpha]]=(0.39442,0.24644,0.35914), [*[lambda]]=(0.00030,0.00003,0.00257)]][['[*[alpha]]=(0.37621,0.14838,0.47541), [*[lambda]]=(0.00498,0.000005,0.00022)]][['[*[alpha]]=(0.34131,0.12544,0.53325), [*[lambda]]=(0.000297,0.000003,0.00410)]]]
+]
+
+In the rest of this section, we provide an example C++ implementation for computing some statistical properties of the fitted distributions for each of the analyzed datasets.
+
+[hyperexponential_more_snip1]
+
+The resulting output (with floating-point precision set to 2) is:
+
+ ### DS1
+ * Fitted Request Interarrival Time
+ - Mean (SD): 2.2e+03 (8.1e+03) seconds.
+ - 99th Percentile: 4.3e+04 seconds.
+ - Probability that a VM will arrive within 30 minutes: 0.84
+ - Probability that a VM will arrive after 1 hour: 0.092
+ * Fitted Multi-core VM Lifetime
+ - Mean (SD): 2e+05 (3.9e+05) seconds.
+ - 99th Percentile: 1.8e+06 seconds.
+ - Probability that a VM will last for less than 1 month: 1
+ - Probability that a VM will last for more than 3 months: 6.7e-08
+ * Fitted Single-core VM Lifetime
+ - Mean (SD): 3.2e+04 (1.4e+05) seconds.
+ - 99th Percentile: 7.4e+05 seconds.
+ - Probability that a VM will last for less than 1 month: 1
+ - Probability that a VM will last for more than 3 months: 6.9e-12
+ ### DS2
+ * Fitted Request Interarrival Time
+ - Mean (SD): 6.5e+04 (1.3e+05) seconds.
+ - 99th Percentile: 6.1e+05 seconds.
+ - Probability that a VM will arrive within 30 minutes: 0.52
+ - Probability that a VM will arrive after 1 hour: 0.4
+ * Fitted Multi-core VM Lifetime
+ - Mean (SD): 1.8e+05 (6.4e+05) seconds.
+ - 99th Percentile: 3.3e+06 seconds.
+ - Probability that a VM will last for less than 1 month: 0.98
+ - Probability that a VM will last for more than 3 months: 0.00028
+ * Fitted Single-core VM Lifetime
+ - Mean (SD): 6.2e+05 (1.6e+06) seconds.
+ - 99th Percentile: 8e+06 seconds.
+ - Probability that a VM will last for less than 1 month: 0.91
+ - Probability that a VM will last for more than 3 months: 0.011
+ ### DS3
+ * Fitted Request Interarrival Time
+ - Mean (SD): 9.7e+03 (2.2e+04) seconds.
+ - 99th Percentile: 1.1e+05 seconds.
+ - Probability that a VM will arrive within 30 minutes: 0.53
+ - Probability that a VM will arrive after 1 hour: 0.36
+ * Fitted Multi-core VM Lifetime
+ - Mean (SD): 3.2e+04 (1e+05) seconds.
+ - 99th Percentile: 5.4e+05 seconds.
+ - Probability that a VM will last for less than 1 month: 1
+ - Probability that a VM will last for more than 3 months: 1.9e-18
+ * Fitted Single-core VM Lifetime
+ - Mean (SD): 4.3e+04 (1.6e+05) seconds.
+ - 99th Percentile: 8.4e+05 seconds.
+ - Probability that a VM will last for less than 1 month: 1
+ - Probability that a VM will last for more than 3 months: 9.3e-12
+
+[note The above results differ from the ones shown in Tables III, V, and VII of (Wolski et al.,2013).
+We carefully double-checked them with Wolfram Mathematica 10, which confirmed our results.]
+
+
+[h3 Member Functions]
+
+[h4 Default Constructor]
+
+ hyperexponential_distribution();
+
+Constructs a /1/-phase hyperexponential distribution (i.e., an exponential distribution) with rate `1`.
+
+
+[h4 Constructor from Iterators]
+
+ template <typename ProbIterT, typename RateIterT>
+ hyperexponential_distribution(ProbIterT prob_first, ProbIterT prob_last,
+ RateIterT rate_first, RateIterT rate_last);
+
+Constructs a hyperexponential distribution with /phase probability vector/ parameter given
+by the range defined by the \[`prob_first`, `prob_last`) iterator pair, and /rate vector/ parameter
+given by the range defined by the \[`rate_first`, `rate_last`) iterator pair.
+
+[h5 Parameters]
+
+* `prob_first`, `prob_last`: the range of non-negative real elements representing the phase probabilities; elements are normalized to sum to unity.
+* `rate_first`, `rate_last`: the range of positive elements representing the rates.
+
+[h5 Type Requirements]
+
+[itemized_list [`ProbIterT`, `RateIterT`: must meet the requirements of the [@http://en.cppreference.com/w/cpp/concept/InputIterator InputIterator] concept.]]
+
+[h5 Example]
+
+[hyperexponential_snip2]
+
+[h4 Construction from Ranges/Containers]
+
+ template <typename ProbRangeT, typename RateRangeT>
+ hyperexponential_distribution(ProbRangeT const& prob_range,
+ RateRangeT const& rate_range);
+
+Constructs a hyperexponential distribution with /phase probability vector/ parameter
+given by the range defined by `prob_range`, and /rate vector/ parameter given by the range defined by `rate_range`.
+
+[note As an implementation detail, this constructor uses Boost's
+[@http://www.boost.org/doc/libs/release/libs/core/doc/html/core/enable_if.html enable_if/disable_if mechanism] to
+disambiguate between this and other 2-argument constructors. Refer to the source code for more details.]
+
+[h5 Parameters]
+
+* `prob_range`: the range of non-negative real elements representing the phase probabilities; elements are normalized to sum to unity.
+* `rate_range`: the range of positive real elements representing the rates.
+
+[h5 Type Requirements]
+
+[itemized_list [`ProbRangeT`, `RateRangeT`: must meet the requirements of the [@http://www.boost.org/doc/libs/release/libs/range/doc/html/range/concepts.html Range] concept:
+this includes native C++ arrays, standard library containers, and a `std::pair` of iterators.]]
+
+[h5 Examples]
+
+[hyperexponential_snip3]
+
+[h4 Construction with rates-iterators (and all phase probabilities equal)]
+
+ template <typename RateIterT, typename RateIterT2>
+ hyperexponential_distribution(RateIterT const& rate_first,
+ RateIterT2 const& rate_last);
+
+Constructs a hyperexponential distribution with /rate vector/ parameter given by the range defined by the
+\[`rate_first`, `rate_last`) iterator pair, and /phase probability vector/ set to the equal phase
+probabilities (i.e., to a vector of the same length `n` of the /rate vector/ and with each element set to `1.0/n`).
+
+[note As an implementation detail, this constructor uses Boost's
+[@http://www.boost.org/doc/libs/release/libs/core/doc/html/core/enable_if.html enable_if/disable_if mechanism] to
+disambiguate between this and other 2-argument constructors. Refer to the source code for more details.]
+
+[h5 Parameters]
+
+* `rate_first`, `rate_last`: the range of positive elements representing the rates.
+
+[h5 Type Requirements]
+
+[itemized_list [`RateIterT`, `RateIterT2`: must meet the requirements of the [@http://en.cppreference.com/w/cpp/concept/InputIterator InputIterator] concept.]]
+
+[h5 Example]
+
+[hyperexponential_snip4]
+
+[h4 Construction from a single range of rates (all phase probabilities will be equal)]
+
+ template <typename RateRangeT>
+ hyperexponential_distribution(RateRangeT const& rate_range);
+
+Constructs a hyperexponential distribution with /rate vector/ parameter given by the range defined by `rate_range`,
+and /phase probability vector/ set to the equal phase probabilities (i.e., to a vector of the same length
+`n` of the /rate vector/ and with each element set to `1.0/n`).
+
+[h5 Parameters]
+
+* `rate_range`: the range of positive real elements representing the rates.
+
+[h5 Type Requirements]
+
+[itemized_list [`RateRangeT`: must meet the requirements of the [@http://www.boost.org/doc/libs/release/libs/range/doc/html/range/concepts.html Range] concept: this includes
+native C++ arrays, standard library containers, and a `std::pair` of iterators.]]
+
+[h5 Examples]
+
+[hyperexponential_snip5]
+
+[h4 Construction from Initializer lists]
+
+ hyperexponential_distribution(std::initializer_list<RealType> l1, std::initializer_list<RealType> l2);
+
+Constructs a hyperexponential distribution with /phase probability vector/ parameter
+given by the [@http://en.cppreference.com/w/cpp/language/list_initialization brace-init-list] defined by `l1`,
+and /rate vector/ parameter given by the [@http://en.cppreference.com/w/cpp/language/list_initialization brace-init-list]
+defined by `l2`.
+
+[h5 Parameters]
+
+* `l1`: the brace-init-list of non-negative real elements representing the phase probabilities;
+elements are normalized to sum to unity.
+* `l2`: the brace-init-list of positive real elements representing the rates.
+
+The number of elements of the phase probabilities list and the rates list must be the same.
+
+[h5 Example]
+
+[hyperexponential_snip6]
+
+[h4 Construction from a single initializer list (all phase probabilities will be equal)]
+
+ hyperexponential_distribution(std::initializer_list<RealType> l1);
+
+Constructs a hyperexponential distribution with /rate vector/ parameter given by the
+[@http://en.cppreference.com/w/cpp/language/list_initialization brace-init-list] defined by `l1`, and
+/phase probability vector/ set to the equal phase probabilities (i.e., to a vector of the same length
+`n` of the /rate vector/ and with each element set to `1.0/n`).
+
+[h5 Parameters]
+
+* `l1`: the brace-init-list of positive real elements representing the rates.
+
+[h5 Example]
+
+[hyperexponential_snip7]
+
+[h4 Accessors]
+
+ std::size_t num_phases() const;
+
+Gets the number of phases of this distribution (the size of both the rate and probability vectors).
+
+[h5 Return Value] A non-negative integer representing the number of phases of this distribution.
+
+
+ std::vector<RealType> probabilities() const;
+
+Gets the /phase probability vector/ parameter of this distribution.
+
+[note The returned probabilities are the [*normalized] versions of the probability parameter values passed at construction time.]
+
+[h5 Return Value] A vector of non-negative real numbers representing the /phase probability vector/ parameter of this distribution.
+
+
+ std::vector<RealType> rates() const;
+
+Gets the /rate vector/ parameter of this distribution.
+
+[h5 Return Value] A vector of positive real numbers representing the /rate vector/ parameter of this distribution.
+
+[warning The return type of these functions is a vector-by-value. This is deliberate as we wish to hide the actual container
+used internally which may be subject to future changes (for example to facilitate vectorization of the cdf code etc).
+Users should note that some code that might otherwise have been expected to work does not.
+For example, an attempt to output the (normalized) probabilities:
+
+``
+std::copy(he.probabilities().begin(), he.probabilities().end(), std::ostream_iterator<double>(std::cout, " "));
+``
+
+has undefined behaviour (each call to `probabilities()` returns a distinct temporary container, so the `begin()` and `end()` iterators refer to different objects), but, for example,
+
+``
+std::cout << he.probabilities()[0] << ' ' << he.probabilities()[1] << std::endl;
+``
+
+outputs the expected values.
+
+In general if you want to access a member of the returned container, then assign to a variable first, and then access those
+members:
+
+``
+std::vector<double> t = he.probabilities();
+std::copy(t.begin(), t.end(), std::ostream_iterator<double>(std::cout, " "));
+``
+]
+
+[h3 Non-member Accessor Functions]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all distributions are supported: __usual_accessors.
+
+The formulae for calculating these are shown in the table below.
+
+
+[h3 Accuracy]
+
+The hyperexponential distribution is implemented in terms of the __exp_distrib and as such should have very small errors, usually no more than a few
+[@http://en.wikipedia.org/wiki/Machine_epsilon epsilon].
+
+
+[h3 Implementation]
+
+In the following table:
+
+* ['[*[alpha]]=([alpha][sub 1],...,[alpha][sub k])] is the /phase probability vector/ parameter of the /k/-phase hyperexponential distribution,
+* ['[*[lambda]]=([lambda][sub 1],...,[lambda][sub k])] is the /rate vector/ parameter of the /k/-phase hyperexponential distribution,
+* /x/ is the random variate.
+
+[table
+[[Function][Implementation Notes]]
+[[support][['x] [isin] \[0,[infin])]]
+[[pdf][[equation hyperexponential_pdf]]]
+[[cdf][[equation hyperexponential_cdf]]]
+[[cdf complement][[equation hyperexponential_ccdf]]]
+[[quantile][No closed form available. Computed numerically.]]
+[[quantile from the complement][No closed form available. Computed numerically.]]
+[[mean][[equation hyperexponential_mean]]]
+[[variance][[equation hyperexponential_variance]]]
+[[mode][`0`]]
+[[skewness][[equation hyperexponential_skewness]]]
+[[kurtosis][[equation hyperexponential_kurtosis]]]
+[[kurtosis excess][kurtosis `- 3`]]
+]
+
+
+[h3 References]
+
+* A.O. Allen, /Probability, Statistics, and Queuing Theory with Computer Science Applications, Second Edition/, Academic Press, 1990.
+
+* D.G. Feitelson, /Workload Modeling for Computer Systems Performance Evaluation/, Cambridge University Press, 2014
+
+* A. Feldmann and W. Whitt, /Fitting mixtures of exponentials to long-tail distributions to analyze network performance models/, Performance Evaluation 31(3-4):245, doi:10.1016/S0166-5316(97)00003-5, 1998.
+
+* H.T. Papadopolous, C. Heavey and J. Browne, /Queueing Theory in Manufacturing Systems Analysis and Design/, Chapman & Hall/CRC, 1993, p. 35.
+
+* R.F. Rosin, /Determining a computing center environment/, Communications of the ACM 8(7):463-468, 1965.
+
+* K.S. Trivedi, /Probability and Statistics with Reliability, Queueing, and Computer Science Applications/, John Wiley & Sons, Inc., 2002.
+
+* Wikipedia, /Hyperexponential Distribution/, Online: [@http://en.wikipedia.org/wiki/Hyperexponential_distribution], 2014
+
+* R. Wolski and J. Brevik, /Using Parametric Models to Represent Private Cloud Workloads/, IEEE TSC, PrePrint, DOI: [@http://doi.ieeecomputersociety.org/10.1109/TSC.2013.48 10.1109/TSC.2013.48], 2013.
+
+* Wolfram Mathematica, /Hyperexponential Distribution/, Online: [@http://reference.wolfram.com/language/ref/HyperexponentialDistribution.html], 2014.
+
+[endsect][/section:hyperexponential_dist hyperexponential]
+
+[/ hyperexponential.qbk
+ Copyright 2014 Marco Guazzone (marco.guazzone@gmail.com)
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/hypergeometric.qbk b/doc/distributions/hypergeometric.qbk
new file mode 100644
index 0000000..f894189
--- /dev/null
+++ b/doc/distributions/hypergeometric.qbk
@@ -0,0 +1,229 @@
+[section:hypergeometric_dist Hypergeometric Distribution]
+
+``#include <boost/math/distributions/hypergeometric.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class hypergeometric_distribution;
+
+ template <class RealType, class Policy>
+ class hypergeometric_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Construct:
+ hypergeometric_distribution(unsigned r, unsigned n, unsigned N);
+ // Accessors:
+ unsigned total()const;
+ unsigned defective()const;
+ unsigned sample_count()const;
+ };
+
+ typedef hypergeometric_distribution<> hypergeometric;
+
+ }} // namespaces
+
+The hypergeometric distribution describes the number of "events" /k/
+from a sample /n/ drawn from a total population /N/ ['without replacement].
+
+Imagine we have a population of /N/ objects of which /r/ are "defective"
+and /N-r/ are "not defective"
+(the terms "success\/failure" or "red\/blue" are also used). If we sample /n/
+items /without replacement/ then what is the probability that exactly
+/k/ items in the sample are defective? The answer is given by the pdf of the
+hypergeometric distribution `f(k; r, n, N)`, whilst the probability of
+/k/ defectives or fewer is given by F(k; r, n, N), where F(k) is the
+CDF of the hypergeometric distribution.
+
+[note Unlike almost all of the other distributions in this library,
+the hypergeometric distribution is strictly discrete: it can not be
+extended to real valued arguments of its parameters or random variable.]
+
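+For example, a minimal sketch (the population, defect and sample counts are
+illustrative assumptions):
+
+    #include <boost/math/distributions/hypergeometric.hpp>
+
+    using boost::math::hypergeometric;
+
+    hypergeometric h(50, 30, 500); // r = 50 defective, n = 30 sampled, N = 500 total.
+    double p3 = pdf(h, 3);         // P(exactly 3 defectives in the sample).
+    double p_le3 = cdf(h, 3);      // P(3 defectives or fewer).
+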
+The following graph shows how the distribution changes as the proportion
+of "defective" items changes, while keeping the population and sample sizes
+constant:
+
+[graph hypergeometric_pdf_1]
+
+Note that since the distribution is symmetrical in parameters /n/ and /r/, if we
+change the sample size and keep the population and proportion "defective" the same
+then we obtain basically the same graphs:
+
+[graph hypergeometric_pdf_2]
+
+[h4 Member Functions]
+
+ hypergeometric_distribution(unsigned r, unsigned n, unsigned N);
+
+Constructs a hypergeometric distribution with a population of /N/ objects,
+of which /r/ are defective, and from which /n/ are sampled.
+
+ unsigned total()const;
+
+Returns the total number of objects /N/.
+
+ unsigned defective()const;
+
+Returns the number of objects /r/ in population /N/ which are defective.
+
+ unsigned sample_count()const;
+
+Returns the number of objects /n/ which are sampled from the population /N/.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is the unsigned integers in the range
+\[max(0, n + r - N), min(n, r)\]. A __domain_error is raised if the
+random variable is outside this range, or is not an integral value.
+
+[caution
+The quantile function will by default return an integer result that has been
+/rounded outwards/. That is to say lower quantiles (where the probability is
+less than 0.5) are rounded downward, and upper quantiles (where the probability
+is greater than 0.5) are rounded upwards. This behaviour
+ensures that if an X% quantile is requested, then /at least/ the requested
+coverage will be present in the central region, and /no more than/
+the requested coverage will be present in the tails.
+
+This behaviour can be changed so that the quantile functions are rounded
+differently using
+[link math_toolkit.pol_overview Policies]. It is strongly
+recommended that you read the tutorial
+[link math_toolkit.pol_tutorial.understand_dis_quant
+Understanding Quantiles of Discrete Distributions] before
+using the quantile function on the Hypergeometric distribution. The
+[link math_toolkit.pol_ref.discrete_quant_ref reference docs]
+describe how to change the rounding policy
+for these distributions.
+
+However, note that the implementation method of the quantile function
+always returns an integral value, therefore attempting to use a __Policy
+that requires (or produces) a real valued result will result in a
+compile time error.
+] [/ caution]
+
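+For instance, a sketch of selecting an integer rounding policy (the parameter
+values are illustrative; see the policy tutorial for the full story):
+
+    #include <boost/math/distributions/hypergeometric.hpp>
+
+    using namespace boost::math;
+    using namespace boost::math::policies;
+
+    // Round quantiles to the nearest integer instead of outwards:
+    typedef policy<discrete_quantile<integer_round_nearest> > my_policy;
+
+    hypergeometric_distribution<double, my_policy> h(50, 30, 500);
+    double k = quantile(h, 0.05); // Integral result, rounded to nearest.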
+
+[h4 Accuracy]
+
+For small N, such that
+`N < boost::math::max_factorial<RealType>::value`, table-based
+lookup of the factorials gives results accurate to a few epsilon.
+`boost::math::max_factorial<RealType>::value` is 170 at double or long double
+precision.
+
+For larger N, such that `N < boost::math::prime(boost::math::max_prime)`,
+only basic arithmetic is required for the calculation
+and the accuracy is typically < 20 epsilon. This takes care of N
+up to 104729.
+
+For `N > boost::math::prime(boost::math::max_prime)` then accuracy quickly
+degrades, with 5 or 6 decimal digits being lost for N = 110000.
+
+In general for very large N, the user should expect to lose log[sub 10]N
+decimal digits of precision during the calculation, with the results
+becoming meaningless for N >= 10[super 15].
+
+[h4 Testing]
+
+There are three sets of tests: our implementation is tested against a table of values
+produced by Mathematica's implementation of this distribution. We also sanity check
+our implementation against some spot values computed using the online calculator
+here [@http://stattrek.com/Tables/Hypergeometric.aspx http://stattrek.com/Tables/Hypergeometric.aspx].
+Finally we test accuracy against some high precision test data using
+this implementation and NTL::RR.
+
+[h4 Implementation]
+
+The PDF can be calculated directly using the formula:
+
+[equation hypergeometric1]
+
+However, this can only be used directly when the largest of the factorials
+is guaranteed not to overflow the floating point representation used.
+This formula is used directly when `N < max_factorial<RealType>::value`
+in which case table lookup of the factorials gives a rapid and accurate
+implementation method.
+
+For larger /N/ the method described in
+"An Accurate Computation of the Hypergeometric Distribution Function",
+Trong Wu, ACM Transactions on Mathematical Software, Vol. 19, No. 1,
+March 1993, Pages 33-43 is used. The method relies on the fact that
+there is an easy method for factorising a factorial into the product
+of prime numbers:
+
+[equation hypergeometric2]
+
+Where p[sub i] is the i'th prime number, and e[sub i] is a small
+positive integer or zero, which can be calculated via:
+
+[equation hypergeometric3]
+
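+A sketch of this exponent calculation (the classical Legendre formula, shown
+here for exposition; it is not the library's actual code):
+
+    // Exponent e of the prime p in the factorization of n!:
+    unsigned prime_exponent(unsigned n, unsigned p)
+    {
+       unsigned e = 0;
+       for (unsigned long long q = p; q <= n; q *= p)
+          e += static_cast<unsigned>(n / q); // floor(n / p^j) for j = 1, 2, ...
+       return e;
+    }
+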
+Further we can combine the factorials in the expression for the PDF
+to yield the PDF directly as the product of prime numbers:
+
+[equation hypergeometric4]
+
+This time the exponents e[sub i] may be positive, negative
+or zero. Indeed, such a degree of cancellation occurs in the calculation
+of the e[sub i] that many are zero, and typically most have a magnitude
+of no more than 1 or 2.
+
+Calculation of the product of the primes requires some care to prevent
+numerical overflow: we use a novel recursive method which splits the
+calculation into a series of sub-products, with a new sub-product
+started each time the next multiplication would cause either overflow
+or underflow. The sub-products are stored in a linked list on the
+program stack, and combined in an order that will guarantee no overflow
+or unnecessary underflow once the last sub-product has been calculated.
+
+This method can be used as long as N is smaller than the largest prime
+number we have stored in our table of primes (currently 104729). The method
+is relatively slow (calculating the exponents requires the most time), but
+requires only a small number of arithmetic operations to
+calculate the result (indeed there is no shorter method involving only basic
+arithmetic once the exponents have been found), the method is therefore
+much more accurate than the alternatives.
+
+For much larger N, we can calculate the PDF from the factorials using
+either lgamma, or by directly combining Lanczos approximations to avoid
+calculating via logarithms. We use the latter method, as it is usually
+1 or 2 decimal digits more accurate than computing via logarithms with
+lgamma. However, in this area where N > 104729, the user should expect
+to lose around log[sub 10]N decimal digits during the calculation in
+the worst case.
+
+The CDF and its complement are calculated by directly summing the PDFs.
+We start by deciding whether the CDF, or its complement, is likely to be
+the smaller of the two and then calculate the PDF at /k/ (or /k+1/ if we're
+calculating the complement) and calculate successive PDF values via the
+recurrence relations:
+
+[equation hypergeometric5]
+
+until we either reach the end of the distribution's domain, or the next
+PDF value to be summed would be too small to affect the result.
+
+The quantile is calculated in a similar manner to the CDF: we first guess
+which end of the distribution we're nearer to, and then sum PDFs starting
+from the end of the distribution this time, until we have some value /k/ that
+gives the required CDF.
+
+The median is simply the quantile at 0.5, and the remaining properties are
+calculated via:
+
+[equation hypergeometric6]
+
+[endsect]
+
+[/ hypergeometric.qbk
+ Copyright 2008 John Maddock.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/inverse_chi_squared.qbk b/doc/distributions/inverse_chi_squared.qbk
new file mode 100644
index 0000000..4c1f9f9
--- /dev/null
+++ b/doc/distributions/inverse_chi_squared.qbk
@@ -0,0 +1,176 @@
+[section:inverse_chi_squared_dist Inverse Chi Squared Distribution]
+
+``#include <boost/math/distributions/inverse_chi_squared.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class inverse_chi_squared_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ inverse_chi_squared_distribution(RealType df = 1); // Not explicitly scaled, default 1/df.
+      inverse_chi_squared_distribution(RealType df, RealType scale); // Explicitly scaled.
+
+ RealType degrees_of_freedom()const; // Default 1.
+ RealType scale()const; // Optional scale [xi] (variance), default 1/degrees_of_freedom.
+ };
+
+ }} // namespace boost // namespace math
+
+The inverse chi squared distribution is a continuous probability distribution
+of the *reciprocal* of a variable distributed according to the chi squared distribution.
+
+The sources below give confusingly different formulae
+using different symbols for the distribution pdf,
+but they are all the same, or related by a change of variable, or choice of scale.
+
+Two constructors are available to implement both the scaled and (implicitly) unscaled versions.
+
+The main version has an explicit scale parameter which implements the
+[@http://en.wikipedia.org/wiki/Scaled-inverse-chi-square_distribution scaled inverse chi_squared distribution].
+
+A second version has an implicit scale = 1/degrees of freedom and gives the 1st definition in the
+[@http://en.wikipedia.org/wiki/Inverse-chi-square_distribution Wikipedia inverse chi_squared distribution].
+The 2nd Wikipedia inverse chi_squared distribution definition can be implemented
+by explicitly specifying a scale = 1.
+
+Both definitions are also available in Wolfram Mathematica and in __R (geoR) with default scale = 1/degrees of freedom.
+
+See
+
+* Inverse chi_squared distribution [@http://en.wikipedia.org/wiki/Inverse-chi-square_distribution]
+* Scaled inverse chi_squared distribution [@http://en.wikipedia.org/wiki/Scaled-inverse-chi-square_distribution]
+* R inverse chi_squared distribution functions [@http://hosho.ees.hokudai.ac.jp/~kubo/Rdoc/library/geoR/html/InvChisquare.html R ]
+* Inverse chi_squared distribution functions [@http://mathworld.wolfram.com/InverseChi-SquaredDistribution.html Weisstein, Eric W. "Inverse Chi-Squared Distribution." From MathWorld--A Wolfram Web Resource.]
+* Inverse chi_squared distribution reference [@http://reference.wolfram.com/mathematica/ref/InverseChiSquareDistribution.html Weisstein, Eric W. "Inverse Chi-Squared Distribution reference." From Wolfram Mathematica.]
+
+The inverse_chi_squared distribution is used in
+[@http://en.wikipedia.org/wiki/Bayesian_statistics Bayesian statistics]:
+the scaled inverse chi-square is conjugate prior for the normal distribution
+with known mean, model parameter [sigma][pow2] (variance).
+
+See [@http://en.wikipedia.org/wiki/Conjugate_prior conjugate priors including a table of distributions and their priors.]
+
+See also __inverse_gamma_distrib and __chi_squared_distrib.
+
+The inverse_chi_squared distribution is a special case of an inverse_gamma distribution
+with [nu] (degrees_of_freedom) shape ([alpha]) and scale ([beta]) where
+
+__spaces [alpha] = [nu]/2 and [beta] = [frac12].
+
+[note This distribution *does* provide the typedef:
+
+``typedef inverse_chi_squared_distribution<double> inverse_chi_squared;``
+
+If you want a `double` precision inverse_chi_squared distribution you can use
+
+``boost::math::inverse_chi_squared_distribution<>``
+
+or you can write `inverse_chi_squared my_invchisqr(2, 3);`]
+
+For degrees of freedom parameter [nu],
+the (*unscaled*) inverse chi_squared distribution is defined by the probability density function (PDF):
+
+__spaces f(x;[nu]) = 2[super -[nu]/2] x[super -[nu]/2-1] e[super -1/2x] / [Gamma]([nu]/2)
+
+and Cumulative Distribution Function (CDF)
+
+__spaces F(x;[nu]) = [Gamma]([nu]/2, 1/2x) / [Gamma]([nu]/2)
+
+For degrees of freedom parameter [nu] and scale parameter [xi],
+the *scaled* inverse chi_squared distribution is defined by the probability density function (PDF):
+
+__spaces f(x;[nu], [xi]) = ([xi][nu]/2)[super [nu]/2] e[super -[nu][xi]/2x] x[super -1-[nu]/2] / [Gamma]([nu]/2)
+
+and Cumulative Distribution Function (CDF)
+
+__spaces F(x;[nu], [xi]) = [Gamma]([nu]/2, [nu][xi]/2x) / [Gamma]([nu]/2)
+
+The following graphs illustrate how the PDF and CDF of the inverse chi_squared distribution
+varies for a few values of parameters [nu] and [xi]:
+
+[graph inverse_chi_squared_pdf] [/.png or .svg]
+
+[graph inverse_chi_squared_cdf]
+
+[h4 Member Functions]
+
+ inverse_chi_squared_distribution(RealType df = 1); // Implicitly scaled 1/df.
+   inverse_chi_squared_distribution(RealType df, RealType scale); // Explicitly scaled.
+
+Constructs an inverse chi_squared distribution with [nu] degrees of freedom ['df],
+and scale ['scale] with default value 1\/df.
+
+Requires that the degrees of freedom [nu] parameter is greater than zero, otherwise calls
+__domain_error.
+
+ RealType degrees_of_freedom()const;
+
+Returns the degrees_of_freedom [nu] parameter of this distribution.
+
+ RealType scale()const;
+
+Returns the scale [xi] parameter of this distribution.
+
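+For example, a minimal sketch of constructing and using both versions (the
+parameter values are illustrative only):
+
+    #include <boost/math/distributions/inverse_chi_squared.hpp>
+
+    using boost::math::inverse_chi_squared;
+
+    inverse_chi_squared unscaled(5);   // Implicit scale = 1/df = 0.2.
+    inverse_chi_squared scaled(5, 1.); // Explicit scale = 1 (2nd Wikipedia definition).
+    double p = cdf(scaled, 0.5);
+    double x = quantile(scaled, 0.95);
+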
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variate is \[0,+[infin]\].
+[note Unlike some definitions, this implementation supports a random variate
+equal to zero as a special case, returning zero for both pdf and cdf.]
+
+[h4 Accuracy]
+
+The inverse gamma distribution is implemented in terms of the
+incomplete gamma functions like the __inverse_gamma_distrib that use
+__gamma_p and __gamma_q and their inverses __gamma_p_inv and __gamma_q_inv:
+refer to the accuracy data for those functions for more information.
+But in general, gamma (and thus inverse gamma) results are often accurate to a few epsilon,
+>14 decimal digits accuracy for 64-bit double,
+unless iteration is involved, as for the estimation of degrees of freedom.
+
+[h4 Implementation]
+
+In the following table [nu] is the degrees of freedom parameter and
+[xi] is the scale parameter of the distribution,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/ its complement.
+Parameters [alpha] for shape and [beta] for scale
+are used for the inverse gamma function: [alpha] = [nu]/2 and [beta] = [nu] * [xi]/2.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = __gamma_p_derivative([alpha], [beta]/x) * [beta] / (x * x) ]]
+[[cdf][Using the relation: p = __gamma_q([alpha], [beta] / x) ]]
+[[cdf complement][Using the relation: q = __gamma_p([alpha], [beta] / x) ]]
+[[quantile][Using the relation: x = [beta][space]/ __gamma_q_inv([alpha], p) ]]
+[[quantile from the complement][Using the relation: x = [beta][space]/ __gamma_p_inv([alpha], q) ]]
+[[mode][[nu] * [xi] / ([nu] + 2) ]]
+[[median][no closed form analytic equation is known, but is evaluated as quantile(0.5)]]
+[[mean][[nu][xi] / ([nu] - 2) for [nu] > 2, else a __domain_error]]
+[[variance][2 [nu][pow2] [xi][pow2] / (([nu] -2)[pow2] ([nu] -4)) for [nu] >4, else a __domain_error]]
+[[skewness][4 [sqrt]2 [sqrt]([nu]-4) /([nu]-6) for [nu] >6, else a __domain_error ]]
+[[kurtosis_excess][12 * (5[nu] - 22) / (([nu] - 6) * ([nu] - 8)) for [nu] >8, else a __domain_error]]
+[[kurtosis][3 + 12 * (5[nu] - 22) / (([nu] - 6) * ([nu]-8)) for [nu] >8, else a __domain_error]]
+] [/table]
+
+[h4 References]
+
+# Bayesian Data Analysis, Andrew Gelman, John B. Carlin, Hal S. Stern, Donald B. Rubin,
+ISBN-13: 978-1584883883, Chapman & Hall; 2 edition (29 July 2003).
+
+# Bayesian Computation with R, Jim Albert, ISBN-13: 978-0387922973, Springer; 2nd ed. edition (10 Jun 2009)
+
+[endsect] [/section:inverse_chi_squared_dist Inverse chi_squared Distribution]
+
+[/
+ Copyright 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
\ No newline at end of file
diff --git a/doc/distributions/inverse_chi_squared_eg.qbk b/doc/distributions/inverse_chi_squared_eg.qbk
new file mode 100644
index 0000000..067c746
--- /dev/null
+++ b/doc/distributions/inverse_chi_squared_eg.qbk
@@ -0,0 +1,32 @@
+
+[section:inverse_chi_squared_eg Inverse Chi-Squared Distribution Bayes Example]
+
+[import ../../example/inverse_chi_squared_bayes_eg.cpp]
+[inverse_chi_squared_bayes_eg_1]
+[inverse_chi_squared_bayes_eg_output_1]
+[inverse_chi_squared_bayes_eg_2]
+[inverse_chi_squared_bayes_eg_output_2]
+[inverse_chi_squared_bayes_eg_3]
+[inverse_chi_squared_bayes_eg_output_3]
+[inverse_chi_squared_bayes_eg_4]
+[inverse_chi_squared_bayes_eg_output_4]
+
+[inverse_chi_squared_bayes_eg_5]
+
+A full sample output is:
+[inverse_chi_squared_bayes_eg_output]
+
+(See also the reference documentation for the __inverse_chi_squared_distrib.)
+
+See the full source C++ of this example at
+[@../../example/inverse_chi_squared_bayes_eg.cpp]
+
+[endsect] [/section:inverse_chi_squared_eg Inverse Chi-Squared Distribution Bayes Example]
+
+[/
+ Copyright 2011 Paul A. Bristow and Thomas Mang.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/inverse_gamma.qbk b/doc/distributions/inverse_gamma.qbk
new file mode 100644
index 0000000..0f8afea
--- /dev/null
+++ b/doc/distributions/inverse_gamma.qbk
@@ -0,0 +1,129 @@
+[section:inverse_gamma_dist Inverse Gamma Distribution]
+
+``#include <boost/math/distributions/inverse_gamma.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class inverse_gamma_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+      inverse_gamma_distribution(RealType shape, RealType scale = 1);
+
+ RealType shape()const;
+ RealType scale()const;
+ };
+
+ }} // namespaces
+
+The inverse_gamma distribution is a continuous probability distribution
+of the reciprocal of a variable distributed according to the gamma distribution.
+
+The inverse_gamma distribution is used in Bayesian statistics.
+
+See [@http://en.wikipedia.org/wiki/Inverse-gamma_distribution inverse gamma distribution].
+
+[@http://rss.acs.unt.edu/Rdoc/library/pscl/html/igamma.html R inverse gamma distribution functions].
+
+[@http://reference.wolfram.com/mathematica/ref/InverseGammaDistribution.html Wolfram inverse gamma distribution].
+
+See also __gamma_distrib.
+
+
+[note
+In spite of potential confusion with the inverse gamma function, this
+distribution *does* provide the typedef:
+
+``typedef inverse_gamma_distribution<double> inverse_gamma;``
+
+If you want a `double` precision inverse gamma distribution you can use
+
+``boost::math::inverse_gamma_distribution<>``
+
+or you can write `inverse_gamma my_ig(2, 3);`]
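+
+The note above translates into a minimal, self-contained program
+(a sketch only; the parameter values are arbitrary, chosen for illustration):
+
+    #include <boost/math/distributions/inverse_gamma.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::inverse_gamma;
+       inverse_gamma my_ig(2, 3); // Shape alpha = 2, scale beta = 3.
+       std::cout << pdf(my_ig, 1.5) << std::endl;      // Density at x = 1.5.
+       std::cout << cdf(my_ig, 1.5) << std::endl;      // P(X <= 1.5).
+       std::cout << quantile(my_ig, 0.5) << std::endl; // Median.
+    }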
+
+For shape parameter [alpha] and scale parameter [beta], it is defined
+by the probability density function (PDF):
+
+__spaces f(x;[alpha], [beta]) = [beta][super [alpha]] * (1/x) [super [alpha]+1] exp(-[beta]/x) / [Gamma]([alpha])
+
+and the cumulative distribution function (CDF):
+
+__spaces F(x;[alpha], [beta]) = [Gamma]([alpha], [beta]/x) / [Gamma]([alpha])
+
+The following graphs illustrate how the PDF and CDF of the inverse gamma distribution
+vary as the parameters vary:
+
+[graph inverse_gamma_pdf] [/png or svg]
+
+[graph inverse_gamma_cdf]
+
+[h4 Member Functions]
+
+ inverse_gamma_distribution(RealType shape = 1, RealType scale = 1);
+
+Constructs an inverse gamma distribution with shape [alpha] and scale [beta].
+
+Requires that the shape and scale parameters are greater than zero, otherwise calls
+__domain_error.
+
+ RealType shape()const;
+
+Returns the [alpha] shape parameter of this inverse gamma distribution.
+
+ RealType scale()const;
+
+Returns the [beta] scale parameter of this inverse gamma distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variate is \[0,+[infin]).
+[note Unlike some definitions, this implementation supports a random variate
+equal to zero as a special case, returning zero for pdf and cdf.]
+
+[h4 Accuracy]
+
+The inverse gamma distribution is implemented in terms of the
+incomplete gamma functions __gamma_p and __gamma_q and their
+inverses __gamma_p_inv and __gamma_q_inv: refer to the accuracy
+data for those functions for more information.
+In general, however, inverse_gamma results are accurate to a few epsilon:
+better than 14 decimal digits of accuracy for 64-bit double.
+
+[h4 Implementation]
+
+In the following table [alpha] is the shape parameter of the distribution,
+[beta][space] is its scale parameter, /x/ is the random variate, /p/ is the probability
+and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = __gamma_p_derivative([alpha], [beta] / x, [beta]) / (x * x) ]]
+[[cdf][Using the relation: p = __gamma_q([alpha], [beta] / x) ]]
+[[cdf complement][Using the relation: q = __gamma_p([alpha], [beta] / x) ]]
+[[quantile][Using the relation: x = [beta][space]/ __gamma_q_inv([alpha], p) ]]
+[[quantile from the complement][Using the relation: x = [beta][space]/ __gamma_p_inv([alpha], q) ]]
+[[mode][[beta] / ([alpha] + 1) ]]
+[[median][no analytic equation is known, but it is evaluated as quantile(0.5)]]
+[[mean][[beta] / ([alpha] - 1) for [alpha] > 1, else a __domain_error]]
+[[variance][([beta] * [beta]) / (([alpha] - 1) * ([alpha] - 1) * ([alpha] - 2)) for [alpha] > 2, else a __domain_error]]
+[[skewness][4 * sqrt([alpha] - 2) / ([alpha] - 3) for [alpha] > 3, else a __domain_error]]
+[[kurtosis_excess][(30 * [alpha] - 66) / (([alpha] - 3) * ([alpha] - 4)) for [alpha] > 4, else a __domain_error]]
+] [/table]
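+
+As a quick numerical cross-check of the quantile relation in the table above
+(a sketch; the values are arbitrary):
+
+    #include <boost/math/distributions/inverse_gamma.hpp>
+    #include <boost/math/special_functions/gamma.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       double alpha = 2, beta = 3, p = 0.25;
+       boost::math::inverse_gamma ig(alpha, beta);
+       std::cout << quantile(ig, p) << std::endl; // Library quantile.
+       std::cout << beta / boost::math::gamma_q_inv(alpha, p)
+          << std::endl; // The same relation applied directly.
+    }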
+
+[endsect][/section:inverse_gamma_dist Inverse Gamma Distribution]
+
+[/
+ Copyright 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/inverse_gamma_example.qbk b/doc/distributions/inverse_gamma_example.qbk
new file mode 100644
index 0000000..3feaa26
--- /dev/null
+++ b/doc/distributions/inverse_gamma_example.qbk
@@ -0,0 +1,15 @@
+
+[section:inverse_gamma_eg Inverse Gamma Distribution Bayes Example]
+
+TODO
+
+[endsect] [/section:inverse_gamma_eg Inverse Gamma Distribution Bayes Example]
+
+
+[/
+ Copyright 2006, 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/inverse_gaussian.qbk b/doc/distributions/inverse_gaussian.qbk
new file mode 100644
index 0000000..612801f
--- /dev/null
+++ b/doc/distributions/inverse_gaussian.qbk
@@ -0,0 +1,171 @@
+[section:inverse_gaussian_dist Inverse Gaussian (or Inverse Normal) Distribution]
+
+``#include <boost/math/distributions/inverse_gaussian.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class inverse_gaussian_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ inverse_gaussian_distribution(RealType mean = 1, RealType scale = 1);
+
+ RealType mean()const; // mean default 1.
+ RealType scale()const; // Optional scale, default 1 (unscaled).
+ RealType shape()const; // Shape = scale/mean.
+ };
+ typedef inverse_gaussian_distribution<double> inverse_gaussian;
+
+ }} // namespace boost // namespace math
+
+The Inverse Gaussian distribution is a continuous probability distribution.
+
+The distribution is also called the 'inverse normal distribution'.
+
+It is convenient to provide unity as the default for both mean and scale;
+this is the standard form of the distribution.
+The Inverse Gaussian distribution was first studied in relation to Brownian motion.
+In 1956 M.C.K. Tweedie used the name Inverse Gaussian because there is an inverse relationship
+between the time to cover a unit distance and the distance covered in unit time.
+The inverse Gaussian is one of a family of distributions that have been called the
+[@http://en.wikipedia.org/wiki/Tweedie_distributions Tweedie distributions].
+
+(So ['inverse] in the name may mislead: it does [*not] relate to the inverse of a distribution.)
+
+The tails of the distribution decrease more slowly than those of the normal distribution.
+It is therefore suitable for modelling phenomena
+where numerically large values are more probable than is the case for the normal distribution.
+For stock market returns and prices, a key characteristic is that it models
+the fact that extremely large deviations from the typical (crashes) can occur
+even when almost all (normal) variations are small.
+
+Examples are returns from financial assets and turbulent wind speeds.
+
+The normal-inverse Gaussian distributions form
+a subclass of the generalised hyperbolic distributions.
+
+See
+[@http://en.wikipedia.org/wiki/Normal-inverse_Gaussian_distribution distribution].
+[@http://mathworld.wolfram.com/InverseGaussianDistribution.html
+ Weisstein, Eric W. "Inverse Gaussian Distribution." From MathWorld--A Wolfram Web Resource.]
+
+If you want a `double` precision inverse_gaussian distribution you can use
+
+``boost::math::inverse_gaussian_distribution<>``
+
+or, more conveniently, you can write
+
+ using boost::math::inverse_gaussian;
+ inverse_gaussian my_ig(2, 3);
+
+For mean parameter [mu] and scale (also called precision) parameter [lambda],
+and random variate x,
+the inverse_gaussian distribution is defined by the probability density function (PDF):
+
+__spaces f(x;[mu], [lambda]) = [sqrt]([lambda]/2[pi]x[super 3]) e[super -[lambda](x-[mu])[sup2]/2[mu][sup2]x]
+
+and the cumulative distribution function (CDF):
+
+__spaces F(x;[mu], [lambda]) = [Phi]{[sqrt]([lambda]/x) (x/[mu]-1)} + e[super 2[lambda]/[mu]] [Phi]{-[sqrt]([lambda]/x) (x/[mu]+1)}
+
+where [Phi] is the standard normal distribution CDF.
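+
+This formula can be cross-checked numerically against the library
+implementation (a sketch only; the parameter values are arbitrary):
+
+    #include <boost/math/distributions/inverse_gaussian.hpp>
+    #include <boost/math/distributions/normal.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    int main()
+    {
+       double mu = 1, lambda = 4, x = 1;
+       boost::math::inverse_gaussian ig(mu, lambda);
+       boost::math::normal phi; // Standard normal N(0,1).
+       double by_formula = cdf(phi, std::sqrt(lambda / x) * (x / mu - 1))
+          + std::exp(2 * lambda / mu) * cdf(phi, -std::sqrt(lambda / x) * (x / mu + 1));
+       std::cout << cdf(ig, x) << " " << by_formula << std::endl; // Should agree closely.
+    }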
+
+The following graphs illustrate how the PDF and CDF of the inverse_gaussian distribution
+vary for a few values of parameters [mu] and [lambda]:
+
+[graph inverse_gaussian_pdf] [/.png or .svg]
+
+[graph inverse_gaussian_cdf]
+
+Tweedie also provided three other parameterisations where ([mu] and [lambda])
+are replaced by their ratio [phi] = [lambda]/[mu] and by 1/[mu]:
+these forms may be more suitable for Bayesian applications.
+These can be found in Seshadri, page 2, and are also discussed by Chhikara and Folks on page 105.
+Another related parameterisation, the __wald_distrib (where mean [mu] is unity), is also provided.
+
+[h4 Member Functions]
+
+ inverse_gaussian_distribution(RealType mean = 1, RealType scale = 1);
+
+Constructs an inverse_gaussian distribution with mean [mu]
+and scale [lambda], both defaulting to 1.
+
+Requires that both the mean [mu] parameter and scale [lambda] are greater than zero,
+otherwise calls __domain_error.
+
+ RealType mean()const;
+
+Returns the mean [mu] parameter of this distribution.
+
+ RealType scale()const;
+
+Returns the scale [lambda] parameter of this distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variate is \[0,+[infin]).
+[note Unlike some definitions, this implementation supports a random variate
+equal to zero as a special case, returning zero for both pdf and cdf.]
+
+[h4 Accuracy]
+
+The inverse_gaussian distribution is implemented in terms of the
+exponential function and the standard normal CDF [Phi]:
+refer to the accuracy data for those functions for more information.
+In general, however, inverse_gaussian results are accurate to a few epsilon:
+better than 14 decimal digits of accuracy for 64-bit double.
+
+[h4 Implementation]
+
+In the following table [mu] is the mean parameter and
+[lambda] is the scale parameter of the inverse_gaussian distribution,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/ its complement.
+
+[table
+[[Function] [Implementation Notes] ]
+[[pdf] [ [sqrt]([lambda]/ 2[pi]x[super 3]) e[super -[lambda](x - [mu])[sup2]/ 2[mu][sup2]x]]]
+[[cdf][ [Phi]{[sqrt]([lambda]/x) (x/[mu]-1)} + e[super 2[lambda]/[mu]] [Phi]{-[sqrt]([lambda]/x) (x/[mu]+1)} ]]
+[[cdf complement] [using complement of [Phi] above.] ]
+[[quantile][No closed form known. Estimated using a guess refined by Newton-Raphson iteration.]]
+[[quantile from the complement][No closed form known. Estimated using a guess refined by Newton-Raphson iteration.]]
+[[mode][[mu] {[sqrt](1 + 9[mu][sup2]/4[lambda][sup2]) - 3[mu]/2[lambda]} ]]
+[[median][No closed form analytic equation is known, but is evaluated as quantile(0.5)]]
+[[mean][[mu]] ]
+[[variance][[mu][cubed]/[lambda]] ]
+[[skewness][3 [sqrt] ([mu]/[lambda])] ]
+[[kurtosis_excess][15[mu]/[lambda]] ]
+[[kurtosis][15[mu]/[lambda] + 3] ]
+] [/table]
+
+[h4 References]
+
+# Wald, A. (1947). Sequential Analysis. Wiley, NY.
+# The Inverse Gaussian Distribution: Theory, Methodology, and Applications, Raj S. Chhikara, J. Leroy Folks. ISBN 0824779975 (1989).
+# The Inverse Gaussian Distribution: Statistical Theory and Applications, Seshadri, V., ISBN 0387986189 (pbk) (Dewey 519.2) (1998).
+# [@http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.wald.html Numpy and Scipy Documentation].
+# [@http://bm2.genes.nig.ac.jp/RGM2/R_current/library/statmod/man/invgauss.html R statmod invgauss functions].
+# [@http://cran.r-project.org/web/packages/SuppDists/index.html R SuppDists invGauss functions].
+(Note that the names of these R implementations differ in case.)
+# [@http://www.statsci.org/s/invgauss.html StatSci.org invgauss help].
+# [@http://www.statsci.org/s/invgauss.html StatSci.org invgauss R source].
+# [@http://www.biostat.wustl.edu/archives/html/s-news/2001-12/msg00144.html pwald, qwald].
+# [@http://www.brighton-webs.co.uk/distributions/wald.asp Brighton Webs wald].
+
+[endsect] [/section:inverse_gaussian_dist Inverse Gaussian Distribution]
+
+[/
+ Copyright 2010 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
\ No newline at end of file
diff --git a/doc/distributions/laplace.qbk b/doc/distributions/laplace.qbk
new file mode 100644
index 0000000..e62cc21
--- /dev/null
+++ b/doc/distributions/laplace.qbk
@@ -0,0 +1,141 @@
+[section:laplace_dist Laplace Distribution]
+
+``#include <boost/math/distributions/laplace.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class laplace_distribution;
+
+ typedef laplace_distribution<> laplace;
+
+ template <class RealType, class ``__Policy``>
+ class laplace_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Construct:
+ laplace_distribution(RealType location = 0, RealType scale = 1);
+ // Accessors:
+ RealType location()const;
+ RealType scale()const;
+ };
+
+ }} // namespaces
+
+The Laplace distribution is the distribution of differences between two independent variates
+with identical exponential distributions (Abramowitz and Stegun 1972, p. 930).
+It is also called the double exponential distribution.
+
+[/ Wikipedia definition is The difference between two independent identically distributed
+exponential random variables is governed by a Laplace distribution.]
+
+For location parameter [mu][space] and scale parameter [sigma][space], it is defined by the
+probability density function:
+
+[equation laplace_pdf]
+
+The location and scale parameters are analogous to the mean and
+standard deviation of the normal or Gaussian distribution.
+
+The following graph illustrates the effect of the
+parameters [mu][space] and [sigma][space] on the PDF.
+Note that the domain of the random variable remains
+\[-[infin],+[infin]\] irrespective of the value of the location parameter:
+
+[graph laplace_pdf]
+
+[h4 Member Functions]
+
+ laplace_distribution(RealType location = 0, RealType scale = 1);
+
+Constructs a laplace distribution with location /location/ and
+scale /scale/.
+
+The location parameter is the same as the mean of the random variate.
+
+The scale parameter is proportional to the standard deviation of the random variate.
+
+Requires that the scale parameter is greater than zero, otherwise calls
+__domain_error.
+
+ RealType location()const;
+
+Returns the /location/ parameter of this distribution.
+
+ RealType scale()const;
+
+Returns the /scale/ parameter of this distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[-[infin],+[infin]\].
+
+[h4 Accuracy]
+
+The laplace distribution is implemented in terms of the
+standard library log and exp functions and as such should have very small errors.
+
+[h4 Implementation]
+
+In the following table [mu] is the location parameter of the distribution,
+[sigma] is its scale parameter, /x/ is the random variate, /p/ is the probability
+and its complement /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = e[super -abs(x-[mu]) \/ [sigma]] \/ (2 * [sigma]) ]]
+[[cdf][Using the relations:
+
+x < [mu] : p = e[super (x-[mu])/[sigma] ] \/ 2
+
+x >= [mu] : p = 1 - e[super ([mu]-x)/[sigma] ] \/ 2
+]]
+[[cdf complement][Using the relations:
+
+x < [mu] : q = 1 - e[super (x-[mu])/[sigma] ] \/ 2
+
+x >= [mu] : q = e[super ([mu]-x)/[sigma] ] \/ 2
+]]
+[[quantile][Using the relations:
+
+p < 0.5 : x = [mu] + [sigma] * log(2*p)
+
+p >= 0.5 : x = [mu] - [sigma] * log(2-2*p)
+]]
+[[quantile from the complement][Using the relations:
+
+q > 0.5 : x = [mu] + [sigma] * log(2-2*q)
+
+q <= 0.5 : x = [mu] - [sigma] * log(2*q)
+]]
+[[mean][[mu]]]
+[[variance][2 * [sigma][super 2] ]]
+[[mode][[mu]]]
+[[skewness][0]]
+[[kurtosis][6]]
+[[kurtosis excess][3]]
+]
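+
+The quantile relations above can be sanity-checked by a round trip through
+the cdf (a sketch; the values are arbitrary):
+
+    #include <boost/math/distributions/laplace.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::laplace dist(0.5, 2); // Location mu = 0.5, scale sigma = 2.
+       double p = 0.25;
+       double x = quantile(dist, p); // Here x = mu + sigma * log(2*p), since p < 0.5.
+       std::cout << cdf(dist, x) << std::endl; // Recovers p = 0.25.
+    }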
+
+[h4 References]
+
+* [@http://mathworld.wolfram.com/LaplaceDistribution.html Weisstein, Eric W. "Laplace Distribution."] From MathWorld--A Wolfram Web Resource.
+
+* [@http://en.wikipedia.org/wiki/Laplace_distribution Laplace Distribution]
+
+* M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions, 1972, p. 930.
+
+[endsect][/section:laplace_dist laplace]
+
+[/
+ Copyright 2008, 2009 John Maddock, Paul A. Bristow and M.A. (Thijs) van den Berg.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/logistic.qbk b/doc/distributions/logistic.qbk
new file mode 100644
index 0000000..c2d04a8
--- /dev/null
+++ b/doc/distributions/logistic.qbk
@@ -0,0 +1,103 @@
+[section:logistic_dist Logistic Distribution]
+
+``#include <boost/math/distributions/logistic.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class logistic_distribution;
+
+ template <class RealType, class Policy>
+ class logistic_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Construct:
+ logistic_distribution(RealType location = 0, RealType scale = 1);
+ // Accessors:
+ RealType location()const; // location.
+ RealType scale()const; // scale.
+
+ };
+
+ typedef logistic_distribution<> logistic;
+
+ }} // namespaces
+
+The logistic distribution is a continuous probability distribution.
+It has two parameters - location and scale. The cumulative distribution
+function of the logistic distribution appears in logistic regression
+and feedforward neural networks. Among other applications,
+the United States Chess Federation and FIDE use it to calculate chess ratings.
+
+The following graph shows how the distribution changes as the
+parameters change:
+
+[graph logistic_pdf]
+
+[h4 Member Functions]
+
+ logistic_distribution(RealType u = 0, RealType s = 1);
+
+Constructs a logistic distribution with location /u/ and scale /s/.
+
+Requires `scale > 0`, otherwise a __domain_error is raised.
+
+ RealType location()const;
+
+Returns the location of this distribution.
+
+ RealType scale()const;
+
+Returns the scale of this distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[-\[max_value\], +\[max_value\]\].
+However, the pdf and cdf support inputs of +[infin] and -[infin]
+as special cases if RealType permits.
+
+At `p=1` and `p=0`, the quantile function returns the result of
++__overflow_error and -__overflow_error, while the complement
+quantile function returns the result of -__overflow_error and
++__overflow_error respectively.
+
+[h4 Accuracy]
+
+The logistic distribution is implemented in terms of the `std::exp`
+and `std::log` functions, so its accuracy is related to the
+accuracy of those functions on a given platform.
+When calculating the quantile with a non-zero /location/ parameter,
+catastrophic cancellation errors can occur:
+in such cases, only a low /absolute error/ can be guaranteed.
+
+[h4 Implementation]
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = e[super -(x-u)/s] / (s*(1+e[super -(x-u)/s])[super 2])]]
+[[cdf][Using the relation: p = 1/(1+e[super -(x-u)/s])]]
+[[cdf complement][Using the relation: q = 1/(1+e[super (x-u)/s])]]
+[[quantile][Using the relation: x = u - s*log(1/p-1)]]
+[[quantile from the complement][Using the relation: x = u + s*log((1-q)/q)]]
+[[mean][u]]
+[[mode][The same as the mean.]]
+[[skewness][0]]
+[[kurtosis excess][6/5]]
+[[variance][ ([pi]*s)[super 2] / 3]]
+]
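+
+The cdf and quantile relations above can be exercised directly (a sketch;
+the values are arbitrary):
+
+    #include <boost/math/distributions/logistic.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    int main()
+    {
+       double u = 1, s = 2, x = 3;
+       boost::math::logistic dist(u, s);
+       double p = cdf(dist, x);
+       // The same value from the relation p = 1/(1+e^-((x-u)/s)):
+       std::cout << p << " " << 1 / (1 + std::exp(-(x - u) / s)) << std::endl;
+       std::cout << quantile(dist, p) << std::endl; // Recovers x = 3.
+    }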
+
+[endsect]
+
+[/ logistic.qbk
+ Copyright 2006, 2007 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/lognormal.qbk b/doc/distributions/lognormal.qbk
new file mode 100644
index 0000000..5f19ed5
--- /dev/null
+++ b/doc/distributions/lognormal.qbk
@@ -0,0 +1,119 @@
+[section:lognormal_dist Log Normal Distribution]
+
+``#include <boost/math/distributions/lognormal.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class lognormal_distribution;
+
+ typedef lognormal_distribution<> lognormal;
+
+ template <class RealType, class ``__Policy``>
+ class lognormal_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Construct:
+ lognormal_distribution(RealType location = 0, RealType scale = 1);
+ // Accessors:
+ RealType location()const;
+ RealType scale()const;
+ };
+
+ }} // namespaces
+
+The lognormal distribution is the distribution that arises
+when the logarithm of the random variable is normally distributed.
+A lognormal distribution results when the variable is the product
+of a large number of independent, identically-distributed variables.
+
+For location and scale parameters /m/ and /s/ it is defined by the
+probability density function:
+
+[equation lognormal_ref]
+
+The location and scale parameters are equivalent to the mean and
+standard deviation of the logarithm of the random variable.
+
+The following graph illustrates the effect of the location
+parameter on the PDF; note that the range of the random
+variable remains \[0,+[infin]\] irrespective of the value of the
+location parameter:
+
+[graph lognormal_pdf1]
+
+The next graph illustrates the effect of the scale parameter on the PDF:
+
+[graph lognormal_pdf2]
+
+[h4 Member Functions]
+
+ lognormal_distribution(RealType location = 0, RealType scale = 1);
+
+Constructs a lognormal distribution with location /location/ and
+scale /scale/.
+
+The location parameter is the same as the mean of the logarithm of the
+random variate.
+
+The scale parameter is the same as the standard deviation of the
+logarithm of the random variate.
+
+Requires that the scale parameter is greater than zero, otherwise calls
+__domain_error.
+
+ RealType location()const;
+
+Returns the /location/ parameter of this distribution.
+
+ RealType scale()const;
+
+Returns the /scale/ parameter of this distribution.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0,+[infin]\].
+
+[h4 Accuracy]
+
+The lognormal distribution is implemented in terms of the
+standard library log and exp functions, plus the
+[link math_toolkit.sf_erf.error_function error function],
+and as such should have very low error rates.
+
+[h4 Implementation]
+
+In the following table /m/ is the location parameter of the distribution,
+/s/ is its scale parameter, /x/ is the random variate, /p/ is the probability
+and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = e[super -(ln(x) - m)[super 2 ] \/ 2s[super 2 ] ] \/ (x * s * sqrt(2pi)) ]]
+[[cdf][Using the relation: p = cdf(normal_distribution<RealType>(m, s), log(x)) ]]
+[[cdf complement][Using the relation: q = cdf(complement(normal_distribution<RealType>(m, s), log(x))) ]]
+[[quantile][Using the relation: x = exp(quantile(normal_distribution<RealType>(m, s), p))]]
+[[quantile from the complement][Using the relation: x = exp(quantile(complement(normal_distribution<RealType>(m, s), q)))]]
+[[mean][e[super m + s[super 2 ] / 2 ] ]]
+[[variance][(e[super s[super 2] ] - 1) * e[super 2m + s[super 2 ] ] ]]
+[[mode][e[super m - s[super 2 ] ] ]]
+[[skewness][sqrt(e[super s[super 2] ] - 1) * (2 + e[super s[super 2] ]) ]]
+[[kurtosis][e[super 4s[super 2] ] + 2e[super 3s[super 2] ] + 3e[super 2s[super 2] ] - 3]]
+[[kurtosis excess][e[super 4s[super 2] ] + 2e[super 3s[super 2] ] + 3e[super 2s[super 2] ] - 6 ]]
+]
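+
+The cdf relation above is just a change of variable, which is easy to check
+numerically (a sketch; the values are arbitrary):
+
+    #include <boost/math/distributions/lognormal.hpp>
+    #include <boost/math/distributions/normal.hpp>
+    #include <cmath>
+    #include <iostream>
+
+    int main()
+    {
+       double m = 0.5, s = 1.5, x = 2;
+       boost::math::lognormal ln_dist(m, s);
+       boost::math::normal n_dist(m, s);
+       // cdf of the lognormal at x equals cdf of the normal at log(x):
+       std::cout << cdf(ln_dist, x) << " " << cdf(n_dist, std::log(x)) << std::endl;
+    }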
+
+[endsect][/section:lognormal_dist Log Normal]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/nag_library.qbk b/doc/distributions/nag_library.qbk
new file mode 100644
index 0000000..4f728a1
--- /dev/null
+++ b/doc/distributions/nag_library.qbk
@@ -0,0 +1,60 @@
+[section:nag_library Comparison with C, R, FORTRAN-style Free Functions]
+
+You are probably familiar with a statistics library that has free functions,
+for example the classic [@http://nag.com/numeric/CL/CLdescription.asp NAG C library]
+and matching [@http://nag.com/numeric/FL/FLdescription.asp NAG FORTRAN Library],
+[@http://office.microsoft.com/en-us/excel/HP052090051033.aspx Microsoft Excel BINOMDIST(number_s,trials,probability_s,cumulative)],
+[@http://www.r-project.org/ R], [@http://www.ptc.com/products/mathcad/mathcad14/mathcad_func_chart.htm MathCAD pbinom]
+and many others.
+
+If so, you may find 'Distributions as Objects' unfamiliar, if not alien.
+
+However, *do not panic*: neither the definition nor the usage is really very different.
+
+A very simple example of generating the same values as the
+[@http://nag.com/numeric/CL/CLdescription.asp NAG C library]
+for the binomial distribution follows.
+(If you find slightly different values, the Boost C++ version, using double or better,
+is very likely to be the more accurate.
+Of course, accuracy is not usually a concern for most applications of this function.)
+
+The [@http://www.nag.co.uk/numeric/cl/manual/pdf/G01/g01bjc.pdf NAG function specification] is
+
+ void nag_binomial_dist(Integer n, double p, Integer k,
+ double *plek, double *pgtk, double *peqk, NagError *fail)
+
+and is called
+
+ g01bjc(n, p, k, &plek, &pgtk, &peqk, NAGERR_DEFAULT);
+
+The equivalent using this Boost C++ library is:
+
+ using namespace boost::math; // Using directive avoids very long names.
+ binomial my_dist(4, 0.5); // c.f. NAG n = 4, p = 0.5
+
+and values can be output thus:
+
+ cout
+ << my_dist.trials() << " " // Echo the NAG input n = 4 trials.
+ << my_dist.success_fraction() << " " // Echo the NAG input p = 0.5
+ << cdf(my_dist, 2) << " " // NAG plek with k = 2
+ << cdf(complement(my_dist, 2)) << " " // NAG pgtk with k = 2
+ << pdf(my_dist, 2) << endl; // NAG peqk with k = 2
+
+`cdf(dist, k)` is equivalent to NAG library `plek`, lower tail probability of <= k
+
+`cdf(complement(dist, k))` is equivalent to NAG library `pgtk`, upper tail probability of > k
+
+`pdf(dist, k)` is equivalent to NAG library `peqk`, point probability of == k
+
+See [@../../example/binomial_example_nag.cpp binomial_example_nag.cpp] for details.
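+
+Putting the fragments above together gives a small, self-contained program
+(a sketch only; the output formatting is illustrative):
+
+    #include <boost/math/distributions/binomial.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using namespace boost::math;
+       binomial my_dist(4, 0.5); // n = 4 trials, success probability p = 0.5.
+       std::cout << cdf(my_dist, 2) << " "             // NAG plek, P(X <= 2).
+                 << cdf(complement(my_dist, 2)) << " " // NAG pgtk, P(X > 2).
+                 << pdf(my_dist, 2) << std::endl;      // NAG peqk, P(X == 2).
+    }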
+
+[endsect] [/section:nag_library Comparison with C, R, FORTRAN-style Free Functions]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/nc_beta.qbk b/doc/distributions/nc_beta.qbk
new file mode 100644
index 0000000..d74b716
--- /dev/null
+++ b/doc/distributions/nc_beta.qbk
@@ -0,0 +1,204 @@
+[section:nc_beta_dist Noncentral Beta Distribution]
+
+``#include <boost/math/distributions/non_central_beta.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class non_central_beta_distribution;
+
+ typedef non_central_beta_distribution<> non_central_beta;
+
+ template <class RealType, class ``__Policy``>
+ class non_central_beta_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructor:
+ non_central_beta_distribution(RealType alpha, RealType beta, RealType lambda);
+
+ // Accessor to shape parameters:
+ RealType alpha()const;
+ RealType beta()const;
+
+ // Accessor to non-centrality parameter lambda:
+ RealType non_centrality()const;
+ };
+
+ }} // namespaces
+
+The noncentral beta distribution is a generalization of the __beta_distrib.
+
+It is defined as the ratio
+X = [chi][sub m][super 2]([lambda]) \/ ([chi][sub m][super 2]([lambda])
++ [chi][sub n][super 2])
+where [chi][sub m][super 2]([lambda]) is a noncentral [chi][super 2]
+random variable with /m/ degrees of freedom, and [chi][sub n][super 2]
+is a central [chi][super 2] random variable with /n/ degrees of freedom.
+
+This gives a PDF that can be expressed as a Poisson mixture
+of beta distribution PDFs:
+
+[equation nc_beta_ref1]
+
+where P(i;[lambda]\/2) is the discrete Poisson probability at /i/, with mean
+[lambda]\/2, and I[sub x][super ']([alpha], [beta]) is the derivative of
+the incomplete beta function. This leads to the usual form of the CDF
+as:
+
+[equation nc_beta_ref2]
+
+The following graph illustrates how the distribution changes
+for different values of [lambda]:
+
+[graph nc_beta_pdf]
+
+[h4 Member Functions]
+
+ non_central_beta_distribution(RealType a, RealType b, RealType lambda);
+
+Constructs a noncentral beta distribution with shape parameters /a/ and /b/
+and non-centrality parameter /lambda/.
+
+Requires a > 0, b > 0 and lambda >= 0, otherwise calls __domain_error.
+
+ RealType alpha()const;
+
+Returns the parameter /a/ from which this object was constructed.
+
+ RealType beta()const;
+
+Returns the parameter /b/ from which this object was constructed.
+
+ RealType non_centrality()const;
+
+Returns the parameter /lambda/ from which this object was constructed.
+
+[h4 Non-member Accessors]
+
+Most of the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+are supported: __cdf, __pdf, __quantile, __mean, __variance, __sd,
+__median, __mode, __hazard, __chf, __range and __support.
+
+Mean and variance are implemented using hypergeometric pfq functions and relations given in
+[@http://reference.wolfram.com/mathematica/ref/NoncentralBetaDistribution.html Wolfram Noncentral Beta Distribution].
+
+However, the following are not currently implemented:
+ __skewness, __kurtosis and __kurtosis_excess.
+
+The domain of the random variable is \[0, 1\].
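+
+A minimal usage sketch (the parameter values are arbitrary):
+
+    #include <boost/math/distributions/non_central_beta.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::non_central_beta ncb(2, 3, 1); // alpha = 2, beta = 3, lambda = 1.
+       double p = cdf(ncb, 0.5);
+       std::cout << p << std::endl;
+       std::cout << quantile(ncb, p) << std::endl; // Recovers 0.5.
+    }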
+
+[h4 Accuracy]
+
+The following table shows the peak errors
+(in units of [@http://en.wikipedia.org/wiki/Machine_epsilon epsilon])
+found on various platforms with various floating point types.
+The failures in the comparison to the [@http://www.r-project.org/ R Math library]
+seem to be mostly in the corner cases when the probability would be very small.
+Unless otherwise specified any floating-point type that is narrower
+than the one shown will have __zero_error.
+
+[table_non_central_beta_CDF]
+
+[table_non_central_beta_CDF_complement]
+
+Error rates for the PDF, the complement of the CDF and for the quantile
+functions are broadly similar.
+
+[h4 Tests]
+
+There are two sets of test data used to verify this implementation:
+firstly we can compare with a few sample values generated by the
+[@http://www.r-project.org/ R library].
+Secondly, we have tables of test data, computed with this
+implementation and using interval arithmetic - this data should
+be accurate to at least 50 decimal digits - and is then used for
+our accuracy tests.
+
+[h4 Implementation]
+
+The CDF and its complement are evaluated as follows:
+
+First we determine which of the two values (the CDF or its
+complement) is likely to be the smaller, the crossover point
+is taken to be the mean of the distribution: for this we use the
+approximation due to R. Chattamvelli and R. Shanmugam,
+"Algorithm AS 310: Computing the Non-Central Beta Distribution Function",
+Applied Statistics, Vol. 46, No. 1. (1997), pp. 146-156.
+
+[equation nc_beta_ref3]
+
+Then either the CDF or its complement is computed using the
+relations:
+
+[equation nc_beta_ref4]
+
+The summation is performed by starting at i = [lambda]/2, and then recursing
+in both directions, using the usual recurrence relations for the Poisson
+PDF and incomplete beta functions. This is the "Method 2" described
+by:
+
+Denise Benton and K. Krishnamoorthy,
+"Computing discrete mixtures of continuous
+distributions: noncentral chisquare, noncentral t
+and the distribution of the square of the sample
+multiple correlation coefficient",
+Computational Statistics & Data Analysis 43 (2003) 249-267.
+
+Specific applications of the above formulae to the noncentral
+beta distribution can be found in:
+
+Russell V. Lenth,
+"Algorithm AS 226: Computing Noncentral Beta Probabilities",
+Applied Statistics, Vol. 36, No. 2. (1987), pp. 241-244.
+
+H. Frick,
+"Algorithm AS R84: A Remark on Algorithm AS 226: Computing Non-Central Beta
+Probabilities", Applied Statistics, Vol. 39, No. 2. (1990), pp. 311-312.
+
+Ming Long Lam,
+"Remark AS R95: A Remark on Algorithm AS 226: Computing Non-Central Beta
+Probabilities", Applied Statistics, Vol. 44, No. 4. (1995), pp. 551-552.
+
+Harry O. Posten,
+"An Effective Algorithm for the Noncentral Beta Distribution Function",
+The American Statistician, Vol. 47, No. 2. (May, 1993), pp. 129-131.
+
+R. Chattamvelli,
+"A Note on the Noncentral Beta Distribution Function",
+The American Statistician, Vol. 49, No. 2. (May, 1995), pp. 231-234.
+
+Of these, the Posten reference provides the most complete overview,
+and includes the modification of starting the iteration at [lambda]/2.
+
+The main difference between this implementation and the above
+references is the direct computation of the complement when most
+efficient to do so, and the accumulation of the sum to -1 rather
+than subtracting the result from 1 at the end: this can substantially
+reduce the number of iterations required when the result is near 1.
+
+The PDF is computed using the methodology of Benton and Krishnamoorthy
+and the relation:
+
+[equation nc_beta_ref1]
+
+Quantiles are computed using a specially modified version of
+__bracket_solve,
+starting the search for the root at the mean of the distribution.
+(A Cornish-Fisher type expansion was also tried, but while this gets
+quite close to the root in many cases, when it is wrong it tends to
+introduce quite pathological behaviour: more investigation in this
+area is probably warranted).
+
+[endsect] [/section:nc_beta_dist]
+
+[/ nc_beta.qbk
+ Copyright 2008 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/nc_chi_squared.qbk b/doc/distributions/nc_chi_squared.qbk
new file mode 100644
index 0000000..a1c2a32
--- /dev/null
+++ b/doc/distributions/nc_chi_squared.qbk
@@ -0,0 +1,273 @@
+[section:nc_chi_squared_dist Noncentral Chi-Squared Distribution]
+
+``#include <boost/math/distributions/non_central_chi_squared.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class non_central_chi_squared_distribution;
+
+ typedef non_central_chi_squared_distribution<> non_central_chi_squared;
+
+ template <class RealType, class ``__Policy``>
+ class non_central_chi_squared_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructor:
+ non_central_chi_squared_distribution(RealType v, RealType lambda);
+
+ // Accessor to degrees of freedom parameter v:
+ RealType degrees_of_freedom()const;
+
+ // Accessor to non centrality parameter lambda:
+ RealType non_centrality()const;
+
+ // Parameter finders:
+ static RealType find_degrees_of_freedom(RealType lambda, RealType x, RealType p);
+ template <class A, class B, class C>
+ static RealType find_degrees_of_freedom(const complemented3_type<A,B,C>& c);
+
+ static RealType find_non_centrality(RealType v, RealType x, RealType p);
+ template <class A, class B, class C>
+ static RealType find_non_centrality(const complemented3_type<A,B,C>& c);
+ };
+
+ }} // namespaces
+
+The noncentral chi-squared distribution is a generalization of the
+__chi_squared_distrib. If X[sub i] are [nu] independent, normally
+distributed random variables with means [mu][sub i] and variances
+[sigma][sub i][super 2], then the random variable
+
+[equation nc_chi_squ_ref1]
+
+is distributed according to the noncentral chi-squared distribution.
+
+The noncentral chi-squared distribution has two parameters:
+[nu] which specifies the number of degrees of freedom
+(i.e. the number of X[sub i]), and [lambda] which is related to the
+mean of the random variables X[sub i] by:
+
+[equation nc_chi_squ_ref2]
+
+(Note that some references define [lambda] as one half of the above sum).
+
+This leads to a PDF of:
+
+[equation nc_chi_squ_ref3]
+
+where ['f(x;k)] is the central chi-squared distribution PDF, and
+['I[sub v](x)] is a modified Bessel function of the first kind.
+
+The following graph illustrates how the distribution changes
+for different values of [lambda]:
+
+[graph nccs_pdf]
+
+[h4 Member Functions]
+
+ non_central_chi_squared_distribution(RealType v, RealType lambda);
+
+Constructs a noncentral Chi-Squared distribution with /v/ degrees of freedom
+and non-centrality parameter /lambda/.
+
+Requires v > 0 and lambda >= 0, otherwise calls __domain_error.
+
+ RealType degrees_of_freedom()const;
+
+Returns the parameter /v/ from which this object was constructed.
+
+ RealType non_centrality()const;
+
+Returns the parameter /lambda/ from which this object was constructed.
+
+
+ static RealType find_degrees_of_freedom(RealType lambda, RealType x, RealType p);
+
+This function returns the number of degrees of freedom /v/ such that:
+`cdf(non_central_chi_squared<RealType, Policy>(v, lambda), x) == p`
+
+ template <class A, class B, class C>
+ static RealType find_degrees_of_freedom(const complemented3_type<A,B,C>& c);
+
+When called with argument `boost::math::complement(lambda, x, q)`
+this function returns the number of degrees of freedom /v/ such that:
+
+`cdf(complement(non_central_chi_squared<RealType, Policy>(v, lambda), x)) == q`.
+
+ static RealType find_non_centrality(RealType v, RealType x, RealType p);
+
+This function returns the non centrality parameter /lambda/ such that:
+
+`cdf(non_central_chi_squared<RealType, Policy>(v, lambda), x) == p`
+
+ template <class A, class B, class C>
+ static RealType find_non_centrality(const complemented3_type<A,B,C>& c);
+
+When called with argument `boost::math::complement(v, x, q)`
+this function returns the non centrality parameter /lambda/ such that:
+
+`cdf(complement(non_central_chi_squared<RealType, Policy>(v, lambda), x)) == q`.
+
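+For example, `find_degrees_of_freedom` can be used to ask how many degrees
+of freedom are needed before a given quantile is reached (a sketch;
+the values are arbitrary):
+
+    #include <boost/math/distributions/non_central_chi_squared.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       using boost::math::non_central_chi_squared;
+       double lambda = 10, x = 25, p = 0.9;
+       double v = non_central_chi_squared::find_degrees_of_freedom(lambda, x, p);
+       std::cout << v << std::endl;
+       // Round trip: the cdf at x with the found v recovers p.
+       std::cout << cdf(non_central_chi_squared(v, lambda), x) << std::endl;
+    }
+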
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0, +[infin]\].
+
+[h4 Examples]
+
+There is a
+[link math_toolkit.stat_tut.weg.nccs_eg worked example]
+for the noncentral chi-squared distribution.
+
+[h4 Accuracy]
+
+The following table shows the peak errors
+(in units of [@http://en.wikipedia.org/wiki/Machine_epsilon epsilon])
+found on various platforms with various floating point types.
+The failures in the comparison to the [@http://www.r-project.org/ R Math library]
+seem to be mostly in the corner cases when the probability would be very small.
+Unless otherwise specified any floating-point type that is narrower
+than the one shown will have __zero_error.
+
+[table_non_central_chi_squared_CDF]
+
+[table_non_central_chi_squared_CDF_complement]
+
+Error rates for the quantile
+functions are broadly similar. Special mention should go to
+the `mode` function: there is no closed form for this function,
+so it is evaluated numerically by finding the maximum of the PDF:
+in principle this cannot produce an accuracy greater than the
+square root of the machine epsilon.
+
+[h4 Tests]
+
+There are two sets of test data used to verify this implementation:
+firstly we can compare with published data, for example with
+Table 6 of "Self-Validating Computations of Probabilities for
+Selected Central and Noncentral Univariate Probability Functions",
+Morgan C. Wang and William J. Kennedy,
+Journal of the American Statistical Association,
+Vol. 89, No. 427. (Sep., 1994), pp. 878-887.
+Secondly, we have tables of test data, computed with this
+implementation and using interval arithmetic - this data should
+be accurate to at least 50 decimal digits - and is then used for
+our accuracy tests.
+
+[h4 Implementation]
+
+The CDF and its complement are evaluated as follows:
+
+First we determine which of the two values (the CDF or its
+complement) is likely to be the smaller: for this we can use the
+relation due to Temme (see "Asymptotic and Numerical Aspects of the
+Noncentral Chi-Square Distribution", N. M. Temme, Computers Math. Applic.
+Vol 25, No. 5, 55-63, 1993) that:
+
+F([nu],[lambda];[nu]+[lambda]) [asymp] 0.5
+
+and so compute the CDF when the random variable is less than
+[nu]+[lambda], and its complement when the random variable is
+greater than [nu]+[lambda]. If necessary the computed result
+is then subtracted from 1 to give the desired result (the CDF or its
+complement).
+
+For small values of the non centrality parameter, the CDF is computed
+using the method of Ding (see "Algorithm AS 275: Computing the Non-Central
+[chi][super 2] Distribution Function", Cherng G. Ding, Applied Statistics, Vol. 41,
+No. 2. (1992), pp. 478-482). This uses the following series representation:
+
+[equation nc_chi_squ_ref4]
+
+which requires just one call to __gamma_p_derivative with the subsequent
+terms being computed by recursion as shown above.
+
+For larger values of the non-centrality parameter, Ding's method can take
+an unreasonable number of terms before convergence is achieved. Furthermore,
+the largest term is not the first term, so in extreme cases the first term may
+be zero, leading to a zero result, even though the true value may be non-zero.
+
+Therefore, when the non-centrality parameter is greater than 200, the method due
+to Krishnamoorthy (see "Computing discrete mixtures of continuous distributions:
+noncentral chisquare, noncentral t and the distribution of the
+square of the sample multiple correlation coefficient",
+Denise Benton and K. Krishnamoorthy, Computational Statistics &
+Data Analysis, 43, (2003), 249-267) is used.
+
+This method uses the well known sum:
+
+[equation nc_chi_squ_ref5]
+
+Where P[sub a](x) is the incomplete gamma function.
+
+The method starts at the [lambda]th term, which is where the Poisson weighting
+function achieves its maximum value, although this is not necessarily
+the largest overall term. Subsequent terms are calculated via the normal
+recurrence relations for the incomplete gamma function, and iteration proceeds
+both forwards and backwards until sufficient precision has been achieved. It
+should be noted that recurrence in the forwards direction of P[sub a](x) is
+numerically unstable. However, since we always start /after/ the largest
+term in the series, numeric instability is introduced more slowly than the
+series converges.
+
+Computation of the complement of the CDF uses an extension of Krishnamoorthy's
+method, given that:
+
+[equation nc_chi_squ_ref6]
+
+we can again start at the [lambda]'th term and proceed in both directions from
+there until the required precision is achieved. This time it is backwards
+recursion on the incomplete gamma function Q[sub a](x) which is unstable.
+However, as long as we start well /before/ the largest term, this is not an
+issue in practice.
+
+The PDF is computed directly using the relation:
+
+[equation nc_chi_squ_ref3]
+
+Where ['f(x; v)] is the PDF of the central __chi_squared_distrib and
+['I[sub v](x)] is a modified Bessel function, see __cyl_bessel_i.
+For small values of the
+non-centrality parameter the relation in terms of __cyl_bessel_i
+is used. However, this method fails for large values of the
+non-centrality parameter, so in that case the infinite sum is
+evaluated using the method of Benton and Krishnamoorthy, and
+the usual recurrence relations for successive terms.
+
+The quantile functions are computed by numeric inversion of the CDF.
+An improved starting guess is from
+Thomas Luu,
+[@http://discovery.ucl.ac.uk/1482128/ Fast and accurate parallel computation of quantile functions for random number generation, Doctoral Thesis, 2016].
+
+There is no [@http://en.wikipedia.org/wiki/Closed_form closed form]
+for the mode of the noncentral chi-squared
+distribution: it is computed numerically by finding the maximum
+of the PDF. Likewise, the median is computed numerically via
+the quantile.
+
+The remaining non-member functions use the following formulas:
+
+[equation nc_chi_squ_ref7]
+
+Some analytic properties of noncentral distributions
+(particularly unimodality, and monotonicity of their modes)
+are surveyed and summarized by:
+
+Andrea van Aubel & Wolfgang Gawronski, Applied Mathematics and Computation, 141 (2003) 3-12.
+
+[endsect] [/section:nc_chi_squared_dist]
+
+[/ nc_chi_squared.qbk
+ Copyright 2008 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/nc_chi_squared_example.qbk b/doc/distributions/nc_chi_squared_example.qbk
new file mode 100644
index 0000000..38f45b0
--- /dev/null
+++ b/doc/distributions/nc_chi_squared_example.qbk
@@ -0,0 +1,20 @@
+[section:nccs_eg Non Central Chi Squared Example]
+
+(See also the reference documentation for the __non_central_chi_squared_distrib.)
+
+[section:nccs_power_eg Tables of the power function of the chi[super 2] test.]
+[/chi super 2 failed to show the chi in pdf why??? (OK in html) so use words.]
+
+[import ../../example/nc_chi_sq_example.cpp]
+[nccs_eg]
+
+[endsect] [/nccs_power_eg Tables of the power function of the chi-squared [chi][super 2] test.]
+
+[endsect] [/section:nccs_eg Non Central Chi Squared Example]
+
+[/
+ Copyright 2006, 2011 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/distributions/nc_f.qbk b/doc/distributions/nc_f.qbk
new file mode 100644
index 0000000..535a5da
--- /dev/null
+++ b/doc/distributions/nc_f.qbk
@@ -0,0 +1,193 @@
+[section:nc_f_dist Noncentral F Distribution]
+
+``#include <boost/math/distributions/non_central_f.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class non_central_f_distribution;
+
+ typedef non_central_f_distribution<> non_central_f;
+
+ template <class RealType, class ``__Policy``>
+ class non_central_f_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructor:
+ non_central_f_distribution(RealType v1, RealType v2, RealType lambda);
+
+ // Accessor to degrees_of_freedom parameters v1 & v2:
+ RealType degrees_of_freedom1()const;
+ RealType degrees_of_freedom2()const;
+
+ // Accessor to non-centrality parameter lambda:
+ RealType non_centrality()const;
+ };
+
+ }} // namespaces
+
+The noncentral F distribution is a generalization of the __F_distrib.
+It is defined as the ratio
+
+ F = (X/v1) / (Y/v2)
+
+where X is a noncentral [chi][super 2]
+random variable with /v1/ degrees of freedom and non-centrality parameter [lambda],
+and Y is a central [chi][super 2] random variable with /v2/ degrees of freedom.
+
+This gives the following PDF:
+
+[equation nc_f_ref1]
+
+where L[sub a][super b](c) is a generalised Laguerre polynomial and B(a,b) is the
+__beta function, or
+
+[equation nc_f_ref2]
+
+The following graph illustrates how the distribution changes
+for different values of [lambda]:
+
+[graph nc_f_pdf]
+
+[h4 Member Functions]
+
+ non_central_f_distribution(RealType v1, RealType v2, RealType lambda);
+
+Constructs a noncentral F distribution with parameters /v1/ and /v2/
+and non-centrality parameter /lambda/.
+
+Requires v1 > 0, v2 > 0 and lambda >= 0, otherwise calls __domain_error.
+
+ RealType degrees_of_freedom1()const;
+
+Returns the parameter /v1/ from which this object was constructed.
+
+ RealType degrees_of_freedom2()const;
+
+Returns the parameter /v2/ from which this object was constructed.
+
+ RealType non_centrality()const;
+
+Returns the non-centrality parameter /lambda/ from which this object was constructed.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0, +[infin]\].
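+
+A minimal usage sketch (the parameter values are arbitrary):
+
+    #include <boost/math/distributions/non_central_f.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::non_central_f ncf(5, 10, 3); // v1 = 5, v2 = 10, lambda = 3.
+       double p = cdf(ncf, 2.5);
+       std::cout << p << std::endl;
+       std::cout << quantile(ncf, p) << std::endl; // Recovers 2.5.
+    }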
+
+[h4 Accuracy]
+
+This distribution is implemented in terms of the
+__non_central_beta_distrib: refer to that distribution for accuracy data.
+
+[h4 Tests]
+
+Since this distribution is implemented by adapting another distribution,
+the tests consist of basic sanity checks computed by the
+[@http://www.r-project.org/ R-2.5.1 Math library statistical
+package] and its pbeta and dbeta functions.
+
+[h4 Implementation]
+
+In the following table /v1/ and /v2/ are the first and second
+degrees of freedom parameters of the distribution, [lambda]
+is the non-centrality parameter,
+/x/ is the random variate, /p/ is the probability, and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Implemented in terms of the non-central beta PDF using the relation:
+
+f(x;v1,v2;[lambda]) = (v1\/v2) / ((1+y)*(1+y)) * g(y\/(1+y);v1\/2,v2\/2;[lambda])
+
+where g(x; a, b; [lambda]) is the non central beta PDF, and:
+
+y = x * v1 \/ v2
+]]
+[[cdf][Using the relation:
+
+p = B[sub y](v1\/2, v2\/2; [lambda])
+
+where B[sub x](a, b; [lambda]) is the noncentral beta distribution CDF and
+
+y = x * v1 \/ v2
+
+]]
+
+[[cdf complement][Using the relation:
+
+q = 1 - B[sub y](v1\/2, v2\/2; [lambda])
+
+where 1 - B[sub x](a, b; [lambda]) is the complement of the
+noncentral beta distribution CDF and
+
+y = x * v1 \/ v2
+
+]]
+[[quantile][Using the relation:
+
+x = (bx \/ (1-bx)) * (v1 \/ v2)
+
+where
+
+bx = Q[sub p][super -1](v1\/2, v2\/2; [lambda])
+
+and
+
+Q[sub p][super -1](v1\/2, v2\/2; [lambda])
+
+is the noncentral beta quantile.
+
+]]
+[[quantile
+
+from the complement][
+Using the relation:
+
+x = (bx \/ (1-bx)) * (v1 \/ v2)
+
+where
+
+bx = QC[sub q][super -1](v1\/2, v2\/2; [lambda])
+
+and
+
+QC[sub q][super -1](v1\/2, v2\/2; [lambda])
+
+is the noncentral beta quantile from the complement.]]
+[[mean][v2 * (v1 + [lambda]) \/ (v1 * (v2 - 2))]]
+[[mode][By numeric maximisation of the PDF.]]
+[[variance][Refer to, [@http://mathworld.wolfram.com/NoncentralF-Distribution.html
+ Weisstein, Eric W. "Noncentral F-Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+[[skewness][Refer to, [@http://mathworld.wolfram.com/NoncentralF-Distribution.html
+ Weisstein, Eric W. "Noncentral F-Distribution." From MathWorld--A Wolfram Web Resource.],
+ and to the [@http://reference.wolfram.com/mathematica/ref/NoncentralFRatioDistribution.html
+ Mathematica documentation] ]]
+[[kurtosis and kurtosis excess]
+ [Refer to, [@http://mathworld.wolfram.com/NoncentralF-Distribution.html
+ Weisstein, Eric W. "Noncentral F-Distribution." From MathWorld--A Wolfram Web Resource.],
+ and to the [@http://reference.wolfram.com/mathematica/ref/NoncentralFRatioDistribution.html
+ Mathematica documentation] ]]
+]
+
+Some analytic properties of noncentral distributions
+(particularly unimodality, and monotonicity of their modes)
+are surveyed and summarized by:
+
+Andrea van Aubel & Wolfgang Gawronski, Applied Mathematics and Computation, 141 (2003) 3-12.
+
+[endsect] [/section:nc_f_dist]
+
+[/ nc_f.qbk
+ Copyright 2008 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/nc_t.qbk b/doc/distributions/nc_t.qbk
new file mode 100644
index 0000000..5318d1a
--- /dev/null
+++ b/doc/distributions/nc_t.qbk
@@ -0,0 +1,224 @@
+[section:nc_t_dist Noncentral T Distribution]
+
+``#include <boost/math/distributions/non_central_t.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class non_central_t_distribution;
+
+ typedef non_central_t_distribution<> non_central_t;
+
+ template <class RealType, class ``__Policy``>
+ class non_central_t_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructor:
+ non_central_t_distribution(RealType v, RealType delta);
+
+ // Accessor to degrees_of_freedom parameter v:
+ RealType degrees_of_freedom()const;
+
+ // Accessor to non-centrality parameter delta:
+ RealType non_centrality()const;
+ };
+
+ }} // namespaces
+
+The noncentral T distribution is a generalization of the __students_t_distrib.
+Let X have a normal distribution with mean [delta] and variance 1, and let
+[nu] S[super 2] have
+a chi-squared distribution with degrees of freedom [nu]. Assume that
+X and S[super 2] are independent. The
+distribution of t[sub [nu]]([delta])=X/S is called a
+noncentral t distribution with degrees of freedom [nu] and noncentrality
+parameter [delta].
+
+This gives the following PDF:
+
+[equation nc_t_ref1]
+
+where [sub 1]F[sub 1](a;b;x) is a confluent hypergeometric function.
+
+The following graph illustrates how the distribution changes
+for different values of [nu] and [delta]:
+
+[graph nc_t_pdf]
+[graph nc_t_cdf]
+
+[h4 Member Functions]
+
+ non_central_t_distribution(RealType v, RealType delta);
+
+Constructs a non-central t distribution with degrees of freedom
+parameter /v/ and non-centrality parameter /delta/.
+
+Requires /v/ > 0 (including positive infinity) and finite /delta/, otherwise calls __domain_error.
+
+ RealType degrees_of_freedom()const;
+
+Returns the parameter /v/ from which this object was constructed.
+
+ RealType non_centrality()const;
+
+Returns the non-centrality parameter /delta/ from which this object was constructed.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[-[infin], +[infin]\].
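+
+A minimal usage sketch (the parameter values are arbitrary):
+
+    #include <boost/math/distributions/non_central_t.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::non_central_t nct(10, 2); // v = 10 degrees of freedom, delta = 2.
+       std::cout << cdf(nct, 2) << std::endl;         // P(T <= 2).
+       std::cout << quantile(nct, 0.95) << std::endl; // 95% quantile.
+    }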
+
+[h4 Accuracy]
+
+The following table shows the peak errors
+(in units of [@http://en.wikipedia.org/wiki/Machine_epsilon epsilon])
+found on various platforms with various floating-point types.
+Unless otherwise specified, any floating-point type that is narrower
+than the one shown will have __zero_error.
+
+[table_non_central_t_CDF]
+
+[table_non_central_t_CDF_complement]
+
+[caution The complexity of the current algorithm is dependent upon
+[delta][super 2]: consequently the time taken to evaluate the CDF
+increases rapidly for [delta] > 500, likewise the accuracy decreases
+rapidly for very large [delta].]
+
+Accuracy for the quantile and PDF functions should be broadly similar.
+The /mode/ is determined numerically and cannot
+in principle be more accurate than the square root of
+floating-point type FPT epsilon, accessed using `boost::math::tools::epsilon<FPT>()`.
+For 64-bit `double`, epsilon is about 1e-16, so the fractional accuracy is limited to 1e-8.
+
+[h4 Tests]
+
+There are two sets of tests of this distribution:
+
+Basic sanity checks compare this implementation to the test values given in
+"Computing discrete mixtures of continuous
+distributions: noncentral chisquare, noncentral t
+and the distribution of the square of the sample
+multiple correlation coefficient."
+Denise Benton, K. Krishnamoorthy,
+Computational Statistics & Data Analysis 43 (2003) 249-267.
+
+Accuracy checks use test data computed with this
+implementation and arbitrary precision interval arithmetic:
+this test data is believed to be accurate to at least 50
+decimal places.
+
+The cases of large (or infinite) [nu] and/or large [delta] have received special
+treatment to avoid catastrophic loss of accuracy.
+New tests have been added to confirm the improvement achieved.
+
+From Boost 1.52, the degrees of freedom [nu] can be +[infin],
+in which case the normal distribution located at [delta]
+(equivalent to the central Student's t distribution)
+is used in its place, for accuracy and speed.
+
+[h4 Implementation]
+
+The CDF is computed using a modification of the method
+described in
+"Computing discrete mixtures of continuous
+distributions: noncentral chisquare, noncentral t
+and the distribution of the square of the sample
+multiple correlation coefficient."
+Denise Benton, K. Krishnamoorthy,
+Computational Statistics & Data Analysis 43 (2003) 249-267.
+
+This uses the following formula for the CDF:
+
+[equation nc_t_ref2]
+
+Where I[sub x](a,b) is the incomplete beta function, and
+[Phi](x) is the normal CDF at x.
+
+Iteration starts at the largest of the Poisson weighting terms
+(at i = [delta][super 2] / 2) and then proceeds in both directions
+as per Benton and Krishnamoorthy's paper.
+
+Alternatively, by considering what happens when t = [infin], we have
+x = 1, and therefore I[sub x](a,b) = 1 and:
+
+[equation nc_t_ref3]
+
+From this we can easily show that:
+
+[equation nc_t_ref4]
+
+and therefore we have a means to compute either the probability or its
+complement directly without the risk of cancellation error. The
+crossover criterion for choosing whether to calculate the CDF or
+its complement is the same as for the
+__non_central_beta_distrib.
+
+The PDF can be computed by a very similar method using:
+
+[equation nc_t_ref5]
+
+Where I[sub x][super '](a,b) is the derivative of the incomplete beta function.
+
+For both the PDF and CDF we switch to approximating the distribution by a
+Student's t distribution centred on [delta] when [nu] is very large.
+The crossover location appears to be when [delta]/(4[nu]) < [epsilon],
+this location was estimated by inspection of equation 2.6 in
+"A Comparison of Approximations To Percentiles of the
+Noncentral t-Distribution". H. Sahai and M. M. Ojeda,
+Revista Investigacion Operacional Vol 21, No 2, 2000, page 123.
+
+Equation 2.6 is a Cornish-Fisher expansion by van Eeden and Johnson.
+The second term includes the ratio [delta]/(4[nu]),
+so when this term becomes negligible, this and the following terms can be ignored,
+leaving just Student's t distribution centred on [delta].
+
+This was also confirmed by experimental testing.
+
+See also
+
+* "Some Approximations to the Percentage Points of the Noncentral
+t-Distribution". C. van Eeden. International Statistical Review, 29, 4-31.
+
+* "Continuous Univariate Distributions". N.L. Johnson, S. Kotz and
+N. Balakrishnan. 1995. John Wiley and Sons New York.
+
+The quantile is calculated via the usual
+__root_finding_without_derivatives method
+with the initial guess taken as the quantile of a normal approximation
+to the noncentral T.
+
+There is no closed form for the mode, so this is computed via
+functional maximisation of the PDF.
+
+The remaining functions (mean, variance etc) are implemented
+using the formulas given in
+Weisstein, Eric W. "Noncentral Student's t-Distribution."
+From MathWorld--A Wolfram Web Resource.
+[@http://mathworld.wolfram.com/NoncentralStudentst-Distribution.html
+http://mathworld.wolfram.com/NoncentralStudentst-Distribution.html]
+and in the
+[@http://reference.wolfram.com/mathematica/ref/NoncentralStudentTDistribution.html
+Mathematica documentation].
+
+Some analytic properties of noncentral distributions
+(particularly unimodality, and monotonicity of their modes)
+are surveyed and summarized by:
+
+Andrea van Aubel & Wolfgang Gawronski, Applied Mathematics and Computation, 141 (2003) 3-12.
+
+[endsect] [/section:nc_t_dist]
+
+[/ nc_t.qbk
+ Copyright 2008, 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/negative_binomial.qbk b/doc/distributions/negative_binomial.qbk
new file mode 100644
index 0000000..0c5a4be
--- /dev/null
+++ b/doc/distributions/negative_binomial.qbk
@@ -0,0 +1,373 @@
+[section:negative_binomial_dist Negative Binomial Distribution]
+
+``#include <boost/math/distributions/negative_binomial.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class negative_binomial_distribution;
+
+ typedef negative_binomial_distribution<> negative_binomial;
+
+ template <class RealType, class ``__Policy``>
+ class negative_binomial_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Constructor from successes and success_fraction:
+ negative_binomial_distribution(RealType r, RealType p);
+
+ // Parameter accessors:
+ RealType success_fraction() const;
+ RealType successes() const;
+
+ // Bounds on success fraction:
+ static RealType find_lower_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType probability); // alpha
+ static RealType find_upper_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType probability); // alpha
+
+ // Estimate min/max number of trials:
+ static RealType find_minimum_number_of_trials(
+ RealType k, // Number of failures.
+ RealType p, // Success fraction.
+ RealType probability); // Probability threshold alpha.
+ static RealType find_maximum_number_of_trials(
+ RealType k, // Number of failures.
+ RealType p, // Success fraction.
+ RealType probability); // Probability threshold alpha.
+ };
+
+ }} // namespaces
+
+The class type `negative_binomial_distribution` represents a
+[@http://en.wikipedia.org/wiki/Negative_binomial_distribution negative_binomial distribution]:
+it is used when there are exactly two mutually exclusive outcomes of a
+[@http://en.wikipedia.org/wiki/Bernoulli_trial Bernoulli trial]:
+these outcomes are labelled "success" and "failure".
+
+For k + r Bernoulli trials each with success fraction p, the
+negative_binomial distribution gives the probability of observing
+k failures and r successes with success on the last trial.
+The negative_binomial distribution
+assumes that success_fraction p is fixed for all (k + r) trials.
+
+[note The random variable for the negative binomial distribution is the number of trials,
+(the number of successes is a fixed property of the distribution)
+whereas for the binomial,
+the random variable is the number of successes, for a fixed number of trials.]
+
+It has the PDF:
+
+[equation neg_binomial_ref]
+
+The following graph illustrate how the PDF varies as the success fraction
+/p/ changes:
+
+[graph negative_binomial_pdf_1]
+
+Alternatively, this graph shows how the shape of the PDF varies as
+the number of successes changes:
+
+[graph negative_binomial_pdf_2]
+
+[h4 Related Distributions]
+
+The name negative binomial distribution is reserved by some for the
+case where the successes parameter r is an integer.
+This integer version is also called the
+[@http://mathworld.wolfram.com/PascalDistribution.html Pascal distribution].
+
+This implementation uses real numbers for the computation throughout
+(because it uses the *real-valued* incomplete beta function family of functions).
+This real-valued version is also called the Polya Distribution.
+
+The Polya distribution is a generalization of the Pascal distribution
+(for which the successes parameter r is an integer): to obtain the Pascal
+distribution you must ensure that an integer value is provided for r,
+and take integer values (floor or ceiling) from functions that return
+a number of successes.
+
+For large values of r (successes), the negative binomial distribution
+converges to the Poisson distribution.
+
+The geometric distribution is a special case
+where the successes parameter r = 1,
+so that only a single (first) success is required:
+geometric(p) = negative_binomial(1, p).
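+
+This identity is easy to check numerically; for example (a sketch, with an
+arbitrary choice of p and k):
+
+    #include <boost/math/distributions/geometric.hpp>
+    #include <boost/math/distributions/negative_binomial.hpp>
+    #include <cassert>
+    #include <cmath>
+
+    int main()
+    {
+       double p = 0.25, k = 3;
+       double a = pdf(boost::math::geometric(p), k);
+       double b = pdf(boost::math::negative_binomial(1, p), k);
+       assert(std::fabs(a - b) < 1e-15); // equal up to rounding
+    }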
+
+The Poisson distribution is a limiting case for a large number of successes:
+
+poisson([lambda]) = lim [sub r [rarr] [infin]] [space] negative_binomial(r, r / ([lambda] + r))
+
+[discrete_quantile_warning Negative Binomial]
+
+[h4 Member Functions]
+
+[h5 Construct]
+
+ negative_binomial_distribution(RealType r, RealType p);
+
+Constructor: /r/ is the total number of successes, /p/ is the
+probability of success of a single trial.
+
+Requires: `r > 0` and `0 <= p <= 1`.
+
+[h5 Accessors]
+
+ RealType success_fraction() const; // successes / trials (0 <= p <= 1)
+
+Returns the parameter /p/ from which this distribution was constructed.
+
+ RealType successes() const; // required successes (r > 0)
+
+Returns the parameter /r/ from which this distribution was constructed.
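+
+For example, a minimal construction sketch (the parameter values are
+illustrative only):
+
+    #include <boost/math/distributions/negative_binomial.hpp>
+
+    boost::math::negative_binomial nb(5, 0.4); // r = 5 successes, p = 0.4
+    // nb.successes() == 5, nb.success_fraction() == 0.4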
+
+The best method of calculation for the following functions is disputed:
+see __binomial_distrib for more discussion.
+
+[h5 Lower Bound on Parameter p]
+
+ static RealType find_lower_bound_on_p(
+ RealType failures,
+ RealType successes,
+ RealType probability) // (0 <= alpha <= 1), 0.05 equivalent to 95% confidence.
+
+Returns a *lower bound* on the success fraction:
+
+[variablelist
+[[failures][The total number of failures before the ['r]th success.]]
+[[successes][The number of successes required.]]
+[[alpha][The largest acceptable probability that the true value of
+ the success fraction is [*less than] the value returned.]]
+]
+
+For example, if you observe /k/ failures and /r/ successes from /n/ = k + r trials
+the best estimate for the success fraction is simply ['r/n], but if you
+want to be 95% sure that the true value is [*greater than] some value,
+['p[sub min]], then:
+
+ p``[sub min]`` = negative_binomial_distribution<RealType>::find_lower_bound_on_p(
+ failures, successes, 0.05);
+
+[link math_toolkit.stat_tut.weg.neg_binom_eg.neg_binom_conf See negative binomial confidence interval example.]
+
+This function uses the Clopper-Pearson method of computing the lower bound on the
+success fraction. Whilst many texts refer to this method as giving an "exact"
+result, in practice it produces an interval that guarantees ['at least] the
+coverage required, and may produce pessimistic estimates for some combinations
+of /failures/ and /successes/. See:
+
+[@http://www.ucs.louisiana.edu/~kxk4695/Discrete_new.pdf
+Yong Cai and K. Krishnamoorthy, A Simple Improved Inferential Method for Some Discrete Distributions.
+Computational statistics and data analysis, 2005, vol. 48, no3, 605-621].
+
+[h5 Upper Bound on Parameter p]
+
+ static RealType find_upper_bound_on_p(
+ RealType trials,
+ RealType successes,
+ RealType alpha); // (0 <= alpha <= 1), 0.05 equivalent to 95% confidence.
+
+Returns an *upper bound* on the success fraction:
+
+[variablelist
+[[trials][The total number of trials conducted.]]
+[[successes][The number of successes that occurred.]]
+[[alpha][The largest acceptable probability that the true value of
+ the success fraction is [*greater than] the value returned.]]
+]
+
+For example, if you observe /k/ successes from /n/ trials the
+best estimate for the success fraction is simply ['k/n], but if you
+want to be 95% sure that the true value is [*less than] some value,
+['p[sub max]], then:
+
+ p``[sub max]`` = negative_binomial_distribution<RealType>::find_upper_bound_on_p(
+ r, k, 0.05);
+
+[link math_toolkit.stat_tut.weg.neg_binom_eg.neg_binom_conf See negative binomial confidence interval example.]
+
+This function uses the Clopper-Pearson method of computing the upper bound on the
+success fraction. Whilst many texts refer to this method as giving an "exact"
+result, in practice it produces an interval that guarantees ['at least] the
+coverage required, and may produce pessimistic estimates for some combinations
+of /failures/ and /successes/. See:
+
+[@http://www.ucs.louisiana.edu/~kxk4695/Discrete_new.pdf
+Yong Cai and K. Krishnamoorthy, A Simple Improved Inferential Method for Some Discrete Distributions.
+Computational statistics and data analysis, 2005, vol. 48, no3, 605-621].
+
+[h5 Estimating Number of Trials to Ensure at Least a Certain Number of Failures]
+
+ static RealType find_minimum_number_of_trials(
+ RealType k, // number of failures.
+ RealType p, // success fraction.
+ RealType alpha); // probability threshold (0.05 equivalent to 95%).
+
+This function estimates the number of trials required to achieve a certain
+probability that [*more than k failures will be observed].
+
+[variablelist
+[[k][The target number of failures to be observed.]]
+[[p][The probability of ['success] for each trial.]]
+[[alpha][The maximum acceptable risk that only k failures or fewer will be observed.]]
+]
+
+For example:
+
+ negative_binomial_distribution<RealType>::find_minimum_number_of_trials(10, 0.5, 0.05);
+
+Returns the smallest number of trials we must conduct to be 95% sure
+of seeing 10 failures that occur with frequency one half.
+
+[link math_toolkit.stat_tut.weg.neg_binom_eg.neg_binom_size_eg Worked Example.]
+
+This function uses numeric inversion of the negative binomial distribution
+to obtain the result: another interpretation of the result is that it finds
+the number of trials (successes + failures) that will lead to an /alpha/ probability
+of observing k failures or fewer.
+
+[h5 Estimating Number of Trials to Ensure a Maximum Number of Failures or Less]
+
+ static RealType find_maximum_number_of_trials(
+ RealType k, // number of failures.
+ RealType p, // success fraction.
+ RealType alpha); // probability threshold (0.05 equivalent to 95%).
+
+This function estimates the maximum number of trials we can conduct and achieve
+a certain probability that [*k failures or fewer will be observed].
+
+[variablelist
+[[k][The maximum number of failures to be observed.]]
+[[p][The probability of ['success] for each trial.]]
+[[alpha][The maximum acceptable ['risk] that more than k failures will be observed.]]
+]
+
+For example:
+
+ negative_binomial_distribution<RealType>::find_maximum_number_of_trials(0, 1.0-1.0/1000000, 0.05);
+
+Returns the largest number of trials we can conduct and still be 95% sure
+of seeing no failures that occur with frequency one in one million.
+
+This function uses numeric inversion of the negative binomial distribution
+to obtain the result: another interpretation of the result is that it finds
+the number of trials (successes + failures) that will lead to an /alpha/ probability
+of observing more than k failures.
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+However, it's worth taking a moment to define what these actually mean in
+the context of this distribution:
+
+[table Meaning of the non-member accessors.
+[[Function][Meaning]]
+[[__pdf]
+ [The probability of obtaining [*exactly k failures] from k+r trials
+ with success fraction p. For example:
+
+``pdf(negative_binomial(r, p), k)``]]
+[[__cdf]
+ [The probability of obtaining [*k failures or fewer] from k+r trials
+ with success fraction p and success on the last trial. For example:
+
+``cdf(negative_binomial(r, p), k)``]]
+[[__ccdf]
+ [The probability of obtaining [*more than k failures] from k+r trials
+ with success fraction p and success on the last trial. For example:
+
+``cdf(complement(negative_binomial(r, p), k))``]]
+[[__quantile]
+ [The [*greatest] number of failures k expected to be observed from k+r trials
+ with success fraction p, at probability P. Note that the value returned
+ is a real-number, and not an integer. Depending on the use case you may
+ want to take either the floor or ceiling of the real result. For example:
+
+``quantile(negative_binomial(r, p), P)``]]
+[[__quantile_c]
+ [The [*smallest] number of failures k expected to be observed from k+r trials
+ with success fraction p, at probability P. Note that the value returned
+ is a real-number, and not an integer. Depending on the use case you may
+ want to take either the floor or ceiling of the real result. For example:
+ ``quantile(complement(negative_binomial(r, p), P))``]]
+]
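+
+As the table notes, the quantile functions return a real number rather than
+an integer; a sketch of applying explicit rounding follows (the parameter
+values are illustrative only, and the rounding actually applied to discrete
+quantiles also depends on the policy in force, as per the warning above):
+
+    #include <boost/math/distributions/negative_binomial.hpp>
+    #include <cmath>
+
+    boost::math::negative_binomial nb(5, 0.5); // r = 5, p = 0.5
+    double k = quantile(nb, 0.95);             // number of failures at P = 0.95
+    double k_up = std::ceil(k);    // round up for a conservative bound
+    double k_down = std::floor(k); // round down for an at-most bound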
+
+[h4 Accuracy]
+
+This distribution is implemented using the
+incomplete beta functions __ibeta and __ibetac:
+please refer to these functions for information on accuracy.
+
+[h4 Implementation]
+
+In the following table, /p/ is the probability that any one trial will
+be successful (the success fraction), /r/ is the number of successes,
+/k/ is the number of failures, /P/ is the probability, /Q = 1-P/,
+and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][pdf = exp(lgamma(r + k) - lgamma(r) - lgamma(k+1)) * pow(p, r) * pow((1-p), k)
+
+Implementation is in terms of __ibeta_derivative:
+
+(p/(r + k)) * ibeta_derivative(r, static_cast<RealType>(k+1), p)
+The function __ibeta_derivative is used here, since it has already
+been optimised for the lowest possible error - indeed this is really
+just a thin wrapper around part of the internals of the incomplete
+beta function.
+]]
+[[cdf][Using the relation:
+
+cdf = I[sub p](r, k+1) = ibeta(r, k+1, p)
+
+= ibeta(r, static_cast<RealType>(k+1), p)]]
+[[cdf complement][Using the relation:
+
+1 - cdf = I[sub p](k+1, r)
+
+= ibetac(r, static_cast<RealType>(k+1), p)
+]]
+[[quantile][ibeta_invb(r, p, P) - 1]]
+[[quantile from the complement][ibetac_invb(r, p, Q) - 1]]
+[[mean][ `r(1-p)/p` ]]
+[[variance][ `r(1-p)/(p*p)` ]]
+[[mode][`floor((r-1) * (1 - p)/p)`]]
+[[skewness][`(2 - p) / sqrt(r * (1 - p))`]]
+[[kurtosis][`3 + 6/r + (p*p)/(r*(1-p))`]]
+[[kurtosis excess][`6/r + (p*p)/(r*(1-p))`]]
+[[parameter estimation member functions][]]
+[[`find_lower_bound_on_p`][ibeta_inv(successes, failures + 1, alpha)]]
+[[`find_upper_bound_on_p`][ibetac_inv(successes, failures, alpha) plus see comments in code.]]
+[[`find_minimum_number_of_trials`][ibeta_inva(k + 1, p, alpha)]]
+[[`find_maximum_number_of_trials`][ibetac_inva(k + 1, p, alpha)]]
+]
+
+Implementation notes:
+
+* The real concept type (that deliberately lacks the Lanczos approximation),
+was found to take several minutes to evaluate some extreme test values,
+so the test has been disabled for this type.
+
+* Much greater speed, and perhaps greater accuracy,
+might be achieved for extreme values by using a normal approximation.
+This has NOT been tested or implemented.
+
+[endsect][/section:negative_binomial_dist Negative Binomial]
+
+[/ negative_binomial.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/negative_binomial_example.qbk b/doc/distributions/negative_binomial_example.qbk
new file mode 100644
index 0000000..4c992bd
--- /dev/null
+++ b/doc/distributions/negative_binomial_example.qbk
@@ -0,0 +1,192 @@
+[section:neg_binom_eg Negative Binomial Distribution Examples]
+
+(See also the reference documentation for the __negative_binomial_distrib.)
+
+[section:neg_binom_conf Calculating Confidence Limits on the Frequency of Occurrence for the Negative Binomial Distribution]
+
+Imagine you have a process that follows a negative binomial distribution:
+for each trial conducted, an event either occurs or it does not, the two
+outcomes being referred to as "success" and "failure". The frequency with which successes occur
+is variously referred to as the
+success fraction, success ratio, success percentage, occurrence frequency, or probability of occurrence.
+
+If, by experiment, you want to measure the success fraction,
+then the best estimate is given simply
+by /k/ \/ /N/, for /k/ successes out of /N/ trials.
+
+However our confidence in that estimate will be shaped by how many trials were conducted,
+and how many successes were observed. The static member functions
+`negative_binomial_distribution<>::find_lower_bound_on_p` and
+`negative_binomial_distribution<>::find_upper_bound_on_p`
+allow you to calculate the confidence intervals for your estimate of the success fraction.
+
+The sample program [@../../example/neg_binom_confidence_limits.cpp
+neg_binom_confidence_limits.cpp] illustrates their use.
+
+[import ../../example/neg_binom_confidence_limits.cpp]
+
+[neg_binomial_confidence_limits]
+Let's see some sample output for a 1 in 10
+success ratio, first for a mere 20 trials:
+
+[pre'''______________________________________________
+2-Sided Confidence Limits For Success Fraction
+______________________________________________
+Number of trials = 20
+Number of successes = 2
+Number of failures = 18
+Observed frequency of occurrence = 0.1
+___________________________________________
+Confidence Lower Upper
+ Value (%) Limit Limit
+___________________________________________
+ 50.000 0.04812 0.13554
+ 75.000 0.03078 0.17727
+ 90.000 0.01807 0.22637
+ 95.000 0.01235 0.26028
+ 99.000 0.00530 0.33111
+ 99.900 0.00164 0.41802
+ 99.990 0.00051 0.49202
+ 99.999 0.00016 0.55574
+''']
+
+As you can see, even at the 95% confidence level the bounds (0.012 to 0.26) are
+really very wide, and very asymmetric about the observed value 0.1.
+
+Compare that with the program output for a much larger
+sample of 2000 trials:
+
+[pre'''______________________________________________
+2-Sided Confidence Limits For Success Fraction
+______________________________________________
+Number of trials = 2000
+Number of successes = 200
+Number of failures = 1800
+Observed frequency of occurrence = 0.1
+___________________________________________
+Confidence Lower Upper
+ Value (%) Limit Limit
+___________________________________________
+ 50.000 0.09536 0.10445
+ 75.000 0.09228 0.10776
+ 90.000 0.08916 0.11125
+ 95.000 0.08720 0.11352
+ 99.000 0.08344 0.11802
+ 99.900 0.07921 0.12336
+ 99.990 0.07577 0.12795
+ 99.999 0.07282 0.13206
+''']
+
+Now even when the confidence level is very high, the limits (at 99.999%, 0.07 to 0.13) are really
+quite close and nearly symmetric to the observed value of 0.1.
+
+[endsect][/section:neg_binom_conf Calculating Confidence Limits on the Frequency of Occurrence]
+
+[section:neg_binom_size_eg Estimating Sample Sizes for the Negative Binomial.]
+
+Imagine you have an event
+(let's call it a "failure" - though we could equally well call it a success if we felt it was a 'good' event)
+that you know will occur in 1 in N trials. You may want to know how many trials you need to
+conduct to be P% sure of observing at least k such failures.
+If the failure events follow a negative binomial
+distribution (each trial either succeeds or fails)
+then the static member function `negative_binomial_distribution<>::find_minimum_number_of_trials`
+can be used to estimate the minimum number of trials required to be P% sure
+of observing the desired number of failures.
+
+The example program
+[@../../example/neg_binomial_sample_sizes.cpp neg_binomial_sample_sizes.cpp]
+demonstrates its usage.
+
+[import ../../example/neg_binomial_sample_sizes.cpp]
+[neg_binomial_sample_sizes]
+
+[note Since we're calculating the /minimum/ number of trials required,
+we'll err on the safe side and take the ceiling of the result.
+Had we been calculating the
+/maximum/ number of trials permitted to observe less than a certain
+number of /failures/ then we would have taken the floor instead. We
+would also have called `find_maximum_number_of_trials` like this:
+``
+ floor(negative_binomial::find_maximum_number_of_trials(failures, p, alpha[i]))
+``
+which would give us the largest number of trials we could conduct and
+still be P% sure of observing /failures/ or fewer failure events, when the
+probability of success is /p/.]
+
+We'll finish off by looking at some sample output, firstly suppose
+we wish to observe at least 5 "failures" with a 50/50 (0.5) chance of
+success or failure:
+
+[pre
+'''Target number of failures = 5, Success fraction = 50%
+
+____________________________
+Confidence Min Number
+ Value (%) Of Trials
+____________________________
+ 50.000 11
+ 75.000 14
+ 90.000 17
+ 95.000 18
+ 99.000 22
+ 99.900 27
+ 99.990 31
+ 99.999 36
+'''
+]
+
+So 18 trials or more would yield a 95% chance that at least our 5
+required failures would be observed.
+
+Compare that to what happens if the success ratio is 90%:
+
+[pre'''Target number of failures = 5.000, Success fraction = 90.000%
+
+____________________________
+Confidence Min Number
+ Value (%) Of Trials
+____________________________
+ 50.000 57
+ 75.000 73
+ 90.000 91
+ 95.000 103
+ 99.000 127
+ 99.900 159
+ 99.990 189
+ 99.999 217
+''']
+
+So now 103 trials are required to observe at least 5 failures with
+95% certainty.
+
+[endsect] [/section:neg_binom_size_eg Estimating Sample Sizes.]
+
+[section:negative_binomial_example1 Negative Binomial Sales Quota Example.]
+
+This example program
+[@../../example/negative_binomial_example1.cpp negative_binomial_example1.cpp (full source code)]
+demonstrates a simple use to find the probability of meeting a sales quota.
+
+[import ../../example/negative_binomial_example1.cpp]
+[negative_binomial_eg1_1]
+[negative_binomial_eg1_2]
+
+[endsect] [/section:negative_binomial_example1]
+
+[section:negative_binomial_example2 Negative Binomial Table Printing Example.]
+Example program showing output of a table of values of cdf and pdf for various k failures.
+[import ../../example/negative_binomial_example2.cpp]
+[neg_binomial_example2]
+[neg_binomial_example2_1]
+[endsect] [/section:negative_binomial_example2 Negative Binomial Table Printing Example.]
+
+[endsect] [/section:neg_binom_eg Negative Binomial Distribution Examples]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/non_members.qbk b/doc/distributions/non_members.qbk
new file mode 100644
index 0000000..653d249
--- /dev/null
+++ b/doc/distributions/non_members.qbk
@@ -0,0 +1,406 @@
+[section:nmp Non-Member Properties]
+
+Properties that are common to all distributions are accessed via non-member
+getter functions: non-membership allows more of these functions to be added over time,
+as the need arises. Unfortunately the literature uses many different and
+confusing names to refer to a rather small number of actual concepts; refer
+to the [link math_toolkit.dist_ref.nmp.concept_index concept index] to find the property you
+want by the name you are most familiar with.
+Or use the [link math_toolkit.dist_ref.nmp.function_index function index]
+to go straight to the function you want if you already know its name.
+
+[h4:function_index Function Index]
+
+* __cdf.
+* __ccdf.
+* __chf.
+* __hazard.
+* __kurtosis.
+* __kurtosis_excess
+* __mean.
+* __median.
+* __mode.
+* __pdf.
+* __range.
+* __quantile.
+* __quantile_c.
+* __skewness.
+* __sd.
+* __support.
+* __variance.
+
+[h4:concept_index Conceptual Index]
+
+* __ccdf.
+* __cdf.
+* __chf.
+* [link math_toolkit.dist_ref.nmp.cdf_inv Inverse Cumulative Distribution Function].
+* [link math_toolkit.dist_ref.nmp.survival_inv Inverse Survival Function].
+* __hazard
+* [link math_toolkit.dist_ref.nmp.lower_critical Lower Critical Value].
+* __kurtosis.
+* __kurtosis_excess
+* __mean.
+* __median.
+* __mode.
+* [link math_toolkit.dist_ref.nmp.cdfPQ P].
+* [link math_toolkit.dist_ref.nmp.percent Percent Point Function].
+* __pdf.
+* [link math_toolkit.dist_ref.nmp.pmf Probability Mass Function].
+* __range.
+* [link math_toolkit.dist_ref.nmp.cdfPQ Q].
+* __quantile.
+* [link math_toolkit.dist_ref.nmp.quantile_c Quantile from the complement of the probability].
+* __skewness.
+* __sd
+* [link math_toolkit.dist_ref.nmp.survival Survival Function].
+* [link math_toolkit.dist_ref.nmp.support support].
+* [link math_toolkit.dist_ref.nmp.upper_critical Upper Critical Value].
+* __variance.
+
+[h4:cdf Cumulative Distribution Function]
+
+ template <class RealType, class ``__Policy``>
+ RealType cdf(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist, const RealType& x);
+
+The __cdf is the probability that
+the variable takes a value less than or equal to x. It is equivalent
+to the integral from -infinity to x of the __pdf.
+
+This function may return a __domain_error if the random variable is outside
+the defined range for the distribution.
+
+For example, the following graph shows the cdf for the
+normal distribution:
+
+[$../graphs/cdf.png]
+
+[h4:ccdf Complement of the Cumulative Distribution Function]
+
+ template <class Distribution, class RealType>
+ RealType cdf(const ``['Unspecified-Complement-Type]``<Distribution, RealType>& comp);
+
+The complement of the __cdf
+is the probability that
+the variable takes a value greater than x. It is equivalent
+to the integral from x to infinity of the __pdf, or 1 minus the __cdf of x.
+
+This is also known as the survival function.
+
+This function may return a __domain_error if the random variable is outside
+the defined range for the distribution.
+
+In this library, it is obtained by wrapping the arguments to the `cdf`
+function in a call to `complement`, for example:
+
+ // standard normal distribution object:
+ boost::math::normal norm;
+ // print survival function for x=2.0:
+ std::cout << cdf(complement(norm, 2.0)) << std::endl;
+
+For example, the following graph shows the __complement of the cdf for the
+normal distribution:
+
+[$../graphs/survival.png]
+
+See __why_complements for why the complement is useful and when it should be used.
+
+[h4:hazard Hazard Function]
+
+ template <class RealType, class ``__Policy``>
+ RealType hazard(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist, const RealType& x);
+
+Returns the __hazard of /x/ and distribution /dist/.
+
+This function may return a __domain_error if the random variable is outside
+the defined range for the distribution.
+
+[equation hazard]
+
+[caution
+Some authors refer to this as the conditional failure
+density function rather than the hazard function.]
+
+[h4:chf Cumulative Hazard Function]
+
+ template <class RealType, class ``__Policy``>
+ RealType chf(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist, const RealType& x);
+
+Returns the __chf of /x/ and distribution /dist/.
+
+This function may return a __domain_error if the random variable is outside
+the defined range for the distribution.
+
+[equation chf]
+
+[caution
+Some authors refer to this as simply the "Hazard Function".]
+
+[h4:mean mean]
+
+ template<class RealType, class ``__Policy``>
+ RealType mean(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the mean of the distribution /dist/.
+
+This function may return a __domain_error if the distribution does not have
+a defined mean (for example the Cauchy distribution).
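+
+For example, with the default error-handling policy the following sketch
+prints the error message rather than a number:
+
+    #include <boost/math/distributions/cauchy.hpp>
+    #include <iostream>
+    #include <stdexcept>
+
+    int main()
+    {
+       try
+       {
+          // The Cauchy distribution has no mean, so this raises a
+          // domain_error, thrown as std::domain_error by default:
+          std::cout << mean(boost::math::cauchy()) << std::endl;
+       }
+       catch(const std::domain_error& e)
+       {
+          std::cout << e.what() << std::endl;
+       }
+    }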
+
+[h4:median median]
+
+ template<class RealType, class ``__Policy``>
+ RealType median(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the median of the distribution /dist/.
+
+[h4:mode mode]
+
+ template <class RealType, class ``__Policy``>
+ RealType mode(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the mode of the distribution /dist/.
+
+This function may return a __domain_error if the distribution does not have
+a defined mode.
+
+[h4:pdf Probability Density Function]
+
+ template <class RealType, class ``__Policy``>
+ RealType pdf(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist, const RealType& x);
+
+For a continuous distribution, the probability density function (pdf) returns
+the probability density at the value x.
+Since for continuous distributions the probability at a single point is actually zero,
+a probability is better expressed as the integral of the pdf between two points:
+see the __cdf.
+
+For a discrete distribution, the pdf is the probability that the
+variate takes the value x.
+
+This function may return a __domain_error if the random variable is outside
+the defined range for the distribution.
+
+For example, for a standard normal distribution the pdf looks like this:
+
+[$../graphs/pdf.png]
+
+[h4:range Range]
+
+ template<class RealType, class ``__Policy``>
+ std::pair<RealType, RealType> range(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the valid range of the random variable over distribution /dist/.
+
+[h4:quantile Quantile]
+
+ template <class RealType, class ``__Policy``>
+ RealType quantile(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist, const RealType& p);
+
+The quantile is best viewed as the inverse of the __cdf: it returns
+a value /x/ such that `cdf(dist, x) == p`.
+
+This is also known as the /percent point function/, or /percentile/, or /fractile/;
+it is also the same as calculating the ['lower critical value] of a distribution.
+
+This function returns a __domain_error if the probability lies outside [0,1].
+The function may return an __overflow_error if there is no finite value
+that has the specified probability.
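+
+For example, a sketch of the round trip between quantile and cdf:
+
+    #include <boost/math/distributions/normal.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::normal norm;               // standard normal
+       double x = quantile(norm, 0.975);       // about 1.95996
+       std::cout << cdf(norm, x) << std::endl; // 0.975, up to rounding
+    }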
+
+The following graph shows the quantile function for a standard normal
+distribution:
+
+[$../graphs/quantile.png]
+
+[h4:quantile_c Quantile from the complement of the probability.]
+See also [link math_toolkit.stat_tut.overview.complements complements].
+
+
+ template <class Distribution, class RealType>
+ RealType quantile(const ``['Unspecified-Complement-Type]``<Distribution, RealType>& comp);
+
+This is the inverse of the __ccdf. It is calculated by wrapping
+the arguments in a call to the quantile function in a call to
+/complement/. For example:
+
+ // define a standard normal distribution:
+ boost::math::normal norm;
+ // print the value of x for which the complement
+ // of the probability is 0.05:
+ std::cout << quantile(complement(norm, 0.05)) << std::endl;
+
+The function computes a value /x/ such that
+`cdf(complement(dist, x)) == q` where /q/ is complement of the
+probability.
+
+[link why_complements Why complements?]
+
+This function is also called the inverse survival function, and is the
+same as calculating the ['upper critical value] of a distribution.
+
+This function returns a __domain_error if the probability lies outside [0,1].
+The function may return an __overflow_error if there is no finite value
+that has the specified probability.
+
+The following graph shows the inverse survival function for the normal
+distribution:
+
+[$../graphs/survival_inv.png]
+
+[h4:sd Standard Deviation]
+
+ template <class RealType, class ``__Policy``>
+ RealType standard_deviation(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the standard deviation of distribution /dist/.
+
+This function may return a __domain_error if the distribution does not have
+a defined standard deviation.
+
+[h4:support support]
+
+ template<class RealType, class ``__Policy``>
+ std::pair<RealType, RealType> support(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the supported range of the random variable over the distribution /dist/.
+
+The distribution is said to be 'supported' over a range that is
+[@http://en.wikipedia.org/wiki/Probability_distribution
+ "the smallest closed set whose complement has probability zero"].
+Non-mathematicians might say it means the 'interesting' smallest range
+of random variate x that has the cdf going from zero to unity.
+Outside are uninteresting zones where the pdf is zero, and the cdf zero or unity.
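+
+For example, a sketch of querying both `range` and `support` (the standard
+normal is chosen arbitrarily here):
+
+    #include <boost/math/distributions/normal.hpp>
+    #include <utility>
+
+    boost::math::normal norm;
+    std::pair<double, double> r = range(norm);   // full theoretical range
+    std::pair<double, double> s = support(norm); // range where the pdf is non-zero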
+
+[h4:variance Variance]
+
+ template <class RealType, class ``__Policy``>
+ RealType variance(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the variance of the distribution /dist/.
+
+This function may return a __domain_error if the distribution does not have
+a defined variance.
+
+[h4:skewness Skewness]
+
+ template <class RealType, class ``__Policy``>
+ RealType skewness(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the skewness of the distribution /dist/.
+
+This function may return a __domain_error if the distribution does not have
+a defined skewness.
+
+[h4:kurtosis Kurtosis]
+
+ template <class RealType, class ``__Policy``>
+ RealType kurtosis(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the 'proper' kurtosis (normalized fourth moment) of the distribution /dist/.
+
+kurtosis = [beta][sub 2][space]= [mu][sub 4][space] / [mu][sub 2][super 2]
+
+Where [mu][sub i][space] is the i'th central moment of the distribution, and
+in particular [mu][sub 2][space] is the variance of the distribution.
+
+The kurtosis is a measure of the "peakedness" of a distribution.
+
+Note that the literature definition of kurtosis is confusing.
+The definition used here is that used by for example
+[@http://mathworld.wolfram.com/Kurtosis.html Wolfram MathWorld]
+(that includes a table of formulae for kurtosis excess for various distributions)
+but NOT the definition of
+[@http://en.wikipedia.org/wiki/Kurtosis kurtosis used by Wikipedia]
+which treats "kurtosis" and "kurtosis excess" as the same quantity.
+
+ kurtosis_excess = 'proper' kurtosis - 3
+
+This subtraction of 3 is convenient so that the ['kurtosis excess]
+of a normal distribution is zero.
+
+This function may return a __domain_error if the distribution does not have
+a defined kurtosis.
+
+'Proper' kurtosis can have a value from zero to + infinity.
+
+[h4:kurtosis_excess Kurtosis excess]
+
+ template <class RealType, class ``__Policy``>
+ RealType kurtosis_excess(const ``['Distribution-Type]``<RealType, ``__Policy``>& dist);
+
+Returns the kurtosis excess of the distribution /dist/.
+
+kurtosis excess = [gamma][sub 2][space]= [mu][sub 4][space] / [mu][sub 2][super 2][space]- 3 = kurtosis - 3
+
+Where [mu][sub i][space] is the i'th central moment of the distribution, and
+in particular [mu][sub 2][space] is the variance of the distribution.
+
+The kurtosis excess is a measure of the "peakedness" of a distribution, and
+is more widely used than the "kurtosis proper". It is defined so that
+the kurtosis excess of a normal distribution is zero.
+
+This function may return a __domain_error if the distribution does not have
+a defined kurtosis excess.
+
+Kurtosis excess can have a value from -2 to + infinity.
+
+ kurtosis = kurtosis_excess + 3;
+
+The kurtosis excess of a normal distribution is zero.
+
+[h4:cdfPQ P and Q]
+
+The terms P and Q are sometimes used to refer to the __cdf
+and its [link math_toolkit.dist_ref.nmp.ccdf complement] respectively.
+Lowercase p and q are sometimes used to refer to the values returned
+by these functions.
+
+[h4:percent Percent Point Function or Percentile]
+
+The percent point function, also known as the percentile, is the same as
+the __quantile.
+
+[h4:cdf_inv Inverse CDF Function.]
+
+The inverse of the cumulative distribution function, is the same as the
+__quantile.
+
+[h4:survival_inv Inverse Survival Function.]
+
+The inverse of the survival function, is the same as computing the
+[link math_toolkit.dist_ref.nmp.quantile_c quantile
+from the complement of the probability].
+
+[h4:pmf Probability Mass Function]
+
+The Probability Mass Function is the same as the __pdf.
+
+The term Mass Function is usually applied to discrete distributions,
+while the term __pdf applies to continuous distributions.
+
+[h4:lower_critical Lower Critical Value.]
+
+The lower critical value calculates the value of the random variable
+given the area under the left tail of the distribution.
+It is equivalent to calculating the __quantile.
+
+[h4:upper_critical Upper Critical Value.]
+
+The upper critical value calculates the value of the random variable
+given the area under the right tail of the distribution. It is equivalent to
+calculating the [link math_toolkit.dist_ref.nmp.quantile_c quantile from the complement of the
+probability].
+
+[h4:survival Survival Function]
+
+Refer to the __ccdf.
+
+[endsect][/section:nmp Non-Member Properties]
+
+
+[/ non_members.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/normal.qbk b/doc/distributions/normal.qbk
new file mode 100644
index 0000000..a3db989
--- /dev/null
+++ b/doc/distributions/normal.qbk
@@ -0,0 +1,120 @@
+[section:normal_dist Normal (Gaussian) Distribution]
+
+``#include <boost/math/distributions/normal.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class normal_distribution;
+
+ typedef normal_distribution<> normal;
+
+ template <class RealType, class ``__Policy``>
+ class normal_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Construct:
+ normal_distribution(RealType mean = 0, RealType sd = 1);
+ // Accessors:
+ RealType mean()const; // location.
+ RealType standard_deviation()const; // scale.
+ // Synonyms, provided to allow generic use of find_location and find_scale.
+ RealType location()const;
+ RealType scale()const;
+ };
+
+ }} // namespaces
+
+The normal distribution is probably the most well known statistical
+distribution: it is also known as the Gaussian Distribution.
+A normal distribution with mean zero and standard deviation one
+is known as the ['Standard Normal Distribution].
+
+Given mean [mu][space]and standard deviation [sigma][space]it has the PDF:
+
+[space] [equation normal_ref1]
+
+The variation of the PDF with its parameters is illustrated
+in the following graph:
+
+[graph normal_pdf]
+
+The cumulative distribution function is given by
+
+[space] [equation normal_cdf]
+
+and is illustrated by this graph:
+
+[graph normal_cdf]
+
+
+[h4 Member Functions]
+
+ normal_distribution(RealType mean = 0, RealType sd = 1);
+
+Constructs a normal distribution with mean /mean/ and
+standard deviation /sd/.
+
+Requires sd > 0, otherwise __domain_error is called.
+
+ RealType mean()const;
+ RealType location()const;
+
+both return the /mean/ of this distribution.
+
+ RealType standard_deviation()const;
+ RealType scale()const;
+
+both return the /standard deviation/ of this distribution.
+(Redundant location and scale functions are provided to match other similar distributions,
+allowing the functions find_location and find_scale to be used generically.)
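+
+For example, a minimal sketch of the accessors (the parameter values are
+illustrative only):
+
+    #include <boost/math/distributions/normal.hpp>
+
+    boost::math::normal n(2., 3.); // mean = 2, standard deviation = 3
+    // n.mean() == n.location() == 2
+    // n.standard_deviation() == n.scale() == 3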
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[-[max_value], +[max_value]\].
+However, if RealType permits, the pdf at +[infin] and -[infin] is also supported (and is zero),
+the cdf at -[infin] = 0 and at +[infin] = 1,
+and the complement of the cdf at -[infin] = 1 and at +[infin] = 0.
+
+[h4 Accuracy]
+
+The normal distribution is implemented in terms of the
+[link math_toolkit.sf_erf.error_function error function],
+and as such should have very low error rates.
+
+[h4 Implementation]
+
+In the following table /m/ is the mean of the distribution,
+and /s/ is its standard deviation.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = e[super -(x-m)[super 2]\/(2s[super 2])] \/ (s * sqrt(2*pi)) ]]
+[[cdf][Using the relation: p = 0.5 * __erfc(-(x-m)/(s*sqrt(2))) ]]
+[[cdf complement][Using the relation: q = 0.5 * __erfc((x-m)/(s*sqrt(2))) ]]
+[[quantile][Using the relation: x = m - s * sqrt(2) * __erfc_inv(2*p)]]
+[[quantile from the complement][Using the relation: x = m + s * sqrt(2) * __erfc_inv(2*p)]]
+[[mean and standard deviation][The same as `dist.mean()` and `dist.standard_deviation()`]]
+[[mode][The same as the mean.]]
+[[median][The same as the mean.]]
+[[skewness][0]]
+[[kurtosis][3]]
+[[kurtosis excess][0]]
+]
+
+[endsect] [/section:normal_dist Normal]
+
+[/ normal.qbk
+ Copyright 2006, 2007, 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/normal_example.qbk b/doc/distributions/normal_example.qbk
new file mode 100644
index 0000000..c8dc4a0
--- /dev/null
+++ b/doc/distributions/normal_example.qbk
@@ -0,0 +1,36 @@
+[section:normal_example Normal Distribution Examples]
+
+(See also the reference documentation for the __normal_distrib.)
+
+[section:normal_misc Some Miscellaneous Examples of the Normal (Gaussian) Distribution]
+
+The sample program [@../../example/normal_misc_examples.cpp
+normal_misc_examples.cpp] illustrates their use.
+
+[import ../../example/normal_misc_examples.cpp]
+
+[h4 Traditional Tables]
+[normal_basic1]
+
+[h4 Standard deviations either side of the Mean]
+[normal_basic2]
+[h4 Some simple examples]
+[h4 Life of light bulbs]
+[normal_bulbs_example1]
+[h4 How many onions?]
+[normal_bulbs_example3]
+[h4 Packing beef]
+[normal_bulbs_example4]
+[h4 Length of bolts]
+[normal_bulbs_example5]
+
+[endsect] [/section:normal_misc Some Miscellaneous Examples of the Normal Distribution]
+[endsect] [/section:normal_example Normal Distribution Examples]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/pareto.qbk b/doc/distributions/pareto.qbk
new file mode 100644
index 0000000..7584d53
--- /dev/null
+++ b/doc/distributions/pareto.qbk
@@ -0,0 +1,121 @@
+[section:pareto Pareto Distribution]
+
+
+``#include <boost/math/distributions/pareto.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class pareto_distribution;
+
+ typedef pareto_distribution<> pareto;
+
+ template <class RealType, class ``__Policy``>
+ class pareto_distribution
+ {
+ public:
+ typedef RealType value_type;
+ // Constructor:
+ pareto_distribution(RealType scale = 1, RealType shape = 1)
+ // Accessors:
+ RealType scale()const;
+ RealType shape()const;
+ };
+
+ }} // namespaces
+
+The [@http://en.wikipedia.org/wiki/pareto_distribution Pareto distribution]
+is a continuous distribution with the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function (pdf)]:
+
+f(x; [alpha], [beta]) = [alpha][beta][super [alpha]] / x[super [alpha]+ 1]
+
+For shape parameter [alpha][space] > 0, and scale parameter [beta][space] > 0.
+If x < [beta][space], the pdf is zero.
+
+The [@http://mathworld.wolfram.com/ParetoDistribution.html Pareto distribution]
+often describes how a small proportion of items accounts for a large share of some quantity.
+A classic example is that 80% of the wealth is owned by 20% of the population.
+
+The following graph illustrates how the PDF varies with the scale parameter [beta]:
+
+[graph pareto_pdf1]
+
+And this graph illustrates how the PDF varies with the shape parameter [alpha]:
+
+[graph pareto_pdf2]
+
+
+[h4 Related distributions]
+
+
+[h4 Member Functions]
+
+ pareto_distribution(RealType scale = 1, RealType shape = 1);
+
+Constructs a [@http://en.wikipedia.org/wiki/pareto_distribution
+pareto distribution] with shape /shape/ and scale /scale/.
+
+Requires that the /shape/ and /scale/ parameters are both greater than zero,
+otherwise calls __domain_error.
+
+ RealType scale()const;
+
+Returns the /scale/ parameter of this distribution.
+
+ RealType shape()const;
+
+Returns the /shape/ parameter of this distribution.
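+
+For example, a minimal sketch (the parameter values are illustrative only):
+
+    #include <boost/math/distributions/pareto.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::pareto dist(1., 3.); // scale = 1, shape = 3
+       // cdf = 1 - (scale/x)^shape = 1 - (1/2)^3:
+       std::cout << cdf(dist, 2.) << std::endl; // prints 0.875
+    }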
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The supported domain of the random variable is \[scale, [infin]\].
+
+[h4 Accuracy]
+
+The Pareto distribution is implemented in terms of the
+standard library `exp` functions plus __expm1
+and so should have very small errors, usually only a few epsilon.
+
+If probability is near to unity (or the complement of a probability near zero) see also __why_complements.
+
+[h4 Implementation]
+
+In the following table [alpha][space] is the shape parameter of the distribution, and
+[beta][space] is its scale parameter, /x/ is the random variate, /p/ is the probability
+and its complement /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf p = [alpha][beta][super [alpha]]/x[super [alpha] +1] ]]
+[[cdf][Using the relation: cdf p = 1 - ([beta][space] / x)[super [alpha]] ]]
+[[cdf complement][Using the relation: q = 1 - p = ([beta][space] / x)[super [alpha]] ]]
+[[quantile][Using the relation: x = [beta] / (1 - p)[super 1/[alpha]] ]]
+[[quantile from the complement][Using the relation: x = [beta] / (q)[super 1/[alpha]] ]]
+[[mean][[alpha][beta] / ([alpha] - 1) ]]
+[[variance][[alpha][beta][super 2] / (([alpha] - 1)[super 2] ([alpha] - 2)) ]]
+[[mode][[beta]]]
+[[skewness][Refer to [@http://mathworld.wolfram.com/ParetoDistribution.html Weisstein, Eric W. "Pareto Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+[[kurtosis][Refer to [@http://mathworld.wolfram.com/ParetoDistribution.html Weisstein, Eric W. "Pareto Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+[[kurtosis excess][Refer to [@http://mathworld.wolfram.com/ParetoDistribution.html Weisstein, Eric W. "Pareto Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+]
+
+[h4 References]
+* [@http://en.wikipedia.org/wiki/pareto_distribution Pareto Distribution]
+* [@http://mathworld.wolfram.com/paretoDistribution.html Weisstein, Eric W. "Pareto Distribution." From MathWorld--A Wolfram Web Resource.]
+* Handbook of Statistical Distributions with Applications, K Krishnamoorthy, ISBN 1-58488-635-8, Chapter 23, pp 257 - 267.
+(Note the meaning of a and b is reversed in Wolfram and Krishnamoorthy).
+
+[endsect] [/section:pareto Pareto]
+
+[/
+ Copyright 2006, 2009 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/poisson.qbk b/doc/distributions/poisson.qbk
new file mode 100644
index 0000000..432862e
--- /dev/null
+++ b/doc/distributions/poisson.qbk
@@ -0,0 +1,103 @@
+[section:poisson_dist Poisson Distribution]
+
+``#include <boost/math/distributions/poisson.hpp>``
+
+ namespace boost { namespace math {
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class poisson_distribution;
+
+ typedef poisson_distribution<> poisson;
+
+ template <class RealType, class ``__Policy``>
+ class poisson_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ poisson_distribution(RealType mean = 1); // Constructor.
+ RealType mean()const; // Accessor.
+ }
+
+ }} // namespaces boost::math
+
+The [@http://en.wikipedia.org/wiki/Poisson_distribution Poisson distribution]
+is a well-known statistical discrete distribution.
+It expresses the probability of a number of events
+(or failures, arrivals, occurrences ...)
+occurring in a fixed period of time,
+provided these events occur with a known mean rate [lambda][space]
+(events/time), and are independent of the time since the last event.
+
+The distribution was discovered by Sim__eacute on-Denis Poisson (1781 to 1840).
+
+It has the Probability Mass Function:
+
+[equation poisson_ref1]
+
+for k events, with an expected number of events [lambda].
+
+The following graph illustrates how the PDF varies with the parameter [lambda]:
+
+[graph poisson_pdf_1]
+
+[discrete_quantile_warning Poisson]
+
+[h4 Member Functions]
+
+ poisson_distribution(RealType mean = 1);
+
+Constructs a poisson distribution with mean /mean/.
+
+ RealType mean()const;
+
+Returns the /mean/ of this distribution.
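+
+For example, a minimal sketch (the mean is chosen arbitrarily):
+
+    #include <boost/math/distributions/poisson.hpp>
+    #include <iostream>
+
+    int main()
+    {
+       boost::math::poisson dist(4.); // lambda = 4
+       std::cout << pdf(dist, 2.) << std::endl; // P(k == 2) = 8e^-4, ~0.1465
+       std::cout << cdf(dist, 2.) << std::endl; // P(k <= 2) = 13e^-4, ~0.2381
+    }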
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0, [infin]\].
+
+[h4 Accuracy]
+
+The Poisson distribution is implemented in terms of the
+incomplete gamma functions __gamma_p and __gamma_q
+and as such should have low error rates, but refer to the documentation
+of those functions for more information.
+The quantile and its complement use the inverse gamma functions
+and are therefore probably slightly less accurate: this is because the
+inverse gamma functions are implemented using an iterative method with a
+lower tolerance to avoid excessive computation.
+
+[h4 Implementation]
+
+In the following table [lambda][space] is the mean of the distribution,
+/k/ is the random variable, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = e[super -[lambda]] [lambda][super k] \/ k! ]]
+[[cdf][Using the relation: p = [Gamma](k+1, [lambda]) \/ k! = __gamma_q(k+1, [lambda])]]
+[[cdf complement][Using the relation: q = __gamma_p(k+1, [lambda]) ]]
+[[quantile][Using the relation: k = __gamma_q_inva([lambda], p) - 1]]
+[[quantile from the complement][Using the relation: k = __gamma_p_inva([lambda], q) - 1]]
+[[mean][[lambda]]]
+[[mode][ floor ([lambda]) or [floorlr[lambda]] ]]
+[[skewness][1/[radic][lambda]]]
+[[kurtosis][3 + 1/[lambda]]]
+[[kurtosis excess][1/[lambda]]]
+]
+
+[/ poisson.qbk
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
+[endsect][/section:poisson_dist Poisson]
+
diff --git a/doc/distributions/rayleigh.qbk b/doc/distributions/rayleigh.qbk
new file mode 100644
index 0000000..910c2b7
--- /dev/null
+++ b/doc/distributions/rayleigh.qbk
@@ -0,0 +1,119 @@
+[section:rayleigh Rayleigh Distribution]
+
+
+``#include <boost/math/distributions/rayleigh.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class rayleigh_distribution;
+
+ typedef rayleigh_distribution<> rayleigh;
+
+ template <class RealType, class ``__Policy``>
+ class rayleigh_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Construct:
+ rayleigh_distribution(RealType sigma = 1)
+ // Accessors:
+ RealType sigma()const;
+ };
+
+ }} // namespaces
+
+The [@http://en.wikipedia.org/wiki/Rayleigh_distribution Rayleigh distribution]
+is a continuous distribution with the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function]:
+
+f(x; [sigma]) = x * exp(-x[super 2] / (2 [sigma][super 2])) / [sigma][super 2]
+
+For sigma parameter [sigma][space] > 0, and x > 0.
+
+The Rayleigh distribution is often used where the magnitude of two
+orthogonal components is of interest:
+for example, wind velocity and direction may be combined to yield a wind speed,
+or real and imaginary components may have absolute values that are Rayleigh distributed.
+
+The following graph illustrates how the Probability Density Function (pdf) varies with the parameter [sigma]:
+
+[graph rayleigh_pdf]
+
+and the Cumulative Distribution Function (cdf)
+
+[graph rayleigh_cdf]
+
+[h4 Related distributions]
+
+The magnitude [radic](X[super 2] + Y[super 2]) of two independent,
+zero-mean normal variates X and Y with common variance
+has a Rayleigh distribution.
+
+The [@http://en.wikipedia.org/wiki/Chi_distribution Chi],
+[@http://en.wikipedia.org/wiki/Rice_distribution Rice]
+and [@http://en.wikipedia.org/wiki/Weibull_distribution Weibull] distributions are generalizations of the
+[@http://en.wikipedia.org/wiki/Rayleigh_distribution Rayleigh distribution].
+
+[h4 Member Functions]
+
+ rayleigh_distribution(RealType sigma = 1);
+
+Constructs a [@http://en.wikipedia.org/wiki/Rayleigh_distribution
+Rayleigh distribution] with [sigma] /sigma/.
+
+Requires that the [sigma] parameter is greater than zero,
+otherwise calls __domain_error.
+
+ RealType sigma()const;
+
+Returns the /sigma/ parameter of this distribution.
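+
+For example, a minimal sketch (the sigma value is illustrative only):
+
+    #include <boost/math/distributions/rayleigh.hpp>
+
+    boost::math::rayleigh dist(2.); // sigma = 2
+    // mean(dist) == 2 * sqrt(pi/2), about 2.5066
+    // mode(dist) == 2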
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0, [max_value]\].
+
+[h4 Accuracy]
+
+The Rayleigh distribution is implemented in terms of the
+standard library `sqrt` and `exp` and as such should have very low error rates.
+Some constants such as skewness and kurtosis were calculated using
+NTL RR type with 150-bit accuracy, about 50 decimal digits.
+
+[h4 Implementation]
+
+In the following table [sigma][space] is the sigma parameter of the distribution,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = x * exp(-x[super 2] / (2 [sigma][super 2])) / [sigma][super 2] ]]
+[[cdf][Using the relation: p = 1 - exp(-x[super 2] / (2 [sigma][super 2])) = -__expm1(-x[super 2] / (2 [sigma][super 2])) ]]
+[[cdf complement][Using the relation: q = exp(-x[super 2] / (2 [sigma][super 2])) ]]
+[[quantile][Using the relation: x = sqrt(-2 * [sigma][super 2] * log(1 - p)) = sqrt(-2 * [sigma][super 2] * __log1p(-p))]]
+[[quantile from the complement][Using the relation: x = sqrt(-2 * [sigma][super 2] * log(q)) ]]
+[[mean][[sigma] * sqrt([pi]/2) ]]
+[[variance][[sigma][super 2] * (4 - [pi]) / 2 ]]
+[[mode][[sigma] ]]
+[[skewness][Constant from [@http://mathworld.wolfram.com/RayleighDistribution.html Weisstein, Eric W. "Rayleigh Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+[[kurtosis][Constant from [@http://mathworld.wolfram.com/RayleighDistribution.html Weisstein, Eric W. "Rayleigh Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+[[kurtosis excess][Constant from [@http://mathworld.wolfram.com/RayleighDistribution.html Weisstein, Eric W. "Rayleigh Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+]
+
+[h4 References]
+* [@http://en.wikipedia.org/wiki/Rayleigh_distribution ]
+* [@http://mathworld.wolfram.com/RayleighDistribution.html Weisstein, Eric W. "Rayleigh Distribution." From MathWorld--A Wolfram Web Resource.]
+
+[endsect] [/section:rayleigh Rayleigh]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/skew_normal.qbk b/doc/distributions/skew_normal.qbk
new file mode 100644
index 0000000..ca12675
--- /dev/null
+++ b/doc/distributions/skew_normal.qbk
@@ -0,0 +1,193 @@
+[section:skew_normal_dist Skew Normal Distribution]
+
+``#include <boost/math/distributions/skew_normal.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class skew_normal_distribution;
+
+ typedef skew_normal_distribution<> skew_normal;
+
+ template <class RealType, class ``__Policy``>
+ class skew_normal_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Constructor:
+ skew_normal_distribution(RealType location = 0, RealType scale = 1, RealType shape = 0);
+ // Accessors:
+ RealType location()const; // mean if normal.
+ RealType scale()const; // width, standard deviation if normal.
+ RealType shape()const; // The distribution is right skewed if shape > 0 and is left skewed if shape < 0.
+ // The distribution is normal if shape is zero.
+ };
+
+ }} // namespaces
+
+The skew normal distribution is a variant of the well-known
+Gaussian (normal) statistical distribution.
+
+The skew normal distribution with shape zero is identical to the
+[@http://en.wikipedia.org/wiki/Normal_distribution Normal Distribution],
+so the latter can be regarded as a special case of the more general skew normal distribution.
+
+If the standard (mean = 0, scale = 1) normal distribution probability density function is
+
+[space][space][equation normal01_pdf]
+
+and the cumulative distribution function
+
+[space][space][equation normal01_cdf]
+
+then the [@http://en.wikipedia.org/wiki/Probability_density_function PDF]
+of the [@http://en.wikipedia.org/wiki/Skew_normal_distribution skew normal distribution]
+with shape parameter [alpha], defined by O'Hagan and Leonard (1976), is
+
+[space][space][equation skew_normal_pdf0]
+
+Given [@http://en.wikipedia.org/wiki/Location_parameter location] [xi],
+[@http://en.wikipedia.org/wiki/Scale_parameter scale] [omega],
+and [@http://en.wikipedia.org/wiki/Shape_parameter shape] [alpha],
+it can be
+[@http://en.wikipedia.org/wiki/Skew_normal_distribution transformed],
+to the form:
+
+[space][space][equation skew_normal_pdf]
+
+and [@http://en.wikipedia.org/wiki/Cumulative_distribution_function CDF]:
+
+[space][space][equation skew_normal_cdf]
+
+where ['T(h,a)] is Owen's T function, and ['[Phi](x)] is the cumulative distribution function of the standard normal distribution.
+
+The variation of the PDF and CDF with the parameters is illustrated
+in the following graphs:
+
+[graph skew_normal_pdf]
+[graph skew_normal_cdf]
+
+[h4 Member Functions]
+
+ skew_normal_distribution(RealType location = 0, RealType scale = 1, RealType shape = 0);
+
+Constructs a skew_normal distribution with location [xi],
+scale [omega] and shape [alpha].
+
+Requires scale > 0, otherwise __domain_error is called.
+
+ RealType location()const;
+
+returns the location [xi] of this distribution.
+
+ RealType scale()const;
+
+returns the scale [omega] of this distribution.
+
+ RealType shape()const;
+
+returns the shape [alpha] of this distribution.
+
+(Location and scale function match other similar distributions,
+allowing the functions `find_location` and `find_scale` to be used generically).
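+
+As a brief usage sketch (the parameter values are invented purely for illustration):
+
+ #include <boost/math/distributions/skew_normal.hpp>
+ #include <iostream>
+
+ int main()
+ {
+    using boost::math::skew_normal; // typedef skew_normal_distribution<> skew_normal;
+    skew_normal dist(1., 2., 3.); // location 1, scale 2, shape 3 (right skewed).
+    std::cout << dist.location() << ' ' << dist.scale() << ' ' << dist.shape() << '\n';
+    std::cout << pdf(dist, 0.5) << ' ' << cdf(dist, 0.5) << '\n';
+    return 0;
+ }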
+
+[note While the shape parameter may be chosen arbitrarily (finite),
+the resulting [*skewness] of the distribution is in fact limited to about (-1, 1);
+strictly, the interval is (-0.9952717, 0.9952717).
+
+A parameter [delta] is related to the shape [alpha] by
+[delta] = [alpha] / [radic](1 + [alpha][pow2]),
+and used in the expression for skewness
+[equation skew_normal_skewness]
+] [/note]
+
+[h4 References]
+
+* [@http://azzalini.stat.unipd.it/SN/ Skew-Normal Probability Distribution] for many links and bibliography.
+* [@http://azzalini.stat.unipd.it/SN/Intro/intro.html A very brief introduction to the skew-normal distribution]
+by Adelchi Azzalini (2005-11-2).
+* See a [@http://www.tri.org.au/azzalini.html skew-normal function animation].
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is ['-[max_value], +[max_value]].
+Infinite values are not supported.
+
+There is no known [@http://en.wikipedia.org/wiki/Closed-form_expression closed-form expression]
+for either the mode or the median, so these are computed:
+
+* mode - by finding the maximum of the PDF.
+* median - by computing `quantile(1/2)`.
+
+The maximum of the PDF is found by searching for the root of f'(x) = 0.
+
+Both involve iterative methods that will have lower accuracy than other estimates.
+
+[h4 Testing]
+
+__R using library(sn) described at
+[@http://azzalini.stat.unipd.it/SN/ Skew-Normal Probability Distribution],
+and at [@http://cran.r-project.org/web/packages/sn/sn.pdf R skew-normal(sn) package].
+
+Package sn provides functions related to the skew-normal (SN)
+and the skew-t (ST) probability distributions,
+both for the univariate and for the multivariate case,
+including regression models.
+
+__Mathematica was also used to generate some more accurate spot test data.
+
+[h4 Accuracy]
+
+The skew_normal distribution with shape = zero is implemented as a special case,
+equivalent to the normal distribution in terms of the
+[link math_toolkit.sf_erf.error_function error function],
+and therefore should have excellent accuracy.
+
+The PDF and mean, variance, skewness and kurtosis are also accurately evaluated using
+[@http://en.wikipedia.org/wiki/Analytical_expression analytical expressions].
+The CDF requires [@http://en.wikipedia.org/wiki/Owen%27s_T_function Owen's T function]
+that is evaluated using a Boost C++ __owens_t implementation of the algorithms of
+M. Patefield and D. Tandy, Journal of Statistical Software, 5(5), 1-25 (2000);
+the accuracy of this function is discussed in detail at __owens_t.
+
+The median and mode are calculated by iterative root finding, and both will be less accurate.
+
+[h4 Implementation]
+
+In the following table, [xi] is the location of the distribution,
+and [omega] is its scale, and [alpha] is its shape.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using:[equation skew_normal_pdf] ]]
+[[cdf][Using: [equation skew_normal_cdf][br]
+where ['T(h,a)] is Owen's T function, and ['[Phi](x)] is the cumulative distribution function of the standard normal distribution. ]]
+[[cdf complement][Using: complement of normal distribution + 2 * Owens_t]]
+[[quantile][Found by iterative root finding of p = cdf(x)]]
+[[quantile from the complement][-quantile(SN(-location [xi], scale [omega], -shape[alpha]), p)]]
+[[location][location [xi]]]
+[[scale][scale [omega]]]
+[[shape][shape [alpha]]]
+[[median][quantile(1/2)]]
+[[mean][[equation skew_normal_mean]]]
+[[mode][Maximum of the pdf is sought through searching the root of f'(x)=0]]
+[[variance][[equation skew_normal_variance] ]]
+[[skewness][[equation skew_normal_skewness] ]]
+[[kurtosis][kurtosis excess + 3]]
+[[kurtosis excess] [ [equation skew_normal_kurt_ex] ]]
+] [/table]
+
+[endsect] [/section:skew_normal_dist skew_Normal]
+
+[/ skew_normal.qbk
+ Copyright 2012 Benjamin Sobotta, John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/students_t.qbk b/doc/distributions/students_t.qbk
new file mode 100644
index 0000000..d9146e5
--- /dev/null
+++ b/doc/distributions/students_t.qbk
@@ -0,0 +1,188 @@
+[section:students_t_dist Student's t Distribution]
+
+``#include <boost/math/distributions/students_t.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class students_t_distribution;
+
+ typedef students_t_distribution<> students_t;
+
+ template <class RealType, class ``__Policy``>
+ class students_t_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ // Constructor:
+ students_t_distribution(const RealType& v);
+
+ // Accessor:
+ RealType degrees_of_freedom()const;
+
+ // degrees of freedom estimation:
+ static RealType find_degrees_of_freedom(
+ RealType difference_from_mean,
+ RealType alpha,
+ RealType beta,
+ RealType sd,
+ RealType hint = 100);
+ };
+
+ }} // namespaces
+
+Student's t-distribution is a statistical distribution published by William Gosset in 1908.
+His employer, Guinness Breweries, required him to publish under a
+pseudonym (possibly to hide that they were using statistics to improve beer quality),
+so he chose "Student".
+
+Given N independent measurements, let
+
+[equation students_t_dist]
+
+where /M/ is the population mean, [mu] is the sample mean, and /s/ is the
+sample variance.
+
+[@https://en.wikipedia.org/wiki/Student%27s_t-distribution Student's t-distribution]
+is defined as the distribution of the random
+variable t which is - very loosely - the "best" that we can do not
+knowing the true standard deviation of the sample. It has the PDF:
+
+[equation students_t_ref1]
+
+The Student's t-distribution takes a single parameter: the number of
+degrees of freedom of the sample. When the degrees of freedom is
+/one/ then this distribution is the same as the Cauchy distribution.
+As the number of degrees of freedom tends towards infinity, this
+distribution approaches the normal distribution. The following graph
+illustrates how the PDF varies with the degrees of freedom [nu]:
+
+[graph students_t_pdf]
+
+[h4 Member Functions]
+
+ students_t_distribution(const RealType& v);
+
+Constructs a Student's t-distribution with /v/ degrees of freedom.
+
+Requires /v/ > 0, including infinity (if RealType permits),
+otherwise calls __domain_error. Note that
+non-integral degrees of freedom are supported,
+and are meaningful under certain circumstances.
+
+ RealType degrees_of_freedom()const;
+
+returns the number of degrees of freedom of this distribution.
+
+ static RealType find_degrees_of_freedom(
+ RealType difference_from_mean,
+ RealType alpha,
+ RealType beta,
+ RealType sd,
+ RealType hint = 100);
+
+returns the number of degrees of freedom required to observe a significant
+result in the Student's t test when the mean differs from the "true"
+mean by /difference_from_mean/.
+
+[variablelist
+[[difference_from_mean][The difference between the true mean and the sample mean
+ that we wish to show is significant.]]
+[[alpha][The maximum acceptable probability of rejecting the null hypothesis
+ when it is in fact true.]]
+[[beta][The maximum acceptable probability of failing to reject the null hypothesis
+ when it is in fact false.]]
+[[sd][The sample standard deviation.]]
+[[hint][A hint for the location to start looking for the result, a good choice for this
+ would be the sample size of a previous borderline Student's t test.]]
+]
+
+[note
+Remember that for a two-sided test, you must divide alpha by two
+before calling this function.]
+
+For more information on this function see the
+[@http://www.itl.nist.gov/div898/handbook/prc/section2/prc222.htm
+NIST Engineering Statistics Handbook].
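+
+For example, a sketch of a call (the argument values are invented purely
+for illustration):
+
+ // How many degrees of freedom are needed to detect a difference of 0.5
+ // from the true mean, at alpha = beta = 0.05, given a sample standard
+ // deviation of 1.0?
+ double df = boost::math::students_t::find_degrees_of_freedom(
+    0.5,   // difference_from_mean
+    0.05,  // alpha (one-sided; divide by 2 for a two-sided test)
+    0.05,  // beta
+    1.0);  // sd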
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[-[infin], +[infin]\].
+
+[h4 Examples]
+
+Various [link math_toolkit.stat_tut.weg.st_eg worked examples] are available illustrating the use of the Student's t
+distribution.
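+
+As a quick taste, a two-sided 95% critical value for 10 degrees of freedom
+can be computed like this (a minimal sketch, not taken from the worked examples):
+
+ #include <boost/math/distributions/students_t.hpp>
+ #include <iostream>
+
+ int main()
+ {
+    using boost::math::students_t;
+    students_t dist(10); // 10 degrees of freedom.
+    // Upper critical value for a two-sided test at alpha = 0.05:
+    double t = quantile(complement(dist, 0.05 / 2));
+    std::cout << t << '\n'; // Approximately 2.228.
+    return 0;
+ }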
+
+[h4 Accuracy]
+
+The Student's t distribution is implemented in terms of the
+[link math_toolkit.sf_beta.ibeta_function incomplete beta function]
+and [link math_toolkit.sf_beta.ibeta_inv_function its inverses],
+refer to accuracy data on those functions for more information.
+
+[h4 Implementation]
+
+In the following table /v/ is the degrees of freedom of the distribution,
+/t/ is the random variate, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = (v \/ (v + t[super 2]))[super (1+v)\/2 ] / (sqrt(v) * __beta(v\/2, 0.5)) ]]
+[[cdf][Using the relations:
+
+p = 1 - z /iff t > 0/
+
+p = z /otherwise/
+
+where z is given by:
+
+__ibeta(v \/ 2, 0.5, v \/ (v + t[super 2])) \/ 2 ['iff v < 2t[super 2]]
+
+__ibetac(0.5, v \/ 2, t[super 2] \/ (v + t[super 2])) \/ 2 /otherwise/]]
+[[cdf complement][Using the relation: q = cdf(-t) ]]
+[[quantile][Using the relation: t = sign(p - 0.5) * sqrt(v * y \/ x)
+
+where:
+
+x = __ibeta_inv(v \/ 2, 0.5, 2 * min(p, q))
+
+y = 1 - x
+
+The quantities /x/ and /y/ are both returned by __ibeta_inv
+without the subtraction implied above.]]
+[[quantile from the complement][Using the relation: t = -quantile(q)]]
+[[mode][0]]
+[[mean][0]]
+[[variance][if (v > 2) v \/ (v - 2) else NaN]]
+[[skewness][if (v > 3) 0 else NaN ]]
+[[kurtosis][if (v > 4) 3 * (v - 2) \/ (v - 4) else NaN]]
+[[kurtosis excess][if (v > 4) 6 \/ (v - 4) else NaN]]
+]
+
+If the moment index /k/ is greater than or equal to /v/, then the moment is undefined.
+Evaluating such a moment will throw a __domain_error unless the error is ignored by a policy,
+in which case it returns `std::numeric_limits<>::quiet_NaN();`
+
+[h5:implementation Implementation]
+
+(By popular demand, we now support an infinite number of degrees of freedom
+and an infinite random deviate.
+But we have not implemented the return of infinity
+as suggested by [@http://en.wikipedia.org/wiki/Student%27s_t-distribution Wikipedia Student's t],
+instead throwing a domain error or returning NaN.
+See also [@https://svn.boost.org/trac/boost/ticket/7177 ticket 7177].)
+
+[endsect] [/section:students_t_dist Students t]
+
+[/ students_t.qbk
+ Copyright 2006, 2012, 2017 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/students_t_examples.qbk b/doc/distributions/students_t_examples.qbk
new file mode 100644
index 0000000..483631e
--- /dev/null
+++ b/doc/distributions/students_t_examples.qbk
@@ -0,0 +1,781 @@
+
+[section:st_eg Student's t Distribution Examples]
+
+[section:tut_mean_intervals Calculating confidence intervals on the mean with the Students-t distribution]
+
+Let's say you have a sample mean; you may wish to know what confidence intervals
+you can place on that mean. Colloquially: "I want an interval that I can be
+P% sure contains the true mean". (On a technical point, note that
+the interval either contains the true mean or it does not: the
+meaning of the confidence level is subtly
+different from this colloquialism. More background information can be found on the
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda352.htm NIST site]).
+
+The formula for the interval can be expressed as:
+
+[equation dist_tutorial4]
+
+Where, ['Y[sub s]] is the sample mean, /s/ is the sample standard deviation,
+/N/ is the sample size, /[alpha]/ is the desired significance level and
+['t[sub ([alpha]/2,N-1)]] is the upper critical value of the Students-t
+distribution with /N-1/ degrees of freedom.
+
+[note
+The quantity [alpha][space] is the maximum acceptable risk of falsely rejecting
+the null-hypothesis. The smaller the value of [alpha] the greater the
+strength of the test.
+
+The confidence level of the test is defined as 1 - [alpha], and often expressed
+as a percentage. So for example a significance level of 0.05, is equivalent
+to a 95% confidence level. Refer to
+[@http://www.itl.nist.gov/div898/handbook/prc/section1/prc14.htm
+"What are confidence intervals?"] in __handbook for more information.
+] [/ Note]
+
+[note
+The usual assumptions of
+[@http://en.wikipedia.org/wiki/Independent_and_identically-distributed_random_variables independent and identically distributed (i.i.d.)]
+variables and [@http://en.wikipedia.org/wiki/Normal_distribution normal distribution]
+of course apply here, as they do in other examples.
+]
+
+From the formula, it should be clear that:
+
+* The width of the confidence interval decreases as the sample size increases.
+* The width increases as the standard deviation increases.
+* The width increases as the ['confidence level increases] (0.5 towards 0.99999 - stronger).
+* The width increases as the ['significance level decreases] (0.5 towards 0.00000...01 - stronger).
+
+The following example code is taken from the example program
+[@../../example/students_t_single_sample.cpp students_t_single_sample.cpp].
+
+We'll begin by defining a procedure to calculate intervals for
+various confidence levels; the procedure will print these out
+as a table:
+
+ // Needed includes:
+ #include <boost/math/distributions/students_t.hpp>
+ #include <iostream>
+ #include <iomanip>
+ // Bring everything into global namespace for ease of use:
+ using namespace boost::math;
+ using namespace std;
+
+ void confidence_limits_on_mean(
+ double Sm, // Sm = Sample Mean.
+ double Sd, // Sd = Sample Standard Deviation.
+ unsigned Sn) // Sn = Sample Size.
+ {
+ using namespace std;
+ using namespace boost::math;
+
+ // Print out general info:
+ cout <<
+ "__________________________________\n"
+ "2-Sided Confidence Limits For Mean\n"
+ "__________________________________\n\n";
+ cout << setprecision(7);
+ cout << setw(40) << left << "Number of Observations" << "= " << Sn << "\n";
+ cout << setw(40) << left << "Mean" << "= " << Sm << "\n";
+ cout << setw(40) << left << "Standard Deviation" << "= " << Sd << "\n";
+
+We'll define a table of significance/risk levels for which we'll compute intervals:
+
+ double alpha[] = { 0.5, 0.25, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001 };
+
+Note that these are the complements of the confidence/probability levels: 0.5, 0.75, 0.9 ... 0.99999.
+
+Next we'll declare the distribution object we'll need, note that
+the /degrees of freedom/ parameter is the sample size less one:
+
+ students_t dist(Sn - 1);
+
+Most of what follows in the program is pretty printing, so let's focus
+on the calculation of the interval. First we need the t-statistic,
+computed using the /quantile/ function and our significance level. Note
+that since the significance levels are the complement of the probability,
+we have to wrap the arguments in a call to /complement(...)/:
+
+ double T = quantile(complement(dist, alpha[i] / 2));
+
+Note that alpha was divided by two, since we'll be calculating
+both the upper and lower bounds: had we been interested in a single
+sided interval then we would have omitted this step.
+
+Now to complete the picture, we'll get the (one-sided) width of the
+interval from the t-statistic
+by multiplying by the standard deviation, and dividing by the square
+root of the sample size:
+
+ double w = T * Sd / sqrt(double(Sn));
+
+The two-sided interval is then the sample mean plus and minus this width.
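+
+In code, using the names above, that is simply:
+
+ double lower_limit = Sm - w;
+ double upper_limit = Sm + w;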
+
+And, apart from some more pretty-printing, that completes the procedure.
+
+Let's take a look at some sample output, first using the
+[@http://www.itl.nist.gov/div898/handbook/eda/section4/eda428.htm
+Heat flow data] from the NIST site. The data set was collected
+by Bob Zarr of NIST in January, 1990 from a heat flow meter
+calibration and stability analysis.
+The corresponding dataplot
+output for this test can be found in
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda352.htm
+section 3.5.2] of the __handbook.
+
+[pre'''
+ __________________________________
+ 2-Sided Confidence Limits For Mean
+ __________________________________
+
+ Number of Observations = 195
+ Mean = 9.26146
+ Standard Deviation = 0.02278881
+
+
+ ___________________________________________________________________
+ Confidence T Interval Lower Upper
+ Value (%) Value Width Limit Limit
+ ___________________________________________________________________
+ 50.000 0.676 1.103e-003 9.26036 9.26256
+ 75.000 1.154 1.883e-003 9.25958 9.26334
+ 90.000 1.653 2.697e-003 9.25876 9.26416
+ 95.000 1.972 3.219e-003 9.25824 9.26468
+ 99.000 2.601 4.245e-003 9.25721 9.26571
+ 99.900 3.341 5.453e-003 9.25601 9.26691
+ 99.990 3.973 6.484e-003 9.25498 9.26794
+ 99.999 4.537 7.404e-003 9.25406 9.26886
+''']
+
+As you can see the large sample size (195) and small standard deviation (0.023)
+have combined to give very small intervals, indeed we can be
+very confident that the true mean is 9.2.
+
+For comparison the next example data output is taken from
+['P.K.Hou, O. W. Lau & M.C. Wong, Analyst (1983) vol. 108, p 64.
+and from Statistics for Analytical Chemistry, 3rd ed. (1994), pp 54-55
+J. C. Miller and J. N. Miller, Ellis Horwood ISBN 0 13 0309907.]
+The values result from the determination of mercury by cold-vapour
+atomic absorption.
+
+[pre'''
+ __________________________________
+ 2-Sided Confidence Limits For Mean
+ __________________________________
+
+ Number of Observations = 3
+ Mean = 37.8000000
+ Standard Deviation = 0.9643650
+
+
+ ___________________________________________________________________
+ Confidence T Interval Lower Upper
+ Value (%) Value Width Limit Limit
+ ___________________________________________________________________
+ 50.000 0.816 0.455 37.34539 38.25461
+ 75.000 1.604 0.893 36.90717 38.69283
+ 90.000 2.920 1.626 36.17422 39.42578
+ 95.000 4.303 2.396 35.40438 40.19562
+ 99.000 9.925 5.526 32.27408 43.32592
+ 99.900 31.599 17.594 20.20639 55.39361
+ 99.990 99.992 55.673 -17.87346 93.47346
+ 99.999 316.225 176.067 -138.26683 213.86683
+''']
+
+This time the fact that there are only three measurements leads to
+much wider intervals, indeed such large intervals that it's hard
+to be very confident in the location of the mean.
+
+[endsect]
+
+[section:tut_mean_test Testing a sample mean for difference from a "true" mean]
+
+When calibrating or comparing a scientific instrument or measurement method of some kind,
+we want to answer the question "Does an observed sample mean differ from the
+'true' mean in any significant way?". If it does, then we have evidence of
+a systematic difference. This question can be answered with a Students-t test:
+more information can be found
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda352.htm
+on the NIST site].
+
+Of course, the assignment of "true" to one mean may be quite arbitrary,
+often this is simply a "traditional" method of measurement.
+
+The following example code is taken from the example program
+[@../../example/students_t_single_sample.cpp students_t_single_sample.cpp].
+
+We'll begin by defining a procedure to determine which of the
+possible hypothesis are rejected or not-rejected
+at a given significance level:
+
+[note
+Non-statisticians might say 'not-rejected' means 'accepted'
+(often of the null-hypothesis), implying, wrongly, that there really *IS* no difference,
+but statisticians eschew this to avoid implying that there is positive evidence of 'no difference'.
+'Not-rejected' here means there is *no evidence* of difference, but there still might well be a difference.
+For example, see [@http://en.wikipedia.org/wiki/Argument_from_ignorance argument from ignorance] and
+[@http://www.bmj.com/cgi/content/full/311/7003/485 Absence of evidence does not constitute evidence of absence.]
+] [/ note]
+
+
+ // Needed includes:
+ #include <boost/math/distributions/students_t.hpp>
+ #include <iostream>
+ #include <iomanip>
+ // Bring everything into global namespace for ease of use:
+ using namespace boost::math;
+ using namespace std;
+
+ void single_sample_t_test(double M, double Sm, double Sd, unsigned Sn, double alpha)
+ {
+ //
+ // M = true mean.
+ // Sm = Sample Mean.
+ // Sd = Sample Standard Deviation.
+ // Sn = Sample Size.
+ // alpha = Significance Level.
+
+Most of the procedure is pretty-printing, so let's just focus on the
+calculation; we begin by calculating the t-statistic:
+
+ // Difference in means:
+ double diff = Sm - M;
+ // Degrees of freedom:
+ unsigned v = Sn - 1;
+ // t-statistic:
+ double t_stat = diff * sqrt(double(Sn)) / Sd;
+
+Finally calculate the probability from the t-statistic. If we're interested
+in simply whether there is a difference (either less or greater) or not,
+we don't care about the sign of the t-statistic,
+and we take the complement of the probability for comparison
+to the significance level:
+
+ students_t dist(v);
+ double q = cdf(complement(dist, fabs(t_stat)));
+
+The procedure then prints out the results of the various tests
+that can be done, these
+can be summarised in the following table:
+
+[table
+[[Hypothesis][Test]]
+[[The Null-hypothesis: there is
+*no difference* in means]
+[Reject if complement of CDF for |t| < significance level / 2:
+
+`cdf(complement(dist, fabs(t))) < alpha / 2`]]
+
+[[The Alternative-hypothesis: there
+*is difference* in means]
+[Reject if complement of CDF for |t| > significance level / 2:
+
+`cdf(complement(dist, fabs(t))) > alpha / 2`]]
+
+[[The Alternative-hypothesis: the sample mean *is less* than
+the true mean.]
+[Reject if CDF of t > 1 - significance level:
+
+`cdf(complement(dist, t)) < alpha`]]
+
+[[The Alternative-hypothesis: the sample mean *is greater* than
+the true mean.]
+[Reject if CDF of t < significance level:
+
+`cdf(dist, t) < alpha`]]
+]
+
+[note
+Notice that the comparisons are against `alpha / 2` for a two-sided test
+and against `alpha` for a one-sided test.]
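+
+For instance, the two-sided decision in the first row of the table could be sketched as:
+
+ // A sketch only - the example program prints a results table instead:
+ bool reject_null_two_sided = cdf(complement(dist, fabs(t_stat))) < alpha / 2;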
+
+Now that we have all the parts in place, let's take a look at some
+sample output, first using the
+[@http://www.itl.nist.gov/div898/handbook/eda/section4/eda428.htm
+Heat flow data] from the NIST site. The data set was collected
+by Bob Zarr of NIST in January, 1990 from a heat flow meter
+calibration and stability analysis. The corresponding dataplot
+output for this test can be found in
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda352.htm
+section 3.5.2] of the __handbook.
+
+[pre
+__________________________________
+Student t test for a single sample
+__________________________________
+
+Number of Observations = 195
+Sample Mean = 9.26146
+Sample Standard Deviation = 0.02279
+Expected True Mean = 5.00000
+
+Sample Mean - Expected Test Mean = 4.26146
+Degrees of Freedom = 194
+T Statistic = 2611.28380
+Probability that difference is due to chance = 0.000e+000
+
+Results for Alternative Hypothesis and alpha = 0.0500
+
+Alternative Hypothesis Conclusion
+Mean != 5.000 NOT REJECTED
+Mean < 5.000 REJECTED
+Mean > 5.000 NOT REJECTED
+]
+
+You will note the line that says the probability that the difference is
+due to chance is zero. From a philosophical point of view, of course,
+the probability can never reach zero. However, in this case the calculated
+probability is smaller than the smallest representable double precision number,
+hence the appearance of a zero here. Whatever its "true" value is, we know it
+must be extraordinarily small, so the alternative hypothesis - that there is
+a difference in means - is not rejected.
+
+For comparison the next example data output is taken from
+['P.K.Hou, O. W. Lau & M.C. Wong, Analyst (1983) vol. 108, p 64.
+and from Statistics for Analytical Chemistry, 3rd ed. (1994), pp 54-55
+J. C. Miller and J. N. Miller, Ellis Horwood ISBN 0 13 0309907.]
+The values result from the determination of mercury by cold-vapour
+atomic absorption.
+
+[pre
+__________________________________
+Student t test for a single sample
+__________________________________
+
+Number of Observations = 3
+Sample Mean = 37.80000
+Sample Standard Deviation = 0.96437
+Expected True Mean = 38.90000
+
+Sample Mean - Expected Test Mean = -1.10000
+Degrees of Freedom = 2
+T Statistic = -1.97566
+Probability that difference is due to chance = 1.869e-001
+
+Results for Alternative Hypothesis and alpha = 0.0500
+
+Alternative Hypothesis Conclusion
+Mean != 38.900 REJECTED
+Mean < 38.900 NOT REJECTED
+Mean > 38.900 NOT REJECTED
+]
+
+As you can see the small number of measurements (3) has led to a large uncertainty
+in the location of the true mean. So even though there appears to be a difference
+between the sample mean and the expected true mean, we conclude that there
+is no significant difference, and are unable to reject the null hypothesis.
+However, if we were to lower the bar for acceptance down to alpha = 0.1
+(a 90% confidence level) we see a different output:
+
+[pre
+__________________________________
+Student t test for a single sample
+__________________________________
+
+Number of Observations = 3
+Sample Mean = 37.80000
+Sample Standard Deviation = 0.96437
+Expected True Mean = 38.90000
+
+Sample Mean - Expected Test Mean = -1.10000
+Degrees of Freedom = 2
+T Statistic = -1.97566
+Probability that difference is due to chance = 1.869e-001
+
+Results for Alternative Hypothesis and alpha = 0.1000
+
+Alternative Hypothesis Conclusion
+Mean != 38.900 REJECTED
+Mean < 38.900 NOT REJECTED
+Mean > 38.900 REJECTED
+]
+
+In this case, we really have a borderline result,
+and more data (and/or more accurate data),
+is needed for a more convincing conclusion.
+
+[endsect]
+
+[section:tut_mean_size Estimating how large a sample size would have to become
+in order to give a significant Students-t test result with a single sample test]
+
+Imagine you have conducted a Students-t test on a single sample in order
+to check for systematic errors in your measurements. Imagine that the
+result is borderline. At this point one might go off and collect more data,
+but it might be prudent to first ask the question "How much more?".
+The parameter estimators of the students_t_distribution class
+can provide this information.
+
+This section is based on the example code in
+[@../../example/students_t_single_sample.cpp students_t_single_sample.cpp]
+and we begin by defining a procedure that will print out a table of
+estimated sample sizes for various confidence levels:
+
+ // Needed includes:
+ #include <boost/math/distributions/students_t.hpp>
+ #include <iostream>
+ #include <iomanip>
+ // Bring everything into global namespace for ease of use:
+ using namespace boost::math;
+ using namespace std;
+
+ void single_sample_find_df(
+ double M, // M = true mean.
+ double Sm, // Sm = Sample Mean.
+ double Sd) // Sd = Sample Standard Deviation.
+ {
+
+Next we define a table of significance levels:
+
+ double alpha[] = { 0.5, 0.25, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001 };
+
+Printing out the table of sample sizes required for various confidence levels
+begins with the table header:
+
+ cout << "\n\n"
+ "_______________________________________________________________\n"
+ "Confidence Estimated Estimated\n"
+ " Value (%) Sample Size Sample Size\n"
+ " (one sided test) (two sided test)\n"
+ "_______________________________________________________________\n";
+
+
+And now the important part: the sample sizes required. Class
+`students_t_distribution` has a static member function
+`find_degrees_of_freedom` that will calculate how large
+a sample size needs to be in order to give a definitive result.
+
+The first argument is the difference between the means that you
+wish to be able to detect, here it's the absolute value of the
+difference between the sample mean, and the true mean.
+
+Then come two probability values: alpha and beta. Alpha is the
+maximum acceptable risk of rejecting the null-hypothesis when it is
+in fact true. Beta is the maximum acceptable risk of failing to reject
+the null-hypothesis when in fact it is false.
+Also note that for a two-sided test, alpha must be divided by 2.
+
+The final parameter of the function is the standard deviation of the sample.
+
+In this example, we assume that alpha and beta are the same, and call
+`find_degrees_of_freedom` twice: once with alpha for a one-sided test,
+and once with alpha/2 for a two-sided test.
+
+ for(unsigned i = 0; i < sizeof(alpha)/sizeof(alpha[0]); ++i)
+ {
+ // Confidence value:
+ cout << fixed << setprecision(3) << setw(10) << right << 100 * (1-alpha[i]);
+ // calculate df for single sided test:
+ double df = students_t::find_degrees_of_freedom(
+ fabs(M - Sm), alpha[i], alpha[i], Sd);
+ // convert to sample size:
+ double size = ceil(df) + 1;
+ // Print size:
+ cout << fixed << setprecision(0) << setw(16) << right << size;
+ // calculate df for two sided test:
+ df = students_t::find_degrees_of_freedom(
+ fabs(M - Sm), alpha[i]/2, alpha[i], Sd);
+ // convert to sample size:
+ size = ceil(df) + 1;
+ // Print size:
+ cout << fixed << setprecision(0) << setw(16) << right << size << endl;
+ }
+ cout << endl;
+ }
+
+Let's now look at some sample output using data taken from
+['P.K.Hou, O. W. Lau & M.C. Wong, Analyst (1983) vol. 108, p 64.
+and from Statistics for Analytical Chemistry, 3rd ed. (1994), pp 54-55
+J. C. Miller and J. N. Miller, Ellis Horwood ISBN 0 13 0309907.]
+The values result from the determination of mercury by cold-vapour
+atomic absorption.
+
+Only three measurements were made, and the Students-t test above
+gave a borderline result, so this example
+will show us how many samples would need to be collected:
+
+[pre'''
+_____________________________________________________________
+Estimated sample sizes required for various confidence levels
+_____________________________________________________________
+
+True Mean = 38.90000
+Sample Mean = 37.80000
+Sample Standard Deviation = 0.96437
+
+
+_______________________________________________________________
+Confidence Estimated Estimated
+ Value (%) Sample Size Sample Size
+ (one sided test) (two sided test)
+_______________________________________________________________
+ 75.000 3 4
+ 90.000 7 9
+ 95.000 11 13
+ 99.000 20 22
+ 99.900 35 37
+ 99.990 50 53
+ 99.999 66 68
+''']
+
+So in this case, many more measurements would have had to be made,
+for example at the 95% level, 13 measurements in total for a two-sided test.
+
+[endsect]
+[section:two_sample_students_t Comparing the means of two samples with the Students-t test]
+
+Imagine that we have two samples, and we wish to determine whether
+their means are different or not. This situation often arises when
+determining whether a new process or treatment is better than an old one.
+
+In this example, we'll be using the
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3531.htm
+Car Mileage sample data] from the
+[@http://www.itl.nist.gov NIST website]. The data compares
+miles per gallon of US cars with miles per gallon of Japanese cars.
+
+The sample code is in
+[@../../example/students_t_two_samples.cpp students_t_two_samples.cpp].
+
+There are two ways in which this test can be conducted: we can assume
+that the true standard deviations of the two samples are equal or not.
+If the standard deviations are assumed to be equal, then the calculation
+of the t-statistic is greatly simplified, so we'll examine that case first.
+In real life we should verify whether this assumption is valid,
+for example with an F-test for equality of variances.
+
+We begin by defining a procedure that will conduct our test assuming equal
+variances:
+
+ // Needed headers:
+ #include <boost/math/distributions/students_t.hpp>
+ #include <iostream>
+ #include <iomanip>
+ // Simplify usage:
+ using namespace boost::math;
+ using namespace std;
+
+ void two_samples_t_test_equal_sd(
+ double Sm1, // Sm1 = Sample 1 Mean.
+ double Sd1, // Sd1 = Sample 1 Standard Deviation.
+ unsigned Sn1, // Sn1 = Sample 1 Size.
+ double Sm2, // Sm2 = Sample 2 Mean.
+ double Sd2, // Sd2 = Sample 2 Standard Deviation.
+ unsigned Sn2, // Sn2 = Sample 2 Size.
+ double alpha) // alpha = Significance Level.
+ {
+
+
+Our procedure will begin by calculating the t-statistic; assuming
+equal variances, the needed formulae are:
+
+[equation dist_tutorial1]
+
+where Sp is the "pooled" standard deviation of the two samples,
+and /v/ is the number of degrees of freedom of the two combined
+samples. We can now write the code to calculate the t-statistic:
+
+ // Degrees of freedom:
+ double v = Sn1 + Sn2 - 2;
+ cout << setw(55) << left << "Degrees of Freedom" << "= " << v << "\n";
+ // Pooled variance:
+ double sp = sqrt(((Sn1-1) * Sd1 * Sd1 + (Sn2-1) * Sd2 * Sd2) / v);
+ cout << setw(55) << left << "Pooled Standard Deviation" << "= " << sp << "\n";
+ // t-statistic:
+ double t_stat = (Sm1 - Sm2) / (sp * sqrt(1.0 / Sn1 + 1.0 / Sn2));
+ cout << setw(55) << left << "T Statistic" << "= " << t_stat << "\n";
+
+The next step is to define our distribution object, and calculate the
+complement of the probability:
+
+ students_t dist(v);
+ double q = cdf(complement(dist, fabs(t_stat)));
+ cout << setw(55) << left << "Probability that difference is due to chance" << "= "
+ << setprecision(3) << scientific << 2 * q << "\n\n";
+
+Here we've used the absolute value of the t-statistic, because we initially
+want to know simply whether there is a difference or not (a two-sided test).
+However, we can also test whether the mean of the second sample is greater
+or is less (one-sided test) than that of the first:
+all the possible tests are summed up in the following table:
+
+[table
+[[Hypothesis][Test]]
+[[The Null-hypothesis: there is
+*no difference* in means]
+[Reject if complement of CDF for |t| < significance level / 2:
+
+`cdf(complement(dist, fabs(t))) < alpha / 2`]]
+
+[[The Alternative-hypothesis: there is a
+*difference* in means]
+[Reject if complement of CDF for |t| > significance level / 2:
+
+`cdf(complement(dist, fabs(t))) > alpha / 2`]]
+
+[[The Alternative-hypothesis: Sample 1 Mean is *less* than
+Sample 2 Mean.]
+[Reject if CDF of t > significance level:
+
+`cdf(dist, t) > alpha`]]
+
+[[The Alternative-hypothesis: Sample 1 Mean is *greater* than
+Sample 2 Mean.]
+
+[Reject if complement of CDF of t > significance level:
+
+`cdf(complement(dist, t)) > alpha`]]
+]
+
+[note
+For a two-sided test we must compare against alpha / 2 and not alpha.]
+
+Most of the rest of the sample program is pretty-printing, so we'll
+skip over that, and take a look at the sample output for alpha=0.05
+(a 95% probability level). For comparison the dataplot output
+for the same data is in
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda353.htm
+section 1.3.5.3] of the __handbook.
+
+[pre'''
+ ________________________________________________
+ Student t test for two samples (equal variances)
+ ________________________________________________
+
+ Number of Observations (Sample 1) = 249
+ Sample 1 Mean = 20.145
+ Sample 1 Standard Deviation = 6.4147
+ Number of Observations (Sample 2) = 79
+ Sample 2 Mean = 30.481
+ Sample 2 Standard Deviation = 6.1077
+ Degrees of Freedom = 326
+ Pooled Standard Deviation = 6.3426
+ T Statistic = -12.621
+ Probability that difference is due to chance = 5.273e-030
+
+ Results for Alternative Hypothesis and alpha = 0.0500'''
+
+ Alternative Hypothesis Conclusion
+ Sample 1 Mean != Sample 2 Mean NOT REJECTED
+ Sample 1 Mean < Sample 2 Mean NOT REJECTED
+ Sample 1 Mean > Sample 2 Mean REJECTED
+]
+
+So with a probability that the difference is due to chance of just
+5.273e-030, we can safely conclude that there is indeed a difference.
+
+The tests on the alternative hypothesis show that we must
+also reject the hypothesis that Sample 1 Mean is
+greater than that for Sample 2: in this case Sample 1 represents the
+miles per gallon for Japanese cars, and Sample 2 the miles per gallon for
+US cars, so we conclude that Japanese cars are on average more
+fuel efficient.
+
+Now that we have the simple case out of the way, let's look for a moment
+at the more complex one: that the standard deviations of the two samples
+are not equal. In this case the formula for the t-statistic becomes:
+
+[equation dist_tutorial2]
+
+And for the combined degrees of freedom we use the
+[@http://en.wikipedia.org/wiki/Welch-Satterthwaite_equation Welch-Satterthwaite]
+approximation:
+
+[equation dist_tutorial3]
+
+Note that this is one of the rare situations where the degrees-of-freedom
+parameter to the Student's t distribution is a real number, and not an
+integer value.
+
+[note
+Some statistical packages truncate the effective degrees of freedom to
+an integer value: this may be necessary if you are relying on lookup tables,
+but since our code fully supports non-integer degrees of freedom there is no
+need to truncate in this case. Also note that when the degrees of freedom
+is small then the Welch-Satterthwaite approximation may be a significant
+source of error.]
+
+Putting these formulae into code we get:
+
+ // Degrees of freedom:
+ double v = Sd1 * Sd1 / Sn1 + Sd2 * Sd2 / Sn2;
+ v *= v;
+ double t1 = Sd1 * Sd1 / Sn1;
+ t1 *= t1;
+ t1 /= (Sn1 - 1);
+ double t2 = Sd2 * Sd2 / Sn2;
+ t2 *= t2;
+ t2 /= (Sn2 - 1);
+ v /= (t1 + t2);
+ cout << setw(55) << left << "Degrees of Freedom" << "= " << v << "\n";
+ // t-statistic:
+ double t_stat = (Sm1 - Sm2) / sqrt(Sd1 * Sd1 / Sn1 + Sd2 * Sd2 / Sn2);
+ cout << setw(55) << left << "T Statistic" << "= " << t_stat << "\n";
+
+Thereafter the code and the tests are performed the same as before. Using
+our car mileage data again, here's what the output looks like:
+
+[pre'''
+ __________________________________________________
+ Student t test for two samples (unequal variances)
+ __________________________________________________
+
+ Number of Observations (Sample 1) = 249
+ Sample 1 Mean = 20.145
+ Sample 1 Standard Deviation = 6.4147
+ Number of Observations (Sample 2) = 79
+ Sample 2 Mean = 30.481
+ Sample 2 Standard Deviation = 6.1077
+ Degrees of Freedom = 136.87
+ T Statistic = -12.946
+ Probability that difference is due to chance = 1.571e-025
+
+ Results for Alternative Hypothesis and alpha = 0.0500'''
+
+ Alternative Hypothesis Conclusion
+ Sample 1 Mean != Sample 2 Mean NOT REJECTED
+ Sample 1 Mean < Sample 2 Mean NOT REJECTED
+ Sample 1 Mean > Sample 2 Mean REJECTED
+]
+
+This time allowing the variances in the two samples to differ has yielded
+a higher likelihood that the observed difference is down to chance alone
+(1.571e-025 compared to 5.273e-030 when equal variances were assumed).
+However, the conclusion remains the same: US cars are less fuel efficient
+than Japanese models.
+
+[endsect]
+[section:paired_st Comparing two paired samples with the Student's t distribution]
+
+Imagine that we have a before and after reading for each item in the sample:
+for example we might have measured blood pressure before and after administration
+of a new drug. We can't pool the results and compare the means before and after
+the change, because each patient will have a different baseline reading.
+Instead we calculate the difference between before and after measurements
+in each patient, and calculate the mean and standard deviation of the differences.
+To test whether a significant change has taken place, we can then test
+the null-hypothesis that the true mean is zero using the same procedure
+we used in the single sample cases previously discussed.
+
+That means we can:
+
+* [link math_toolkit.stat_tut.weg.st_eg.tut_mean_intervals Calculate confidence intervals of the mean].
+If the endpoints of the interval differ in sign then we are unable to reject
+the null-hypothesis that there is no change.
+* [link math_toolkit.stat_tut.weg.st_eg.tut_mean_test Test whether the true mean is zero]. If the
+result is consistent with a true mean of zero, then we are unable to reject the
+null-hypothesis that there is no change.
+* [link math_toolkit.stat_tut.weg.st_eg.tut_mean_size Calculate how many pairs of readings we would need
+in order to obtain a significant result].
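+
+Putting the pieces together, here is a minimal sketch of a paired test
+(the data values are invented; normally the mean and standard deviation of
+the differences would be computed from the paired readings):
+
+ #include <boost/math/distributions/students_t.hpp>
+ #include <cmath>
+ #include <iostream>
+
+ int main()
+ {
+    using boost::math::students_t;
+    unsigned Sn = 12;  // Number of before/after pairs (invented).
+    double Dm = -4.2;  // Mean of the differences (invented).
+    double Dsd = 5.1;  // Standard deviation of the differences (invented).
+    // Test the null-hypothesis that the true mean of the differences is zero:
+    double t_stat = Dm * std::sqrt(double(Sn)) / Dsd;
+    students_t dist(Sn - 1);
+    double q = cdf(complement(dist, std::fabs(t_stat)));
+    std::cout << "Probability that the change is due to chance = " << 2 * q << '\n';
+    return 0;
+ }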
+
+[endsect]
+
+[endsect][/section:st_eg Student's t]
+
+[/
+ Copyright 2006, 2012 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/triangular.qbk b/doc/distributions/triangular.qbk
new file mode 100644
index 0000000..59c6329
--- /dev/null
+++ b/doc/distributions/triangular.qbk
@@ -0,0 +1,179 @@
+[section:triangular_dist Triangular Distribution]
+
+
+``#include <boost/math/distributions/triangular.hpp>``
+
+ namespace boost{ namespace math{
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class triangular_distribution;
+
+ typedef triangular_distribution<> triangular;
+
+ template <class RealType, class ``__Policy``>
+ class triangular_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+
+ triangular_distribution(RealType lower = -1, RealType mode = 0, RealType upper = 1); // Constructor.
+ : m_lower(lower), m_mode(mode), m_upper(upper) // Default is -1, 0, +1 symmetric triangular distribution.
+ // Accessor functions.
+ RealType lower()const;
+ RealType mode()const;
+ RealType upper()const;
+ }; // class triangular_distribution
+
+ }} // namespaces
+
+The [@http://en.wikipedia.org/wiki/Triangular_distribution triangular distribution]
+is a [@http://en.wikipedia.org/wiki/Continuous_distribution continuous]
+[@http://en.wikipedia.org/wiki/Probability_distribution probability distribution]
+with a lower limit a,
+[@http://en.wikipedia.org/wiki/Mode_%28statistics%29 mode c],
+and upper limit b.
+
+The triangular distribution is often used where the distribution is only vaguely known,
+but, like the [@http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 uniform distribution],
+the upper and lower limits are 'known'; unlike the uniform, a 'best guess', the mode or center point, is also added.
+It has been recommended as a
+[@http://www.worldscibooks.com/mathematics/etextbook/5720/5720_chap1.pdf proxy for the beta distribution.]
+The distribution is used in business decision making and project planning.
+
+The [@http://en.wikipedia.org/wiki/Triangular_distribution triangular distribution]
+is a distribution with the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function]:
+
+__spaces f(x) =
+
+* 2(x-a)/((b-a)(c-a)) for a <= x <= c
+
+* 2(b-x)/((b-a)(b-c)) for c < x <= b
+
+Parameter ['a] (lower) can be any finite value.
+Parameter ['b] (upper) can be any finite value > a (lower).
+Parameter ['c] (mode) must satisfy a <= c <= b; this is the most probable value.
+
+The [@http://en.wikipedia.org/wiki/Random_variate random variate] x must also be finite, and is supported lower <= x <= upper.
+
+The triangular distribution may be appropriate when an assumption of a normal distribution
+is unjustified because uncertainty is caused by rounding and quantization from analog to digital conversion.
+Upper and lower limits are known, and the most probable value lies midway.
+
+The distribution simplifies when the 'best guess' is either the lower or upper limit,
+giving a right-angled triangle.
+For example, the 0, 0, 1 triangular distribution expresses an estimate that the lowest value is the most likely:
+you might believe that the next-day quoted delivery date is most likely
+(knowing that a quicker delivery is impossible - the postman only comes once a day),
+that longer delays are decreasingly likely,
+and that delivery never takes more than your upper limit.
+
+The following graph illustrates how the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function PDF]
+varies with the various parameters:
+
+[graph triangular_pdf]
+
+and cumulative distribution function
+
+[graph triangular_cdf]
+
+[h4 Member Functions]
+
+ triangular_distribution(RealType lower = -1, RealType mode = 0, RealType upper = 1);
+
+Constructs a [@http://en.wikipedia.org/wiki/triangular_distribution triangular distribution]
+with lower /lower/ (a), mode /mode/ (c) and upper /upper/ (b).
+
+Requires that the /lower/, /mode/ and /upper/ parameters are all finite,
+otherwise calls __domain_error.
+
+[warning These constructors are slightly different from the analogs provided by __Mathworld
+[@http://reference.wolfram.com/language/ref/TriangularDistribution.html Triangular distribution],
+where
+
+[^TriangularDistribution\[{min, max}\]] represents a [*symmetric] triangular statistical distribution giving values between min and max.[br]
+[^TriangularDistribution\[\]] represents a [*symmetric] triangular statistical distribution giving values between 0 and 1.[br]
+[^TriangularDistribution\[{min, max}, c\]] represents a triangular distribution with mode at c (usually [*asymmetric]).[br]
+
+So, for example, to compute a variance using __WolframAlpha, use
+[^N\[Variance\[TriangularDistribution\[{1, +2}\]\], 50\]]
+]
+
+The parameters of a distribution can be obtained using these member functions:
+
+ RealType lower()const;
+
+Returns the ['lower] parameter of this distribution (default -1).
+
+ RealType mode()const;
+
+Returns the ['mode] parameter of this distribution (default 0).
+
+ RealType upper()const;
+
+Returns the ['upper] parameter of this distribution (default +1).
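+
+A short usage sketch (the parameter values are arbitrary):
+
+ #include <boost/math/distributions/triangular.hpp>
+ #include <iostream>
+
+ int main()
+ {
+    using boost::math::triangular; // typedef triangular_distribution<> triangular;
+    triangular dist(0., 1., 3.); // lower a = 0, mode c = 1, upper b = 3.
+    std::cout << pdf(dist, 2.) << ' '
+              << cdf(dist, 2.) << ' '
+              << quantile(dist, 0.5) << '\n'; // Median.
+    return 0;
+ }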
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is /lower/ to /upper/,
+and the supported range is lower <= x <= upper.
+
+[h4 Accuracy]
+
+The triangular distribution is implemented with simple arithmetic operators and so should have errors within an epsilon or two,
+except quantiles with arguments nearing the extremes of zero and unity.
+
+[h4 Implementation]
+
+In the following table, a is the /lower/ parameter of the distribution,
+c is the /mode/ parameter,
+b is the /upper/ parameter,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = 0 for x < a or x > b; 2(x-a)\/((b-a)(c-a)) for a <= x <= c; else 2(b-x)\/((b-a)(b-c))]]
+[[cdf][Using the relation: cdf = 0 for x < a; (x-a)[super 2]\/((b-a)(c-a)) for a <= x <= c; 1 - (b-x)[super 2]\/((b-a)(b-c)) for c < x <= b; 1 for x > b]]
+[[cdf complement][Using the relation: q = 1 - p ]]
+[[quantile][let p0 = (c-a)\/(b-a) the point of inflection on the cdf,
+then given probability p and q = 1-p:
+
+x = sqrt((b-a)(c-a)p) + a ; for p < p0
+
+x = c ; for p == p0
+
+x = b - sqrt((b-a)(b-c)q) ; for p > p0
+
+(See [@../../../../boost/math/distributions/triangular.hpp /boost/math/distributions/triangular.hpp] for details.)]]
+[[quantile from the complement][As quantile (See [@../../../../boost/math/distributions/triangular.hpp /boost/math/distributions/triangular.hpp] for details.)]]
+[[mean][(a + b + c) \/ 3 ]]
+[[variance][(a[super 2]+b[super 2]+c[super 2] - ab - ac - bc)\/18]]
+[[mode][c]]
+[[skewness][(See [@../../../../boost/math/distributions/triangular.hpp /boost/math/distributions/triangular.hpp] for details). ]]
+[[kurtosis][12\/5]]
+[[kurtosis excess][-3\/5]]
+]
+
+Some 'known good' test values were obtained using __WolframAlpha.
+
+[h4 References]
+
+* [@http://en.wikipedia.org/wiki/Triangular_distribution Wikipedia triangular distribution]
+* [@http://mathworld.wolfram.com/TriangularDistribution.html Weisstein, Eric W. "Triangular Distribution." From MathWorld--A Wolfram Web Resource.]
+* Evans, M.; Hastings, N.; and Peacock, B. "Triangular Distribution." Ch. 40 in Statistical Distributions, 3rd ed. New York: Wiley, pp. 187-188, 2000, ISBN - 0471371246.
+* [@http://www.measurement.sk/2002/S1/Wimmer2.pdf Gejza Wimmer, Viktor Witkovsky and Tomas Duby,
+Measurement Science Review, Volume 2, Section 1, 2002, Proper Rounding Of The Measurement Results Under The Assumption Of Triangular Distribution.]
+
+[endsect][/section:triangular_dist triangular]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/uniform.qbk b/doc/distributions/uniform.qbk
new file mode 100644
index 0000000..04e1e79
--- /dev/null
+++ b/doc/distributions/uniform.qbk
@@ -0,0 +1,134 @@
+[section:uniform_dist Uniform Distribution]
+
+
+``#include <boost/math/distributions/uniform.hpp>``
+
+ namespace boost{ namespace math{
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class uniform_distribution;
+
+ typedef uniform_distribution<> uniform;
+
+ template <class RealType, class ``__Policy``>
+ class uniform_distribution
+ {
+ public:
+ typedef RealType value_type;
+
+ uniform_distribution(RealType lower = 0, RealType upper = 1); // Constructor.
+ : m_lower(lower), m_upper(upper) // Default is standard uniform distribution.
+ // Accessor functions.
+ RealType lower()const;
+ RealType upper()const;
+ }; // class uniform_distribution
+
+ }} // namespaces
+
+The uniform distribution, also known as a rectangular distribution,
+is a probability distribution that has constant probability density over its support.
+
+The [@http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 continuous uniform distribution]
+is a distribution with the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function]:
+
+f(x) =
+
+* 1 / (upper - lower) for lower < x < upper
+
+* zero for x < lower or x > upper
+
+and in this implementation:
+
+* 1 / (upper - lower) for x = lower or x = upper
+
+The value 1 / (upper - lower) is chosen at x = lower and x = upper because this is judged
+the most useful choice for statistical applications:
+the method of maximum likelihood uses this definition.
+
+There is also a [@http://en.wikipedia.org/wiki/Discrete_uniform_distribution *discrete* uniform distribution].
+
+Parameters lower and upper can be any finite value.
+
+The [@http://en.wikipedia.org/wiki/Random_variate random variate]
+x must also be finite, and is supported lower <= x <= upper.
+
+The lower parameter is also called the
+[@http://www.itl.nist.gov/div898/handbook/eda/section3/eda364.htm location parameter],
+[@http://en.wikipedia.org/wiki/Location_parameter that is where the origin of a plot will lie],
+and (upper - lower) is also called the [@http://en.wikipedia.org/wiki/Scale_parameter scale parameter].
+
+The following graph illustrates how the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function PDF]
+varies with its parameters:
+
+[graph uniform_pdf]
+
+Likewise for the CDF:
+
+[graph uniform_cdf]
+
+[h4 Member Functions]
+
+ uniform_distribution(RealType lower = 0, RealType upper = 1);
+
+Constructs a [@http://en.wikipedia.org/wiki/uniform_distribution
+uniform distribution] with lower /lower/ (a) and upper /upper/ (b).
+
+Requires that the /lower/ and /upper/ parameters are both finite;
+otherwise (if either is infinity or NaN) __domain_error is called.
+
+ RealType lower()const;
+
+Returns the /lower/ parameter of this distribution.
+
+ RealType upper()const;
+
+Returns the /upper/ parameter of this distribution.
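+
+A short usage sketch (the values are arbitrary):
+
+ #include <boost/math/distributions/uniform.hpp>
+ #include <iostream>
+
+ int main()
+ {
+    using boost::math::uniform; // typedef uniform_distribution<> uniform;
+    uniform dist(0., 10.); // lower = 0, upper = 10.
+    std::cout << pdf(dist, 5.) << ' '          // 1/(10 - 0) = 0.1.
+              << cdf(dist, 2.5) << ' '         // (2.5 - 0)/(10 - 0) = 0.25.
+              << quantile(dist, 0.75) << '\n'; // 0.75 * (10 - 0) + 0 = 7.5.
+    return 0;
+ }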
+
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions]
+that are generic to all distributions are supported: __usual_accessors.
+
+The domain of the random variable is any finite value,
+but the supported range is only /lower/ <= x <= /upper/.
+
+[h4 Accuracy]
+
+The uniform distribution is implemented with simple arithmetic operators and so should have errors within an epsilon or two.
+
+[h4 Implementation]
+
+In the following table a is the /lower/ parameter of the distribution,
+b is the /upper/ parameter,
+/x/ is the random variate, /p/ is the probability and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = 0 for x < a, 1 / (b - a) for a <= x <= b, 0 for x > b ]]
+[[cdf][Using the relation: cdf = 0 for x < a, (x - a) / (b - a) for a <= x <= b, 1 for x > b]]
+[[cdf complement][Using the relation: q = 1 - p = (b - x) / (b - a) ]]
+[[quantile][Using the relation: x = p * (b - a) + a; ]]
+[[quantile from the complement][x = -q * (b - a) + b ]]
+[[mean][(a + b) / 2 ]]
+[[variance][(b - a) [super 2] / 12 ]]
+[[mode][any value in \[a, b\] but a is chosen. (Would NaN be better?) ]]
+[[skewness][0]]
+[[kurtosis excess][-6/5 = -1.2 exactly. (kurtosis - 3)]]
+[[kurtosis][9/5]]
+]
+
+[h4 References]
+* [@http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 Wikipedia continuous uniform distribution]
+* [@http://mathworld.wolfram.com/UniformDistribution.html Weisstein, Eric W. "Uniform Distribution." From MathWorld--A Wolfram Web Resource.]
+* [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3662.htm]
+
+[endsect][/section:uniform_dist Uniform]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
+
diff --git a/doc/distributions/weibull.qbk b/doc/distributions/weibull.qbk
new file mode 100644
index 0000000..0d57776
--- /dev/null
+++ b/doc/distributions/weibull.qbk
@@ -0,0 +1,132 @@
+[section:weibull_dist Weibull Distribution]
+
+
+``#include <boost/math/distributions/weibull.hpp>``
+
+ namespace boost{ namespace math{
+
+ template <class RealType = double,
+ class ``__Policy`` = ``__policy_class`` >
+ class weibull_distribution;
+
+ typedef weibull_distribution<> weibull;
+
+ template <class RealType, class ``__Policy``>
+ class weibull_distribution
+ {
+ public:
+ typedef RealType value_type;
+ typedef Policy policy_type;
+ // Construct:
+      weibull_distribution(RealType shape, RealType scale = 1);
+ // Accessors:
+ RealType shape()const;
+ RealType scale()const;
+ };
+
+ }} // namespaces
+
+The [@http://en.wikipedia.org/wiki/Weibull_distribution Weibull distribution]
+is a continuous distribution
+with the
+[@http://en.wikipedia.org/wiki/Probability_density_function probability density function]:
+
+f(x; [alpha], [beta]) = ([alpha]\/[beta]) * (x \/ [beta])[super [alpha] - 1] * e[super -(x\/[beta])[super [alpha]]]
+
+For shape parameter [alpha][space] > 0, scale parameter [beta][space] > 0, and x > 0.
+
+The Weibull distribution is often used in the field of failure analysis;
+in particular it can mimic distributions where the failure rate varies over time.
+If the failure rate is:
+
+* constant over time, then [alpha][space] = 1, suggesting that items are failing from random events.
+* decreasing over time, then [alpha][space] < 1, suggesting "infant mortality".
+* increasing over time, then [alpha][space] > 1, suggesting "wear out": items become more
+likely to fail as time goes by (the three regimes are illustrated in the sketch below).
+
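+One way to see these three regimes numerically is to evaluate the hazard
+function (the instantaneous failure rate, pdf \/ (1 - cdf)) at increasing
+times; a sketch, with shape values chosen purely for illustration:
+
+   #include <boost/math/distributions/weibull.hpp>
+   #include <iostream>
+
+   int main()
+   {
+      using boost::math::weibull; // typedef for weibull_distribution<>
+      weibull infant(0.5), random_fail(1.), wear_out(2.); // scale defaults to 1
+      for (double t = 0.5; t <= 2.; t += 0.5)
+      { // The hazard falls, stays flat, and rises, respectively.
+         std::cout << t << ": " << hazard(infant, t) << " "
+            << hazard(random_fail, t) << " " << hazard(wear_out, t) << std::endl;
+      }
+   } // int main()
+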
+The following graph illustrates how the PDF varies with the shape parameter [alpha]:
+
+[graph weibull_pdf1]
+
+While this graph illustrates how the PDF varies with the scale parameter [beta]:
+
+[graph weibull_pdf2]
+
+[h4 Related distributions]
+
+When [alpha][space] = 3, the
+[@http://en.wikipedia.org/wiki/Weibull_distribution Weibull distribution] appears similar to the
+[@http://en.wikipedia.org/wiki/Normal_distribution normal distribution].
+When [alpha][space] = 1, the Weibull distribution reduces to the
+[@http://en.wikipedia.org/wiki/Exponential_distribution exponential distribution].
+The relationship between the types of extreme value distribution,
+of which the Weibull is but one, is discussed in
+[@http://www.worldscibooks.com/mathematics/p191.html Extreme Value Distributions, Theory and Applications,
+by Samuel Kotz & Saralees Nadarajah].
+
+
+[h4 Member Functions]
+
+ weibull_distribution(RealType shape, RealType scale = 1);
+
+Constructs a [@http://en.wikipedia.org/wiki/Weibull_distribution
+Weibull distribution] with shape /shape/ and scale /scale/.
+
+Requires that the /shape/ and /scale/ parameters are both greater than zero,
+otherwise calls __domain_error.
+
+ RealType shape()const;
+
+Returns the /shape/ parameter of this distribution.
+
+ RealType scale()const;
+
+Returns the /scale/ parameter of this distribution.
+
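+A minimal usage sketch (shape 2, scale 3, and time 4 are arbitrary
+illustrative values):
+
+   #include <boost/math/distributions/weibull.hpp>
+   #include <iostream>
+
+   int main()
+   {
+      using boost::math::weibull; // typedef for weibull_distribution<>
+      weibull w(2., 3.); // shape = 2, scale = 3
+      std::cout << w.shape() << ", " << w.scale() << std::endl; // 2, 3
+      // Probability that an item survives beyond time t = 4,
+      // the complement of the CDF:
+      std::cout << cdf(complement(w, 4.)) << std::endl; // exp(-(4/3)^2) ~= 0.169
+   } // int main()
+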
+[h4 Non-member Accessors]
+
+All the [link math_toolkit.dist_ref.nmp usual non-member accessor functions] that are generic to all
+distributions are supported: __usual_accessors.
+
+The domain of the random variable is \[0, [infin]\].
+
+[h4 Accuracy]
+
+The Weibull distribution is implemented in terms of the
+standard library `log` and `exp` functions plus __expm1 and __log1p
+and as such should have very low error rates.
+
+[h4 Implementation]
+
+
+In the following table [alpha][space] is the shape parameter of the distribution,
+[beta][space] is its scale parameter, /x/ is the random variate, /p/ is the probability
+and /q = 1-p/.
+
+[table
+[[Function][Implementation Notes]]
+[[pdf][Using the relation: pdf = [alpha][beta][super -[alpha]] x[super [alpha] - 1] e[super -(x\/[beta])[super [alpha]]] ]]
+[[cdf][Using the relation: p = -__expm1(-(x\/[beta])[super [alpha]]) ]]
+[[cdf complement][Using the relation: q = e[super -(x\/[beta])[super [alpha]]] ]]
+[[quantile][Using the relation: x = [beta] * (-__log1p(-p))[super 1\/[alpha]] ]]
+[[quantile from the complement][Using the relation: x = [beta] * (-log(q))[super 1\/[alpha]] ]]
+[[mean][[beta] * [Gamma](1 + 1\/[alpha]) ]]
+[[variance][[beta][super 2]([Gamma](1 + 2\/[alpha]) - [Gamma][super 2](1 + 1\/[alpha])) ]]
+[[mode][[beta](([alpha] - 1) \/ [alpha])[super 1\/[alpha]] ]]
+[[skewness][Refer to [@http://mathworld.wolfram.com/WeibullDistribution.html Weisstein, Eric W. "Weibull Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+[[kurtosis][Refer to [@http://mathworld.wolfram.com/WeibullDistribution.html Weisstein, Eric W. "Weibull Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+[[kurtosis excess][Refer to [@http://mathworld.wolfram.com/WeibullDistribution.html Weisstein, Eric W. "Weibull Distribution." From MathWorld--A Wolfram Web Resource.] ]]
+]
+
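+The cdf relation in the table can be reproduced directly with the
+standard library's `expm1`; a sketch, with arbitrary parameter values:
+
+   #include <boost/math/distributions/weibull.hpp>
+   #include <cmath>
+   #include <iostream>
+
+   int main()
+   {
+      double const alpha = 1.5, beta = 2., x = 1.; // arbitrary values
+      boost::math::weibull w(alpha, beta);
+      // p = -expm1(-(x / beta)^alpha), as in the table above:
+      double p = -std::expm1(-std::pow(x / beta, alpha));
+      std::cout << p << " " << cdf(w, x) << std::endl; // the two agree
+   } // int main()
+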
+[h4 References]
+* [@http://en.wikipedia.org/wiki/Weibull_distribution Wikipedia Weibull distribution]
+* [@http://mathworld.wolfram.com/WeibullDistribution.html Weisstein, Eric W. "Weibull Distribution." From MathWorld--A Wolfram Web Resource.]
+* [@http://www.itl.nist.gov/div898/handbook/eda/section3/eda3668.htm Weibull in NIST Exploratory Data Analysis]
+
+[endsect][/section:weibull_dist Weibull Distribution]
+
+[/
+ Copyright 2006 John Maddock and Paul A. Bristow.
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt).
+]
diff --git a/doc/equations/acosh1.mml b/doc/equations/acosh1.mml
new file mode 100644
index 0000000..0d420a0
--- /dev/null
+++ b/doc/equations/acosh1.mml
@@ -0,0 +1,39 @@
+<?xml version='1.0'?>
+<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN'
+ 'http://www.w3.org/TR/MathML2/dtd/xhtml-math11-f.dtd'
+ [<!ENTITY mathml 'http://www.w3.org/1998/Math/MathML'>]>
+<html xmlns='http://www.w3.org/1999/xhtml'>
+<head><title>acosh1</title>
+<!-- MathML created with MathCast Equation Editor version 0.89 -->
+</head>
+<body>
+<math xmlns="http://www.w3.org/1998/Math/MathML" display="block">
+ <mrow>
+ <mtext>acosh</mtext>
+ <mfenced>
+ <mrow>
+ <mi>x</mi>
+ </mrow>
+ </mfenced>
+ <mo>=</mo>
+ <mi>ln</mi>
+ <mfenced>
+ <mrow>
+ <mi>x</mi>
+ <mo>+</mo>
+ <msqrt>
+ <mrow>
+ <msup>
+ <mi>x</mi>
+ <mn>2</mn>
+ </msup>
+ <mo>−</mo>
+ <mn>1</mn>
+ </mrow>
+ </msqrt>
+ </mrow>
+ </mfenced>
+ </mrow>
+</math>
+</body>
+</html>