Squashed 'third_party/boostorg/move/' content from commit d503fbe
Change-Id: I5f8ac37161a1044b02ffb1f59cf15622fc6acd17
git-subtree-dir: third_party/boostorg/move
git-subtree-split: d503fbe1c8334fa8885e67cb83c96aeaf3938555
diff --git a/include/boost/move/algo/adaptive_merge.hpp b/include/boost/move/algo/adaptive_merge.hpp
new file mode 100644
index 0000000..4de4007
--- /dev/null
+++ b/include/boost/move/algo/adaptive_merge.hpp
@@ -0,0 +1,343 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2015-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_MOVE_ADAPTIVE_MERGE_HPP
+#define BOOST_MOVE_ADAPTIVE_MERGE_HPP
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/algo/detail/adaptive_sort_merge.hpp>
+
+namespace boost {
+namespace movelib {
+
+///@cond
+namespace detail_adaptive {
+
+template<class RandIt, class Compare, class XBuf>
+inline void adaptive_merge_combine_blocks( RandIt first
+ , typename iterator_traits<RandIt>::size_type len1
+ , typename iterator_traits<RandIt>::size_type len2
+ , typename iterator_traits<RandIt>::size_type collected
+ , typename iterator_traits<RandIt>::size_type n_keys
+ , typename iterator_traits<RandIt>::size_type l_block
+ , bool use_internal_buf
+ , bool xbuf_used
+ , Compare comp
+ , XBuf & xbuf
+ )
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const len = len1+len2;
+ size_type const l_combine = len-collected;
+ size_type const l_combine1 = len1-collected;
+
+ if(n_keys){
+ RandIt const first_data = first+collected;
+ RandIt const keys = first;
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A combine: ", len);
+ if(xbuf_used){
+ if(xbuf.size() < l_block){
+ xbuf.initialize_until(l_block, *first);
+ }
+ BOOST_ASSERT(xbuf.size() >= l_block);
+ size_type n_block_a, n_block_b, l_irreg1, l_irreg2;
+ combine_params( keys, comp, l_combine
+ , l_combine1, l_block, xbuf
+ , n_block_a, n_block_b, l_irreg1, l_irreg2); //Outputs
+ op_merge_blocks_with_buf
+ (keys, comp, first_data, l_block, l_irreg1, n_block_a, n_block_b, l_irreg2, comp, move_op(), xbuf.data());
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(" A mrg xbf: ", len);
+ }
+ else{
+ size_type n_block_a, n_block_b, l_irreg1, l_irreg2;
+ combine_params( keys, comp, l_combine
+ , l_combine1, l_block, xbuf
+ , n_block_a, n_block_b, l_irreg1, l_irreg2); //Outputs
+ if(use_internal_buf){
+ op_merge_blocks_with_buf
+ (keys, comp, first_data, l_block, l_irreg1, n_block_a, n_block_b, l_irreg2, comp, swap_op(), first_data-l_block);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A mrg buf: ", len);
+ }
+ else{
+ merge_blocks_bufferless
+ (keys, comp, first_data, l_block, l_irreg1, n_block_a, n_block_b, l_irreg2, comp);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(" A mrg nbf: ", len);
+ }
+ }
+ }
+ else{
+ xbuf.shrink_to_fit(l_block);
+ if(xbuf.size() < l_block){
+ xbuf.initialize_until(l_block, *first);
+ }
+ size_type *const uint_keys = xbuf.template aligned_trailing<size_type>(l_block);
+ size_type n_block_a, n_block_b, l_irreg1, l_irreg2;
+ combine_params( uint_keys, less(), l_combine
+ , l_combine1, l_block, xbuf
+ , n_block_a, n_block_b, l_irreg1, l_irreg2, true); //Outputs
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A combine: ", len);
+ BOOST_ASSERT(xbuf.size() >= l_block);
+ op_merge_blocks_with_buf
+ (uint_keys, less(), first, l_block, l_irreg1, n_block_a, n_block_b, l_irreg2, comp, move_op(), xbuf.data());
+ xbuf.clear();
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(" A mrg buf: ", len);
+ }
+}
+
+template<class RandIt, class Compare, class XBuf>
+inline void adaptive_merge_final_merge( RandIt first
+ , typename iterator_traits<RandIt>::size_type len1
+ , typename iterator_traits<RandIt>::size_type len2
+ , typename iterator_traits<RandIt>::size_type collected
+ , typename iterator_traits<RandIt>::size_type l_intbuf
+ , typename iterator_traits<RandIt>::size_type l_block
+ , bool use_internal_buf
+ , bool xbuf_used
+ , Compare comp
+ , XBuf & xbuf
+ )
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ (void)l_block;
+ size_type n_keys = collected-l_intbuf;
+ size_type len = len1+len2;
+ if(use_internal_buf){
+ if(xbuf_used){
+ xbuf.clear();
+ //Nothing to do
+ if(n_keys){
+ unstable_sort(first, first+n_keys, comp, xbuf);
+ stable_merge(first, first+n_keys, first+len, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A key mrg: ", len);
+ }
+ }
+ else{
+ xbuf.clear();
+ unstable_sort(first, first+collected, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A k/b srt: ", len);
+ stable_merge(first, first+collected, first+len, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A k/b mrg: ", len);
+ }
+ }
+ else{
+ xbuf.clear();
+ unstable_sort(first, first+collected, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A k/b srt: ", len);
+ stable_merge(first, first+collected, first+len1+len2, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A k/b mrg: ", len);
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(" A fin mrg: ", len);
+}
+
+template<class SizeType>
+inline static SizeType adaptive_merge_n_keys_without_external_keys(SizeType l_block, SizeType len1, SizeType len2, SizeType l_intbuf)
+{
+ typedef SizeType size_type;
+ //This is the minimum number of keys to implement the ideal algorithm
+ size_type n_keys = len1/l_block+len2/l_block;
+ const size_type second_half_blocks = len2/l_block;
+ const size_type first_half_aux = len1-l_intbuf;
+ while(n_keys >= ((first_half_aux-n_keys)/l_block + second_half_blocks)){
+ --n_keys;
+ }
+ ++n_keys;
+ return n_keys;
+}
+
+template<class SizeType>
+inline static SizeType adaptive_merge_n_keys_with_external_keys(SizeType l_block, SizeType len1, SizeType len2, SizeType l_intbuf)
+{
+ typedef SizeType size_type;
+ //This is the minimum number of keys to implement the ideal algorithm
+ size_type n_keys = (len1-l_intbuf)/l_block + len2/l_block;
+ return n_keys;
+}
+
+template<class SizeType, class Xbuf>
+inline SizeType adaptive_merge_n_keys_intbuf(SizeType &rl_block, SizeType len1, SizeType len2, Xbuf & xbuf, SizeType &l_intbuf_inout)
+{
+ typedef SizeType size_type;
+ size_type l_block = rl_block;
+ size_type l_intbuf = xbuf.capacity() >= l_block ? 0u : l_block;
+
+ while(xbuf.capacity() >= l_block*2){
+ l_block *= 2;
+ }
+
+ //This is the minimum number of keys to implement the ideal algorithm
+ size_type n_keys = adaptive_merge_n_keys_without_external_keys(l_block, len1, len2, l_intbuf);
+ BOOST_ASSERT(n_keys >= ((len1-l_intbuf-n_keys)/l_block + len2/l_block));
+
+ if(xbuf.template supports_aligned_trailing<size_type>
+ ( l_block
+ , adaptive_merge_n_keys_with_external_keys(l_block, len1, len2, l_intbuf)))
+ {
+ n_keys = 0u;
+ }
+ l_intbuf_inout = l_intbuf;
+ rl_block = l_block;
+ return n_keys;
+}
+
+// Main explanation of the merge algorithm.
+//
+// csqrtlen = ceil(sqrt(len));
+//
+// * First, csqrtlen [to be used as buffer] + (len/csqrtlen - 1) [to be used as keys] => to_collect
+//   unique elements are extracted from the elements to be merged and placed at the beginning of the range.
+//
+// * Step "combine_blocks": the leading (len1-to_collect) elements plus trailing len2 elements
+// are merged with a non-trivial ("smart") algorithm to form an ordered range trailing "len-to_collect" elements.
+//
+// Explanation of the "combine_blocks" step:
+//
+// * Trailing [first+to_collect, first+len1) elements are divided into groups of csqrtlen elements.
+//   Remaining elements that can't form a group are grouped in front of those elements.
+// * Trailing [first+len1, first+len1+len2) elements are divided into groups of csqrtlen elements.
+//   Remaining elements that can't form a group are grouped at the back of those elements.
+// * In parallel the following two steps are performed:
+//     * Groups are selection-sorted by first or last element (depending on whether they are going
+//       to be merged to the left or right) and keys are reordered accordingly as an imitation-buffer.
+// * Elements of each block pair are merged using the csqrtlen buffer taking into account
+// if they belong to the first half or second half (marked by the key).
+//
+// * In the final merge step the leading "to_collect" elements are merged, using rotations,
+//   with the rest of the elements merged in the "combine_blocks" step.
+//
+// Corner cases:
+//
+// * If no "to_collect" elements can be extracted:
+//
+//    * If more than a minimum number of elements is extracted,
+//      then the number of elements used as buffer and keys in the
+//      "combine_blocks" step is reduced. If "combine_blocks" does not have enough keys due to this reduction,
+//      then a rotation-based smart merge is used.
+//
+// * If the minimum number of keys can't be extracted, a rotation-based merge is performed.
+//
+// * If auxiliary memory is greater than or equal to min(len1, len2), a buffered merge is performed.
+//
+// * If len1 or len2 is less than 2*csqrtlen, a rotation-based merge is performed.
+//
+// * If auxiliary memory is larger than csqrtlen+n_keys*sizeof(std::size_t),
+//    then no csqrtlen elements need to be extracted and "combine_blocks" will use integral
+//    keys to combine blocks.
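+//
+// Worked example (illustrative): for len1 == len2 == 5000 (len == 10000), csqrtlen == 100 and
+// to_collect == 100 + (10000/100 - 1) == 199. "combine_blocks" then merges the remaining
+// 9801 (len - to_collect) elements into an ordered trailing range, and the final step merges
+// the leading 199 collected elements back using rotations.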
+template<class RandIt, class Compare, class XBuf>
+void adaptive_merge_impl
+ ( RandIt first
+ , typename iterator_traits<RandIt>::size_type len1
+ , typename iterator_traits<RandIt>::size_type len2
+ , Compare comp
+ , XBuf & xbuf
+ )
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+
+ if(xbuf.capacity() >= min_value<size_type>(len1, len2)){
+ buffered_merge(first, first+len1, first+(len1+len2), comp, xbuf);
+ }
+ else{
+ const size_type len = len1+len2;
+ //Calculate ideal parameters and try to collect needed unique keys
+ size_type l_block = size_type(ceil_sqrt(len));
+
+      //One range is not big enough to extract keys and the internal buffer, so a
+      //rotation-based merge will do just fine
+ if(len1 <= l_block*2 || len2 <= l_block*2){
+ merge_bufferless(first, first+len1, first+len1+len2, comp);
+ return;
+ }
+
+      //Compute the number of keys and the internal buffer length. If xbuf has enough memory, no
+      //internal buffer is needed, so l_intbuf will remain 0.
+ size_type l_intbuf = 0;
+ size_type n_keys = adaptive_merge_n_keys_intbuf(l_block, len1, len2, xbuf, l_intbuf);
+ size_type const to_collect = l_intbuf+n_keys;
+ //Try to extract needed unique values from the first range
+ size_type const collected = collect_unique(first, first+len1, to_collect, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1("\n A collect: ", len);
+
+      //If the minimum number of keys is not available in the first range, fall back to rotations
+ if(collected != to_collect && collected < 4){
+ merge_bufferless(first, first+collected, first+len1, comp);
+ merge_bufferless(first, first + len1, first + len1 + len2, comp);
+ return;
+ }
+
+ //If not enough keys but more than minimum, adjust the internal buffer and key count
+ bool use_internal_buf = collected == to_collect;
+ if (!use_internal_buf){
+ l_intbuf = 0u;
+ n_keys = collected;
+ l_block = lblock_for_combine(l_intbuf, n_keys, len, use_internal_buf);
+         //If use_internal_buf is false, then the internal buffer will be zero and rotation-based combination will be used
+ l_intbuf = use_internal_buf ? l_block : 0u;
+ }
+
+ bool const xbuf_used = collected == to_collect && xbuf.capacity() >= l_block;
+ //Merge trailing elements using smart merges
+ adaptive_merge_combine_blocks(first, len1, len2, collected, n_keys, l_block, use_internal_buf, xbuf_used, comp, xbuf);
+ //Merge buffer and keys with the rest of the values
+ adaptive_merge_final_merge (first, len1, len2, collected, l_intbuf, l_block, use_internal_buf, xbuf_used, comp, xbuf);
+ }
+}
+
+} //namespace detail_adaptive {
+
+///@endcond
+
+//! <b>Effects</b>: Merges two consecutive sorted ranges [first, middle) and [middle, last)
+//! into one sorted range [first, last) according to the given comparison function comp.
+//! The algorithm is stable (if there are equivalent elements in the original two ranges,
+//! the elements from the first range (preserving their original order) precede the elements
+//! from the second range (preserving their original order)).
+//!
+//! <b>Requires</b>:
+//! - RandIt must meet the requirements of ValueSwappable and RandomAccessIterator.
+//! - The type of dereferenced RandIt must meet the requirements of MoveAssignable and MoveConstructible.
+//!
+//! <b>Parameters</b>:
+//! - first: the beginning of the first sorted range.
+//! - middle: the end of the first sorted range and the beginning of the second
+//! - last: the end of the second sorted range
+//!   - comp: comparison function object which returns true if the first argument is ordered before the second.
+//! - uninitialized, uninitialized_len: raw storage starting on "uninitialized", able to hold "uninitialized_len"
+//! elements of type iterator_traits<RandIt>::value_type. Maximum performance is achieved when uninitialized_len
+//! is min(std::distance(first, middle), std::distance(middle, last)).
+//!
+//! <b>Throws</b>: If comp throws or the move constructor, move assignment or swap of the type
+//! of dereferenced RandIt throws.
+//!
+//! <b>Complexity</b>: Always K x O(N) comparisons and move assignments/constructors/swaps.
+//! Constant factor for comparisons and data movement is minimized when uninitialized_len
+//! is min(std::distance(first, middle), std::distance(middle, last)).
+//!   Reasonably good performance is achieved when uninitialized_len is
+//! ceil(sqrt(std::distance(first, last)))*2.
+//!
+//! <b>Caution</b>: Experimental implementation, not production-ready.
+template<class RandIt, class Compare>
+void adaptive_merge( RandIt first, RandIt middle, RandIt last, Compare comp
+ , typename iterator_traits<RandIt>::value_type* uninitialized = 0
+ , std::size_t uninitialized_len = 0)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+
+ ::boost::movelib::detail_adaptive::adaptive_xbuf<value_type> xbuf(uninitialized, uninitialized_len);
+ ::boost::movelib::detail_adaptive::adaptive_merge_impl(first, size_type(middle - first), size_type(last - middle), comp, xbuf);
+}
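+
+//Illustrative usage sketch (assumes C++11 and that the caller includes <vector> and <functional>).
+//Both halves must already be sorted; the raw storage arguments are optional and only affect performance:
+//
+//   std::vector<int> v = {1, 3, 5, 0, 2, 4};   // [0, 3) and [3, 6) are each sorted
+//   int storage[3];                            // raw storage: min(3, 3) elements
+//   boost::movelib::adaptive_merge(v.begin(), v.begin() + 3, v.end(),
+//                                  std::less<int>(), storage, 3u);
+//   // v is now {0, 1, 2, 3, 4, 5}; omitting storage and 3u also works, using no extra memory.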
+
+} //namespace movelib {
+} //namespace boost {
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //#define BOOST_MOVE_ADAPTIVE_MERGE_HPP
diff --git a/include/boost/move/algo/adaptive_sort.hpp b/include/boost/move/algo/adaptive_sort.hpp
new file mode 100644
index 0000000..4f50bf1
--- /dev/null
+++ b/include/boost/move/algo/adaptive_sort.hpp
@@ -0,0 +1,631 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2015-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_MOVE_ADAPTIVE_SORT_HPP
+#define BOOST_MOVE_ADAPTIVE_SORT_HPP
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/algo/detail/adaptive_sort_merge.hpp>
+
+namespace boost {
+namespace movelib {
+
+///@cond
+namespace detail_adaptive {
+
+template<class RandIt>
+void move_data_backward( RandIt cur_pos
+ , typename iterator_traits<RandIt>::size_type const l_data
+ , RandIt new_pos
+ , bool const xbuf_used)
+{
+ //Move buffer to the total combination right
+ if(xbuf_used){
+ boost::move_backward(cur_pos, cur_pos+l_data, new_pos+l_data);
+ }
+ else{
+ boost::adl_move_swap_ranges_backward(cur_pos, cur_pos+l_data, new_pos+l_data);
+      //Rotate does fewer moves but it seems slower due to cache issues
+ //rotate_gcd(first-l_block, first+len-l_block, first+len);
+ }
+}
+
+template<class RandIt>
+void move_data_forward( RandIt cur_pos
+ , typename iterator_traits<RandIt>::size_type const l_data
+ , RandIt new_pos
+ , bool const xbuf_used)
+{
+ //Move buffer to the total combination right
+ if(xbuf_used){
+ boost::move(cur_pos, cur_pos+l_data, new_pos);
+ }
+ else{
+ boost::adl_move_swap_ranges(cur_pos, cur_pos+l_data, new_pos);
+      //Rotate does fewer moves but it seems slower due to cache issues
+ //rotate_gcd(first-l_block, first+len-l_block, first+len);
+ }
+}
+
+// build blocks of length 2*l_build_buf. l_build_buf is a power of two
+// input: [0, l_build_buf) elements are buffer, rest unsorted elements
+// output: [0, l_build_buf) elements are buffer, blocks 2*l_build_buf and last subblock sorted
+//
+// First elements are merged from right to left until elements start
+// at first. All old elements [first, first + l_build_buf) are placed at the end
+// [first+len-l_build_buf, first+len). To achieve this:
+// - If we have external memory to merge, we save elements from the buffer
+// so that a non-swapping merge is used. Buffer elements are restored
+// at the end of the buffer from the external memory.
+//
+// - When the external memory is not available or it is insufficient
+// for a merge operation, left swap merging is used.
+//
+// Once elements are merged left to right in blocks of l_build_buf, a single left
+// to right merge step is performed to achieve merged blocks of size 2*l_build_buf.
+// If external memory is available, the usual merge is used, swap merging otherwise.
+//
+// As a last step, if auxiliary memory is available, in-place merging is performed
+// until everything is merged or the auxiliary memory is not large enough.
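+//
+// Worked example (illustrative): with l_base == 16 and l_build_buf == 64, the elements after the
+// 64-element buffer are first insertion-sorted in runs of 16 elements, those runs are merged to
+// the left doubling their size (16 -> 32 -> 64), and the single merge-to-right step finally
+// produces sorted blocks of 2*l_build_buf == 128 elements.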
+template<class RandIt, class Compare, class XBuf>
+typename iterator_traits<RandIt>::size_type
+ adaptive_sort_build_blocks
+ ( RandIt const first
+ , typename iterator_traits<RandIt>::size_type const len
+ , typename iterator_traits<RandIt>::size_type const l_base
+ , typename iterator_traits<RandIt>::size_type const l_build_buf
+ , XBuf & xbuf
+ , Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ BOOST_ASSERT(l_build_buf <= len);
+ BOOST_ASSERT(0 == ((l_build_buf / l_base)&(l_build_buf/l_base-1)));
+
+ //Place the start pointer after the buffer
+ RandIt first_block = first + l_build_buf;
+ size_type const elements_in_blocks = len - l_build_buf;
+
+ //////////////////////////////////
+ // Start of merge to left step
+ //////////////////////////////////
+ size_type l_merged = 0u;
+
+ BOOST_ASSERT(l_build_buf);
+   //If there is not enough buffer for the insertion sort step, just avoid the external buffer
+ size_type kbuf = min_value<size_type>(l_build_buf, size_type(xbuf.capacity()));
+ kbuf = kbuf < l_base ? 0 : kbuf;
+
+ if(kbuf){
+ //Backup internal buffer values in external buffer so they can be overwritten
+ xbuf.move_assign(first+l_build_buf-kbuf, kbuf);
+ l_merged = op_insertion_sort_step_left(first_block, elements_in_blocks, l_base, comp, move_op());
+
+ //Now combine them using the buffer. Elements from buffer can be
+ //overwritten since they've been saved to xbuf
+ l_merged = op_merge_left_step_multiple
+ ( first_block - l_merged, elements_in_blocks, l_merged, l_build_buf, kbuf - l_merged, comp, move_op());
+
+ //Restore internal buffer from external buffer unless kbuf was l_build_buf,
+ //in that case restoration will happen later
+ if(kbuf != l_build_buf){
+ boost::move(xbuf.data()+kbuf-l_merged, xbuf.data() + kbuf, first_block-l_merged+elements_in_blocks);
+ }
+ }
+ else{
+ l_merged = insertion_sort_step(first_block, elements_in_blocks, l_base, comp);
+ rotate_gcd(first_block - l_merged, first_block, first_block+elements_in_blocks);
+ }
+
+ //Now combine elements using the buffer. Elements from buffer can't be
+   //overwritten since xbuf was not big enough, so merge by swapping elements.
+ l_merged = op_merge_left_step_multiple
+ (first_block - l_merged, elements_in_blocks, l_merged, l_build_buf, l_build_buf - l_merged, comp, swap_op());
+
+ BOOST_ASSERT(l_merged == l_build_buf);
+
+ //////////////////////////////////
+ // Start of merge to right step
+ //////////////////////////////////
+
+ //If kbuf is l_build_buf then we can merge right without swapping
+ //Saved data is still in xbuf
+ if(kbuf && kbuf == l_build_buf){
+ op_merge_right_step_once(first, elements_in_blocks, l_build_buf, comp, move_op());
+      //Restore internal buffer from external buffer if kbuf was l_build_buf,
+      //as this operation was previously delayed.
+ boost::move(xbuf.data(), xbuf.data() + kbuf, first);
+ }
+ else{
+ op_merge_right_step_once(first, elements_in_blocks, l_build_buf, comp, swap_op());
+ }
+ xbuf.clear();
+ //2*l_build_buf or total already merged
+ return min_value(elements_in_blocks, 2*l_build_buf);
+}
+
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class XBuf>
+void adaptive_sort_combine_blocks
+ ( RandItKeys const keys
+ , KeyCompare key_comp
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const len
+ , typename iterator_traits<RandIt>::size_type const l_prev_merged
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , bool const use_buf
+ , bool const xbuf_used
+ , XBuf & xbuf
+ , Compare comp
+ , bool merge_left)
+{
+ (void)xbuf;
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+
+ size_type const l_reg_combined = 2*l_prev_merged;
+ size_type l_irreg_combined = 0;
+ size_type const l_total_combined = calculate_total_combined(len, l_prev_merged, &l_irreg_combined);
+ size_type const n_reg_combined = len/l_reg_combined;
+ RandIt combined_first = first;
+
+ (void)l_total_combined;
+ BOOST_ASSERT(l_total_combined <= len);
+
+ size_type const max_i = n_reg_combined + (l_irreg_combined != 0);
+
+ if(merge_left || !use_buf) {
+ for( size_type combined_i = 0; combined_i != max_i; ++combined_i, combined_first += l_reg_combined) {
+ //Now merge blocks
+ bool const is_last = combined_i==n_reg_combined;
+ size_type const l_cur_combined = is_last ? l_irreg_combined : l_reg_combined;
+
+ range_xbuf<RandIt, move_op> rbuf( (use_buf && xbuf_used) ? (combined_first-l_block) : combined_first, combined_first);
+ size_type n_block_a, n_block_b, l_irreg1, l_irreg2;
+ combine_params( keys, key_comp, l_cur_combined
+ , l_prev_merged, l_block, rbuf
+ , n_block_a, n_block_b, l_irreg1, l_irreg2); //Outputs
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A combpar: ", len + l_block);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(combined_first, combined_first + n_block_a*l_block+l_irreg1, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(combined_first + n_block_a*l_block+l_irreg1, combined_first + n_block_a*l_block+l_irreg1+n_block_b*l_block+l_irreg2, comp));
+ if(!use_buf){
+ merge_blocks_bufferless
+ (keys, key_comp, combined_first, l_block, 0u, n_block_a, n_block_b, l_irreg2, comp);
+ }
+ else{
+ merge_blocks_left
+ (keys, key_comp, combined_first, l_block, 0u, n_block_a, n_block_b, l_irreg2, comp, xbuf_used);
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" After merge_blocks_L: ", len + l_block);
+ }
+ }
+ else{
+ combined_first += l_reg_combined*(max_i-1);
+ for( size_type combined_i = max_i; combined_i--; combined_first -= l_reg_combined) {
+ bool const is_last = combined_i==n_reg_combined;
+ size_type const l_cur_combined = is_last ? l_irreg_combined : l_reg_combined;
+
+ RandIt const combined_last(combined_first+l_cur_combined);
+ range_xbuf<RandIt, move_op> rbuf(combined_last, xbuf_used ? (combined_last+l_block) : combined_last);
+ size_type n_block_a, n_block_b, l_irreg1, l_irreg2;
+ combine_params( keys, key_comp, l_cur_combined
+ , l_prev_merged, l_block, rbuf
+ , n_block_a, n_block_b, l_irreg1, l_irreg2); //Outputs
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" A combpar: ", len + l_block);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(combined_first, combined_first + n_block_a*l_block+l_irreg1, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(combined_first + n_block_a*l_block+l_irreg1, combined_first + n_block_a*l_block+l_irreg1+n_block_b*l_block+l_irreg2, comp));
+ merge_blocks_right
+ (keys, key_comp, combined_first, l_block, n_block_a, n_block_b, l_irreg2, comp, xbuf_used);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" After merge_blocks_R: ", len + l_block);
+ }
+ }
+}
+
+//Returns true if buffer is placed in
+//[buffer+len-l_intbuf, buffer+len). Otherwise, buffer is
+//[buffer,buffer+l_intbuf)
+template<class RandIt, class Compare, class XBuf>
+bool adaptive_sort_combine_all_blocks
+ ( RandIt keys
+ , typename iterator_traits<RandIt>::size_type &n_keys
+ , RandIt const buffer
+ , typename iterator_traits<RandIt>::size_type const l_buf_plus_data
+ , typename iterator_traits<RandIt>::size_type l_merged
+ , typename iterator_traits<RandIt>::size_type &l_intbuf
+ , XBuf & xbuf
+ , Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ RandIt const first = buffer + l_intbuf;
+ size_type const l_data = l_buf_plus_data - l_intbuf;
+ size_type const l_unique = l_intbuf+n_keys;
+ //Backup data to external buffer once if possible
+ bool const common_xbuf = l_data > l_merged && l_intbuf && l_intbuf <= xbuf.capacity();
+ if(common_xbuf){
+ xbuf.move_assign(buffer, l_intbuf);
+ }
+
+ bool prev_merge_left = true;
+ size_type l_prev_total_combined = l_merged, l_prev_block = 0;
+ bool prev_use_internal_buf = true;
+
+ for( size_type n = 0; l_data > l_merged
+ ; l_merged*=2
+ , ++n){
+ //If l_intbuf is non-zero, use that internal buffer.
+ // Implies l_block == l_intbuf && use_internal_buf == true
+ //If l_intbuf is zero, see if half keys can be reused as a reduced emergency buffer,
+ // Implies l_block == n_keys/2 && use_internal_buf == true
+      //Otherwise, just give up and use all keys to merge using rotations (use_internal_buf = false)
+ bool use_internal_buf = false;
+ size_type const l_block = lblock_for_combine(l_intbuf, n_keys, 2*l_merged, use_internal_buf);
+ BOOST_ASSERT(!l_intbuf || (l_block == l_intbuf));
+ BOOST_ASSERT(n == 0 || (!use_internal_buf || prev_use_internal_buf) );
+ BOOST_ASSERT(n == 0 || (!use_internal_buf || l_prev_block == l_block) );
+
+ bool const is_merge_left = (n&1) == 0;
+ size_type const l_total_combined = calculate_total_combined(l_data, l_merged);
+ if(n && prev_use_internal_buf && prev_merge_left){
+ if(is_merge_left || !use_internal_buf){
+ move_data_backward(first-l_prev_block, l_prev_total_combined, first, common_xbuf);
+ }
+ else{
+ //Put the buffer just after l_total_combined
+ RandIt const buf_end = first+l_prev_total_combined;
+ RandIt const buf_beg = buf_end-l_block;
+ if(l_prev_total_combined > l_total_combined){
+ size_type const l_diff = l_prev_total_combined - l_total_combined;
+ move_data_backward(buf_beg-l_diff, l_diff, buf_end-l_diff, common_xbuf);
+ }
+ else if(l_prev_total_combined < l_total_combined){
+ size_type const l_diff = l_total_combined - l_prev_total_combined;
+ move_data_forward(buf_end, l_diff, buf_beg, common_xbuf);
+ }
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(" After move_data : ", l_data + l_intbuf);
+ }
+
+ //Combine to form l_merged*2 segments
+ if(n_keys){
+ size_type upper_n_keys_this_iter = 2*l_merged/l_block;
+ if(upper_n_keys_this_iter > 256){
+ adaptive_sort_combine_blocks
+ ( keys, comp, !use_internal_buf || is_merge_left ? first : first-l_block
+ , l_data, l_merged, l_block, use_internal_buf, common_xbuf, xbuf, comp, is_merge_left);
+ }
+ else{
+ unsigned char uint_keys[256];
+ adaptive_sort_combine_blocks
+ ( uint_keys, less(), !use_internal_buf || is_merge_left ? first : first-l_block
+ , l_data, l_merged, l_block, use_internal_buf, common_xbuf, xbuf, comp, is_merge_left);
+ }
+ }
+ else{
+ size_type *const uint_keys = xbuf.template aligned_trailing<size_type>();
+ adaptive_sort_combine_blocks
+ ( uint_keys, less(), !use_internal_buf || is_merge_left ? first : first-l_block
+ , l_data, l_merged, l_block, use_internal_buf, common_xbuf, xbuf, comp, is_merge_left);
+ }
+
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(is_merge_left ? " After comb blocks L: " : " After comb blocks R: ", l_data + l_intbuf);
+ prev_merge_left = is_merge_left;
+ l_prev_total_combined = l_total_combined;
+ l_prev_block = l_block;
+ prev_use_internal_buf = use_internal_buf;
+ }
+ BOOST_ASSERT(l_prev_total_combined == l_data);
+ bool const buffer_right = prev_use_internal_buf && prev_merge_left;
+
+ l_intbuf = prev_use_internal_buf ? l_prev_block : 0u;
+ n_keys = l_unique - l_intbuf;
+   //Restore data from the external common buffer if used
+ if(common_xbuf){
+ if(buffer_right){
+ boost::move(xbuf.data(), xbuf.data() + l_intbuf, buffer+l_data);
+ }
+ else{
+ boost::move(xbuf.data(), xbuf.data() + l_intbuf, buffer);
+ }
+ }
+ return buffer_right;
+}
+
+
+template<class RandIt, class Compare, class XBuf>
+void adaptive_sort_final_merge( bool buffer_right
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_intbuf
+ , typename iterator_traits<RandIt>::size_type const n_keys
+ , typename iterator_traits<RandIt>::size_type const len
+ , XBuf & xbuf
+ , Compare comp)
+{
+ //BOOST_ASSERT(n_keys || xbuf.size() == l_intbuf);
+ xbuf.clear();
+
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const n_key_plus_buf = l_intbuf+n_keys;
+ if(buffer_right){
+ //Use stable sort as some buffer elements might not be unique (see non_unique_buf)
+ stable_sort(first+len-l_intbuf, first+len, comp, xbuf);
+ stable_merge(first+n_keys, first+len-l_intbuf, first+len, antistable<Compare>(comp), xbuf);
+ unstable_sort(first, first+n_keys, comp, xbuf);
+ stable_merge(first, first+n_keys, first+len, comp, xbuf);
+ }
+ else{
+ //Use stable sort as some buffer elements might not be unique (see non_unique_buf)
+ stable_sort(first, first+n_key_plus_buf, comp, xbuf);
+ if(xbuf.capacity() >= n_key_plus_buf){
+ buffered_merge(first, first+n_key_plus_buf, first+len, comp, xbuf);
+ }
+ else if(xbuf.capacity() >= min_value<size_type>(l_intbuf, n_keys)){
+ stable_merge(first+n_keys, first+n_key_plus_buf, first+len, comp, xbuf);
+ stable_merge(first, first+n_keys, first+len, comp, xbuf);
+ }
+ else{
+ stable_merge(first, first+n_key_plus_buf, first+len, comp, xbuf);
+ }
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(" After final_merge : ", len);
+}
+
+template<class RandIt, class Compare, class Unsigned, class XBuf>
+bool adaptive_sort_build_params
+ (RandIt first, Unsigned const len, Compare comp
+ , Unsigned &n_keys, Unsigned &l_intbuf, Unsigned &l_base, Unsigned &l_build_buf
+ , XBuf & xbuf
+ )
+{
+ typedef Unsigned size_type;
+
+ //Calculate ideal parameters and try to collect needed unique keys
+ l_base = 0u;
+
+ //Try to find a value near sqrt(len) that is 2^N*l_base where
+ //l_base <= AdaptiveSortInsertionSortThreshold. This property is important
+   //as build_blocks merges to the left iteratively doubling the
+   //merged size, and all the buffer must be used just before the final
+ //merge to right step. This guarantees "build_blocks" produces
+ //segments of size l_build_buf*2, maximizing the classic merge phase.
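+   //(e.g. for len == 4096, the value 64 == 2^2*16 already has this form, since sqrt(4096) == 64
+   // and 16 <= AdaptiveSortInsertionSortThreshold)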
+ l_intbuf = size_type(ceil_sqrt_multiple(len, &l_base));
+
+ //The internal buffer can be expanded if there is enough external memory
+ while(xbuf.capacity() >= l_intbuf*2){
+ l_intbuf *= 2;
+ }
+
+ //This is the minimum number of keys to implement the ideal algorithm
+ //
+ //l_intbuf is used as buffer plus the key count
+ size_type n_min_ideal_keys = l_intbuf-1;
+ while(n_min_ideal_keys >= (len-l_intbuf-n_min_ideal_keys)/l_intbuf){
+ --n_min_ideal_keys;
+ }
+ n_min_ideal_keys += 1;
+ BOOST_ASSERT(n_min_ideal_keys <= l_intbuf);
+
+ if(xbuf.template supports_aligned_trailing<size_type>(l_intbuf, (len-l_intbuf-1)/l_intbuf+1)){
+ n_keys = 0u;
+ l_build_buf = l_intbuf;
+ }
+ else{
+ //Try to achieve a l_build_buf of length l_intbuf*2, so that we can merge with that
+ //l_intbuf*2 buffer in "build_blocks" and use half of them as buffer and the other half
+ //as keys in combine_all_blocks. In that case n_keys >= n_min_ideal_keys but by a small margin.
+ //
+ //If available memory is 2*sqrt(l), then only sqrt(l) unique keys are needed,
+ //(to be used for keys in combine_all_blocks) as the whole l_build_buf
+      //will be backed up in the buffer during build_blocks.
+ bool const non_unique_buf = xbuf.capacity() >= l_intbuf;
+ size_type const to_collect = non_unique_buf ? n_min_ideal_keys : l_intbuf*2;
+ size_type collected = collect_unique(first, first+len, to_collect, comp, xbuf);
+
+ //If available memory is 2*sqrt(l), then for "build_params"
+ //the situation is the same as if 2*l_intbuf were collected.
+ if(non_unique_buf && collected == n_min_ideal_keys){
+ l_build_buf = l_intbuf;
+ n_keys = n_min_ideal_keys;
+ }
+ else if(collected == 2*l_intbuf){
+ //l_intbuf*2 elements found. Use all of them in the build phase
+ l_build_buf = l_intbuf*2;
+ n_keys = l_intbuf;
+ }
+ else if(collected == (n_min_ideal_keys+l_intbuf)){
+ l_build_buf = l_intbuf;
+ n_keys = n_min_ideal_keys;
+ }
+ //If collected keys are not enough, try to fix n_keys and l_intbuf. If no fix
+      //is possible (due to very few unique keys), then go to a slow sort based on rotations.
+ else{
+ BOOST_ASSERT(collected < (n_min_ideal_keys+l_intbuf));
+         if(collected < 4){ //No combination possible with fewer than 4 keys
+ return false;
+ }
+ n_keys = l_intbuf;
+ while(n_keys&(n_keys-1)){
+            n_keys &= n_keys-1; // make it a power of 2
+ }
+ while(n_keys > collected){
+ n_keys/=2;
+ }
+         //AdaptiveSortInsertionSortThreshold is always a power of two, so the minimum is a power of two
+ l_base = min_value<Unsigned>(n_keys, AdaptiveSortInsertionSortThreshold);
+ l_intbuf = 0;
+ l_build_buf = n_keys;
+ }
+ BOOST_ASSERT((n_keys+l_intbuf) >= l_build_buf);
+ }
+
+ return true;
+}
+
+// Main explanation of the sort algorithm.
+//
+// csqrtlen = ceil(sqrt(len));
+//
+// * First, 2*csqrtlen unique elements are extracted from the elements to be
+//   sorted and placed at the beginning of the range.
+//
+// * Step "build_blocks": In this nearly-classic merge step, 2*csqrtlen unique elements
+//   will be used as auxiliary memory, so the trailing len-2*csqrtlen elements
+//   are grouped into blocks of sorted 4*csqrtlen elements. At the end of the step
+// 2*csqrtlen unique elements are again the leading elements of the whole range.
+//
+// * Step "combine_blocks": pairs of previously formed blocks are merged with a different
+// ("smart") algorithm to form blocks of 8*csqrtlen elements. This step is slower than the
+// "build_blocks" step and repeated iteratively (forming blocks of 16*csqrtlen, 32*csqrtlen
+// elements, etc) of until all trailing (len-2*csqrtlen) elements are merged.
+//
+// In "combine_blocks" len/csqrtlen elements used are as "keys" (markers) to
+// know if elements belong to the first or second block to be merged and another
+// leading csqrtlen elements are used as buffer. Explanation of the "combine_blocks" step:
+//
+// Iteratively until all trailing (len-2*csqrtlen) elements are merged:
+// Iteratively for each pair of previously merged block:
+//      * Blocks are divided into groups of csqrtlen elements and
+//        2*merged_block/csqrtlen keys are sorted to be used as markers
+//      * Groups are selection-sorted by first or last element (depending on whether they are going
+//        to be merged to the left or right) and keys are reordered accordingly as an imitation-buffer.
+// * Elements of each block pair are merged using the csqrtlen buffer taking into account
+// if they belong to the first half or second half (marked by the key).
+//
+// * In the final merge step the leading elements (2*csqrtlen) are sorted and merged,
+//   using rotations, with the rest of the elements sorted in the "combine_blocks" step.
+//
+// Corner cases:
+//
+// * If no 2*csqrtlen elements can be extracted:
+//
+// * If csqrtlen+len/csqrtlen are extracted, then only csqrtlen elements are used
+// as buffer in the "build_blocks" step forming blocks of 2*csqrtlen elements. This
+// means that an additional "combine_blocks" step will be needed to merge all elements.
+//
+// * If no csqrtlen+len/csqrtlen elements can be extracted, but still more than a minimum,
+//      then the number of elements used as buffer and keys in the "build_blocks"
+//      and "combine_blocks" steps is reduced. If "combine_blocks" does not have enough keys due to this reduction,
+//      then a rotation-based smart merge is used.
+//
+// * If the minimum number of keys can't be extracted, a rotation-based sorting is performed.
+//
+// * If auxiliary memory is greater than or equal to ceil(len/2), half-copying mergesort is used.
+//
+// * If auxiliary memory is more than csqrtlen+n_keys*sizeof(std::size_t),
+// then only csqrtlen elements need to be extracted and "combine_blocks" will use integral
+// keys to combine blocks.
+//
+// * If auxiliary memory is available, the "build_blocks" will be extended to build bigger blocks
+// using classic merge and "combine_blocks" will use bigger blocks when merging.
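+//
+// Worked example (illustrative): for len == 65536, csqrtlen == 256, so 2*csqrtlen == 512 unique
+// elements are extracted. "build_blocks" sorts the trailing 65024 elements into blocks of
+// 4*csqrtlen == 1024 elements, "combine_blocks" doubles them (2048, 4096, ...) using 256 keys and
+// a 256-element buffer, and the final merge step merges the leading 512 elements back with rotations.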
+template<class RandIt, class Compare, class XBuf>
+void adaptive_sort_impl
+ ( RandIt first
+ , typename iterator_traits<RandIt>::size_type const len
+ , Compare comp
+ , XBuf & xbuf
+ )
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+
+ //Small sorts go directly to insertion sort
+ if(len <= size_type(AdaptiveSortInsertionSortThreshold)){
+ insertion_sort(first, first + len, comp);
+ }
+ else if((len-len/2) <= xbuf.capacity()){
+ merge_sort(first, first+len, comp, xbuf.data());
+ }
+ else{
+ //Make sure it is at least four
+ BOOST_STATIC_ASSERT(AdaptiveSortInsertionSortThreshold >= 4);
+
+ size_type l_base = 0;
+ size_type l_intbuf = 0;
+ size_type n_keys = 0;
+ size_type l_build_buf = 0;
+
+ //Calculate and extract needed unique elements. If a minimum is not achieved
+      //fall back to a slow stable sort
+ if(!adaptive_sort_build_params(first, len, comp, n_keys, l_intbuf, l_base, l_build_buf, xbuf)){
+ stable_sort(first, first+len, comp, xbuf);
+ }
+ else{
+ BOOST_ASSERT(l_build_buf);
+ //Otherwise, continue the adaptive_sort
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1("\n After collect_unique: ", len);
+ size_type const n_key_plus_buf = l_intbuf+n_keys;
+         //l_build_buf is always a power of two if l_intbuf is zero
+ BOOST_ASSERT(l_intbuf || (0 == (l_build_buf & (l_build_buf-1))));
+
+ //Classic merge sort until internal buffer and xbuf are exhausted
+ size_type const l_merged = adaptive_sort_build_blocks
+ (first+n_key_plus_buf-l_build_buf, len-n_key_plus_buf+l_build_buf, l_base, l_build_buf, xbuf, comp);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(" After build_blocks: ", len);
+
+ //Non-trivial merge
+ bool const buffer_right = adaptive_sort_combine_all_blocks
+ (first, n_keys, first+n_keys, len-n_keys, l_merged, l_intbuf, xbuf, comp);
+
+ //Sort keys and buffer and merge the whole sequence
+ adaptive_sort_final_merge(buffer_right, first, l_intbuf, n_keys, len, xbuf, comp);
+ }
+ }
+}
+
+} //namespace detail_adaptive {
+
+///@endcond
+
+//! <b>Effects</b>: Sorts the elements in the range [first, last) in ascending order according
+//! to comparison functor "comp". The sort is stable (order of equal elements
+//! is guaranteed to be preserved). Performance is improved if additional raw storage is
+//! provided.
+//!
+//! <b>Requires</b>:
+//! - RandIt must meet the requirements of ValueSwappable and RandomAccessIterator.
+//! - The type of dereferenced RandIt must meet the requirements of MoveAssignable and MoveConstructible.
+//!
+//! <b>Parameters</b>:
+//! - first, last: the range of elements to sort
+//!   - comp: comparison function object which returns true if the first argument is ordered before the second.
+//! - uninitialized, uninitialized_len: raw storage starting on "uninitialized", able to hold "uninitialized_len"
+//! elements of type iterator_traits<RandIt>::value_type. Maximum performance is achieved when uninitialized_len
+//! is ceil(std::distance(first, last)/2).
+//!
+//! <b>Throws</b>: If comp throws or the move constructor, move assignment or swap of the type
+//! of dereferenced RandIt throws.
+//!
+//! <b>Complexity</b>: Always K x O(Nxlog(N)) comparisons and move assignments/constructors/swaps.
+//! Comparisons are close to minimum even with no additional memory. Constant factor for data movement is minimized
+//!   when uninitialized_len is ceil(std::distance(first, last)/2). Reasonably good performance is achieved when
+//!   uninitialized_len is ceil(sqrt(std::distance(first, last)))*2.
+//!
+//! <b>Caution</b>: Experimental implementation, not production-ready.
+template<class RandIt, class RandRawIt, class Compare>
+void adaptive_sort( RandIt first, RandIt last, Compare comp
+ , RandRawIt uninitialized
+ , std::size_t uninitialized_len)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+
+ ::boost::movelib::detail_adaptive::adaptive_xbuf<value_type, RandRawIt> xbuf(uninitialized, uninitialized_len);
+ ::boost::movelib::detail_adaptive::adaptive_sort_impl(first, size_type(last - first), comp, xbuf);
+}
+
+template<class RandIt, class Compare>
+void adaptive_sort( RandIt first, RandIt last, Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+ adaptive_sort(first, last, comp, (value_type*)0, 0u);
+}
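+
+//Illustrative usage sketch (assumes C++11 and that the caller includes <vector> and <functional>).
+//Extra raw storage is optional; ceil(N/2) elements give the best constant factors:
+//
+//   std::vector<int> v = {4, 1, 3, 1, 2};
+//   boost::movelib::adaptive_sort(v.begin(), v.end(), std::less<int>());   // no extra memory
+//
+//   int storage[3];                                                        // ceil(5/2) elements
+//   boost::movelib::adaptive_sort(v.begin(), v.end(), std::less<int>(), storage, 3u);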
+
+} //namespace movelib {
+} //namespace boost {
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //#define BOOST_MOVE_ADAPTIVE_SORT_HPP
diff --git a/include/boost/move/algo/detail/adaptive_sort_merge.hpp b/include/boost/move/algo/detail/adaptive_sort_merge.hpp
new file mode 100644
index 0000000..4c28086
--- /dev/null
+++ b/include/boost/move/algo/detail/adaptive_sort_merge.hpp
@@ -0,0 +1,1690 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2015-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+//
+// Stable sorting that works in O(N*log(N)) worst time
+// and uses O(1) extra memory
+//
+//////////////////////////////////////////////////////////////////////////////
+//
+// The main idea of the adaptive_sort algorithm was developed by Andrey Astrelin
+// and explained in the article from the Russian collaborative blog
+// Habrahabr (http://habrahabr.ru/post/205290/). The algorithm is based on
+// ideas from B-C. Huang and M. A. Langston explained in their article
+// "Fast Stable Merging and Sorting in Constant Extra Space (1989-1992)"
+// (http://comjnl.oxfordjournals.org/content/35/6/643.full.pdf).
+//
+// This implementation by Ion Gaztanaga uses previous ideas with additional changes:
+//
+// - Use of GCD-based rotation.
+// - Non power of two buffer-sizes.
+// - Tries to find sqrt(len)*2 unique keys, so that the merge sort
+// phase can form up to sqrt(len)*4 segments if enough keys are found.
+// - The merge-sort phase can take advantage of external memory to
+// save some additional combination steps.
+// - Combination phase: Blocks are selection sorted and merged in parallel.
+// - The combination phase is performed alternating merge to left and merge
+// to right phases minimizing swaps due to internal buffer repositioning.
+// - When merging blocks special optimizations are made to avoid moving some
+// elements twice.
+//
+// The adaptive_merge algorithm was developed by Ion Gaztanaga reusing some parts
+// from the sorting algorithm and implementing an additional block merge algorithm
+// without moving elements to left or right.
+//////////////////////////////////////////////////////////////////////////////
+#ifndef BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP
+#define BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/detail/reverse_iterator.hpp>
+#include <boost/move/algo/move.hpp>
+#include <boost/move/algo/detail/merge.hpp>
+#include <boost/move/adl_move_swap.hpp>
+#include <boost/move/algo/detail/insertion_sort.hpp>
+#include <boost/move/algo/detail/merge_sort.hpp>
+#include <boost/move/algo/detail/heap_sort.hpp>
+#include <boost/move/algo/detail/merge.hpp>
+#include <boost/move/algo/detail/is_sorted.hpp>
+#include <boost/assert.hpp>
+#include <boost/cstdint.hpp>
+
+#ifndef BOOST_MOVE_ADAPTIVE_SORT_STATS_LEVEL
+ #define BOOST_MOVE_ADAPTIVE_SORT_STATS_LEVEL 1
+#endif
+
+#ifdef BOOST_MOVE_ADAPTIVE_SORT_STATS
+ #if BOOST_MOVE_ADAPTIVE_SORT_STATS_LEVEL == 2
+ #define BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(STR, L) \
+ print_stats(STR, L)\
+ //
+
+ #define BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(STR, L) \
+ print_stats(STR, L)\
+ //
+ #else
+ #define BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(STR, L) \
+ print_stats(STR, L)\
+ //
+
+ #define BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(STR, L)
+ #endif
+#else
+ #define BOOST_MOVE_ADAPTIVE_SORT_PRINT_L1(STR, L)
+ #define BOOST_MOVE_ADAPTIVE_SORT_PRINT_L2(STR, L)
+#endif
+
+#ifdef BOOST_MOVE_ADAPTIVE_SORT_INVARIANTS
+ #define BOOST_MOVE_ADAPTIVE_SORT_INVARIANT BOOST_ASSERT
+#else
+ #define BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(L)
+#endif
+
+namespace boost {
+namespace movelib {
+
+#if defined(BOOST_MOVE_ADAPTIVE_SORT_INVARIANTS)
+
+bool is_sorted(::order_perf_type *first, ::order_perf_type *last, ::order_type_less)
+{
+ if (first != last) {
+ const order_perf_type *next = first, *cur(first);
+ while (++next != last) {
+ if (!(cur->key < next->key || (cur->key == next->key && cur->val < next->val)))
+ return false;
+ cur = next;
+ }
+ }
+ return true;
+}
+
+#endif //BOOST_MOVE_ADAPTIVE_SORT_INVARIANTS
+
+namespace detail_adaptive {
+
+static const std::size_t AdaptiveSortInsertionSortThreshold = 16;
+//static const std::size_t AdaptiveSortInsertionSortThreshold = 4;
+BOOST_STATIC_ASSERT((AdaptiveSortInsertionSortThreshold&(AdaptiveSortInsertionSortThreshold-1)) == 0);
+
+#if defined BOOST_HAS_INTPTR_T
+ typedef ::boost::uintptr_t uintptr_t;
+#else
+ typedef std::size_t uintptr_t;
+#endif
+
+template<class T>
+const T &min_value(const T &a, const T &b)
+{
+ return a < b ? a : b;
+}
+
+template<class T>
+const T &max_value(const T &a, const T &b)
+{
+ return a > b ? a : b;
+}
+
+template<class ForwardIt, class Pred, class V>
+typename iterator_traits<ForwardIt>::size_type
+ count_if_with(ForwardIt first, ForwardIt last, Pred pred, const V &v)
+{
+ typedef typename iterator_traits<ForwardIt>::size_type size_type;
+ size_type count = 0;
+ while(first != last) {
+ count += static_cast<size_type>(0 != pred(*first, v));
+ ++first;
+ }
+ return count;
+}
+
+template<class T, class RandRawIt = T*>
+class adaptive_xbuf
+{
+ adaptive_xbuf(const adaptive_xbuf &);
+ adaptive_xbuf & operator=(const adaptive_xbuf &);
+
+ public:
+ typedef RandRawIt iterator;
+
+ adaptive_xbuf()
+ : m_ptr(), m_size(0), m_capacity(0)
+ {}
+
+ adaptive_xbuf(RandRawIt raw_memory, std::size_t capacity)
+ : m_ptr(raw_memory), m_size(0), m_capacity(capacity)
+ {}
+
+ template<class RandIt>
+ void move_assign(RandIt first, std::size_t n)
+ {
+ if(n <= m_size){
+ boost::move(first, first+n, m_ptr);
+ std::size_t size = m_size;
+ while(size-- != n){
+ m_ptr[size].~T();
+ }
+ m_size = n;
+ }
+ else{
+ RandRawIt result = boost::move(first, first+m_size, m_ptr);
+ boost::uninitialized_move(first+m_size, first+n, result);
+ m_size = n;
+ }
+ }
+
+ template<class RandIt>
+ void push_back(RandIt first, std::size_t n)
+ {
+ BOOST_ASSERT(m_capacity - m_size >= n);
+ boost::uninitialized_move(first, first+n, m_ptr+m_size);
+ m_size += n;
+ }
+
+ template<class RandIt>
+ iterator add(RandIt it)
+ {
+ BOOST_ASSERT(m_size < m_capacity);
+ RandRawIt p_ret = m_ptr + m_size;
+ ::new(&*p_ret) T(::boost::move(*it));
+ ++m_size;
+ return p_ret;
+ }
+
+ template<class RandIt>
+ void insert(iterator pos, RandIt it)
+ {
+ if(pos == (m_ptr + m_size)){
+ this->add(it);
+ }
+ else{
+ this->add(m_ptr+m_size-1);
+ //m_size updated
+ boost::move_backward(pos, m_ptr+m_size-2, m_ptr+m_size-1);
+ *pos = boost::move(*it);
+ }
+ }
+
+ void set_size(std::size_t size)
+ {
+ m_size = size;
+ }
+
+ void shrink_to_fit(std::size_t const size)
+ {
+ if(m_size > size){
+ for(std::size_t szt_i = size; szt_i != m_size; ++szt_i){
+ m_ptr[szt_i].~T();
+ }
+ m_size = size;
+ }
+ }
+
+ void initialize_until(std::size_t const size, T &t)
+ {
+ BOOST_ASSERT(m_size < m_capacity);
+ if(m_size < size){
+ ::new((void*)&m_ptr[m_size]) T(::boost::move(t));
+ ++m_size;
+ for(; m_size != size; ++m_size){
+ ::new((void*)&m_ptr[m_size]) T(::boost::move(m_ptr[m_size-1]));
+ }
+ t = ::boost::move(m_ptr[m_size-1]);
+ }
+ }
+
+ private:
+ template<class RIt>
+ static bool is_raw_ptr(RIt)
+ {
+ return false;
+ }
+
+ static bool is_raw_ptr(T*)
+ {
+ return true;
+ }
+
+ public:
+ template<class U>
+ bool supports_aligned_trailing(std::size_t size, std::size_t trail_count) const
+ {
+ if(this->is_raw_ptr(this->data()) && m_capacity){
+ uintptr_t u_addr_sz = uintptr_t(&*(this->data()+size));
+ uintptr_t u_addr_cp = uintptr_t(&*(this->data()+this->capacity()));
+ u_addr_sz = ((u_addr_sz + sizeof(U)-1)/sizeof(U))*sizeof(U);
+ return (u_addr_cp >= u_addr_sz) && ((u_addr_cp - u_addr_sz)/sizeof(U) >= trail_count);
+ }
+ return false;
+ }
+
+ template<class U>
+ U *aligned_trailing() const
+ {
+ return this->aligned_trailing<U>(this->size());
+ }
+
+ template<class U>
+ U *aligned_trailing(std::size_t pos) const
+ {
+ uintptr_t u_addr = uintptr_t(&*(this->data()+pos));
+ u_addr = ((u_addr + sizeof(U)-1)/sizeof(U))*sizeof(U);
+ return (U*)u_addr;
+ }
+
+ ~adaptive_xbuf()
+ {
+ this->clear();
+ }
+
+ std::size_t capacity() const
+ { return m_capacity; }
+
+ iterator data() const
+ { return m_ptr; }
+
+ iterator end() const
+ { return m_ptr+m_size; }
+
+ std::size_t size() const
+ { return m_size; }
+
+ bool empty() const
+ { return !m_size; }
+
+ void clear()
+ {
+ this->shrink_to_fit(0u);
+ }
+
+ private:
+ RandRawIt m_ptr;
+ std::size_t m_size;
+ std::size_t m_capacity;
+};
+
+template<class Iterator, class Op>
+class range_xbuf
+{
+ range_xbuf(const range_xbuf &);
+ range_xbuf & operator=(const range_xbuf &);
+
+ public:
+ typedef typename iterator_traits<Iterator>::size_type size_type;
+ typedef Iterator iterator;
+
+ range_xbuf(Iterator first, Iterator last)
+ : m_first(first), m_last(first), m_cap(last)
+ {}
+
+ template<class RandIt>
+ void move_assign(RandIt first, std::size_t n)
+ {
+ BOOST_ASSERT(size_type(n) <= size_type(m_cap-m_first));
+ m_last = Op()(forward_t(), first, first+n, m_first);
+ }
+
+ ~range_xbuf()
+ {}
+
+ std::size_t capacity() const
+ { return m_cap-m_first; }
+
+ Iterator data() const
+ { return m_first; }
+
+ Iterator end() const
+ { return m_last; }
+
+ std::size_t size() const
+ { return m_last-m_first; }
+
+ bool empty() const
+ { return m_first == m_last; }
+
+ void clear()
+ {
+ m_last = m_first;
+ }
+
+ template<class RandIt>
+ iterator add(RandIt it)
+ {
+ Iterator pos(m_last);
+ *pos = boost::move(*it);
+ ++m_last;
+ return pos;
+ }
+
+ void set_size(std::size_t size)
+ {
+ m_last = m_first;
+ m_last += size;
+ }
+
+ private:
+ Iterator const m_first;
+ Iterator m_last;
+ Iterator const m_cap;
+};
+
+
+template<class RandIt, class Compare>
+RandIt skip_until_merge
+ ( RandIt first1, RandIt const last1
+ , const typename iterator_traits<RandIt>::value_type &next_key, Compare comp)
+{
+ while(first1 != last1 && !comp(next_key, *first1)){
+ ++first1;
+ }
+ return first1;
+}
+
+
+template<class RandItKeys, class RandIt>
+void swap_and_update_key
+ ( RandItKeys const key_next
+ , RandItKeys const key_range2
+ , RandItKeys &key_mid
+ , RandIt const begin
+ , RandIt const end
+ , RandIt const with)
+{
+ if(begin != with){
+ ::boost::adl_move_swap_ranges(begin, end, with);
+ ::boost::adl_move_swap(*key_next, *key_range2);
+ if(key_next == key_mid){
+ key_mid = key_range2;
+ }
+ else if(key_mid == key_range2){
+ key_mid = key_next;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// MERGE BUFFERLESS
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// [first1, last1) merge [last1,last2) -> [first1,last2)
+template<class RandIt, class Compare>
+RandIt partial_merge_bufferless_impl
+ (RandIt first1, RandIt last1, RandIt const last2, bool *const pis_range1_A, Compare comp)
+{
+ if(last1 == last2){
+ return first1;
+ }
+ bool const is_range1_A = *pis_range1_A;
+ if(first1 != last1 && comp(*last1, last1[-1])){
+ do{
+ RandIt const old_last1 = last1;
+ last1 = boost::movelib::lower_bound(last1, last2, *first1, comp);
+ first1 = rotate_gcd(first1, old_last1, last1);//old_last1 == last1 supported
+ if(last1 == last2){
+ return first1;
+ }
+ do{
+ ++first1;
+ } while(last1 != first1 && !comp(*last1, *first1) );
+ } while(first1 != last1);
+ }
+ *pis_range1_A = !is_range1_A;
+ return last1;
+}
+
+// [first1, last1) merge [last1,last2) -> [first1,last2)
+template<class RandIt, class Compare>
+RandIt partial_merge_bufferless
+ (RandIt first1, RandIt last1, RandIt const last2, bool *const pis_range1_A, Compare comp)
+{
+ return *pis_range1_A ? partial_merge_bufferless_impl(first1, last1, last2, pis_range1_A, comp)
+ : partial_merge_bufferless_impl(first1, last1, last2, pis_range1_A, antistable<Compare>(comp));
+}
+
+template<class SizeType>
+static SizeType needed_keys_count(SizeType n_block_a, SizeType n_block_b)
+{
+ return n_block_a + n_block_b;
+}
+
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
+typename iterator_traits<RandIt>::size_type
+ find_next_block
+ ( RandItKeys key_first
+ , KeyCompare key_comp
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , typename iterator_traits<RandIt>::size_type const ix_first_block
+ , typename iterator_traits<RandIt>::size_type const ix_last_block
+ , Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+ typedef typename iterator_traits<RandItKeys>::value_type key_type;
+ BOOST_ASSERT(ix_first_block <= ix_last_block);
+ size_type ix_min_block = 0u;
+ for (size_type szt_i = ix_first_block; szt_i < ix_last_block; ++szt_i) {
+ const value_type &min_val = first[ix_min_block*l_block];
+ const value_type &cur_val = first[szt_i*l_block];
+ const key_type &min_key = key_first[ix_min_block];
+ const key_type &cur_key = key_first[szt_i];
+
+ bool const less_than_minimum = comp(cur_val, min_val) ||
+ (!comp(min_val, cur_val) && key_comp(cur_key, min_key));
+
+ if (less_than_minimum) {
+ ix_min_block = szt_i;
+ }
+ }
+ return ix_min_block;
+}
+
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
+void merge_blocks_bufferless
+ ( RandItKeys key_first
+ , KeyCompare key_comp
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , typename iterator_traits<RandIt>::size_type const l_irreg1
+ , typename iterator_traits<RandIt>::size_type const n_block_a
+ , typename iterator_traits<RandIt>::size_type const n_block_b
+ , typename iterator_traits<RandIt>::size_type const l_irreg2
+ , Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const key_count = needed_keys_count(n_block_a, n_block_b); (void)key_count;
+ //BOOST_ASSERT(n_block_a || n_block_b);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(boost::movelib::is_sorted_and_unique(key_first, key_first + key_count, key_comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!n_block_b || n_block_a == count_if_with(key_first, key_first + key_count, key_comp, key_first[n_block_a]));
+
+ size_type n_bef_irreg2 = 0;
+ bool l_irreg_pos_count = true;
+ RandItKeys key_mid(key_first + n_block_a);
+ RandIt const first_irr2 = first + l_irreg1 + (n_block_a+n_block_b)*l_block;
+ RandIt const last_irr2 = first_irr2 + l_irreg2;
+
+ { //Selection sort blocks
+ size_type n_block_left = n_block_b + n_block_a;
+ RandItKeys key_range2(key_first);
+
+ size_type min_check = n_block_a == n_block_left ? 0u : n_block_a;
+ size_type max_check = min_value(min_check+1, n_block_left);
+ for (RandIt f = first+l_irreg1; n_block_left; --n_block_left, ++key_range2, f += l_block, min_check -= min_check != 0, max_check -= max_check != 0) {
+ size_type const next_key_idx = find_next_block(key_range2, key_comp, f, l_block, min_check, max_check, comp);
+ RandItKeys const key_next(key_range2 + next_key_idx);
+ max_check = min_value(max_value(max_check, next_key_idx+2), n_block_left);
+
+ RandIt const first_min = f + next_key_idx*l_block;
+
+ //Check if irregular b block should go here.
+ //If so, break to the special code handling the irregular block
+ if (l_irreg_pos_count && l_irreg2 && comp(*first_irr2, *first_min)){
+ l_irreg_pos_count = false;
+ }
+ n_bef_irreg2 += l_irreg_pos_count;
+
+ swap_and_update_key(key_next, key_range2, key_mid, f, f + l_block, first_min);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(f, f+l_block, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first_min, first_min + l_block, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT((f == (first+l_irreg1)) || !comp(*f, *(f-l_block)));
+ }
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first+l_irreg1+n_bef_irreg2*l_block, first_irr2, comp));
+
+ RandIt first1 = first;
+ RandIt last1 = first+l_irreg1;
+ RandItKeys const key_end (key_first+n_bef_irreg2);
+ bool is_range1_A = true;
+
+ for( ; key_first != key_end; ++key_first){
+ bool is_range2_A = key_mid == (key_first+key_count) || key_comp(*key_first, *key_mid);
+ first1 = is_range1_A == is_range2_A
+ ? last1 : partial_merge_bufferless(first1, last1, last1 + l_block, &is_range1_A, comp);
+ last1 += l_block;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first, first1, comp));
+ }
+
+ merge_bufferless(is_range1_A ? first1 : last1, first_irr2, last_irr2, comp);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first, last_irr2, comp));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// BUFFERED MERGE
+//
+///////////////////////////////////////////////////////////////////////////////
+template<class RandIt, class Compare, class Op, class Buf>
+void op_buffered_merge
+ ( RandIt first, RandIt const middle, RandIt last
+ , Compare comp, Op op
+ , Buf &xbuf)
+{
+ if(first != middle && middle != last && comp(*middle, middle[-1])){
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const len1 = size_type(middle-first);
+ size_type const len2 = size_type(last-middle);
+ if(len1 <= len2){
+ first = boost::movelib::upper_bound(first, middle, *middle, comp);
+ xbuf.move_assign(first, size_type(middle-first));
+ op_merge_with_right_placed
+ (xbuf.data(), xbuf.end(), first, middle, last, comp, op);
+ }
+ else{
+ last = boost::movelib::lower_bound(middle, last, middle[-1], comp);
+ xbuf.move_assign(middle, size_type(last-middle));
+ op_merge_with_left_placed
+ (first, middle, last, xbuf.data(), xbuf.end(), comp, op);
+ }
+ }
+}
+
+template<class RandIt, class Compare, class XBuf>
+void buffered_merge
+ ( RandIt first, RandIt const middle, RandIt last
+ , Compare comp
+ , XBuf &xbuf)
+{
+ op_buffered_merge(first, middle, last, comp, move_op(), xbuf);
+}
+
+// Complexity: 2*distance(first, last)+max_collected^2/2
+//
+// Tries to collect at most max_collected unique elements from [first, last),
+// placing them at the beginning of the range, ordered according to comp
+//
+// Returns the number of collected keys
+template<class RandIt, class Compare, class XBuf>
+typename iterator_traits<RandIt>::size_type
+ collect_unique
+ ( RandIt const first, RandIt const last
+ , typename iterator_traits<RandIt>::size_type const max_collected, Compare comp
+ , XBuf & xbuf)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type h = 0;
+ if(max_collected){
+ ++h; // first key is always here
+ RandIt h0 = first;
+ RandIt u = first; ++u;
+ RandIt search_end = u;
+
+ if(xbuf.capacity() >= max_collected){
+ typename XBuf::iterator const ph0 = xbuf.add(first);
+ while(u != last && h < max_collected){
+ typename XBuf::iterator const r = boost::movelib::lower_bound(ph0, xbuf.end(), *u, comp);
+            //If the key is not found, add it to the buffer of collected keys
+ if(r == xbuf.end() || comp(*u, *r) ){
+ RandIt const new_h0 = boost::move(search_end, u, h0);
+ search_end = u;
+ ++search_end;
+ ++h;
+ xbuf.insert(r, u);
+ h0 = new_h0;
+ }
+ ++u;
+ }
+ boost::move_backward(first, h0, h0+h);
+ boost::move(xbuf.data(), xbuf.end(), first);
+ }
+ else{
+ while(u != last && h < max_collected){
+ RandIt const r = boost::movelib::lower_bound(h0, search_end, *u, comp);
+            //If the key is not found, add it to the sorted key area [h0, h0+h)
+ if(r == search_end || comp(*u, *r) ){
+ RandIt const new_h0 = rotate_gcd(h0, search_end, u);
+ search_end = u;
+ ++search_end;
+ ++h;
+ rotate_gcd(r+(new_h0-h0), u, search_end);
+ h0 = new_h0;
+ }
+ ++u;
+ }
+ rotate_gcd(first, h0, h0+h);
+ }
+ }
+ return h;
+}
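+
+// Illustrative behaviour (hypothetical input): given {3, 1, 3, 2, 1, 5} and
+// max_collected == 3, the call is expected to return 3 and leave the collected
+// keys {1, 2, 3} at the front of the range in sorted order, with the rest of the
+// elements following.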
+
+template<class Unsigned>
+Unsigned floor_sqrt(Unsigned const n)
+{
+ Unsigned x = n;
+ Unsigned y = x/2 + (x&1);
+ while (y < x){
+ x = y;
+ y = (x + n / x)/2;
+ }
+ return x;
+}
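+
+// Example trace of the Newton iteration above: floor_sqrt(10u) starts with x = 10,
+// y = 5; then x = 5, y = (5 + 10/5)/2 = 3; then x = 3, y = (3 + 10/3)/2 = 3, the
+// loop stops and 3 is returned.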
+
+template<class Unsigned>
+Unsigned ceil_sqrt(Unsigned const n)
+{
+ Unsigned r = floor_sqrt(n);
+ return r + Unsigned((n%r) != 0);
+}
+
+template<class Unsigned>
+Unsigned floor_merge_multiple(Unsigned const n, Unsigned &base, Unsigned &pow)
+{
+ Unsigned s = n;
+ Unsigned p = 0;
+ while(s > AdaptiveSortInsertionSortThreshold){
+ s /= 2;
+ ++p;
+ }
+ base = s;
+ pow = p;
+ return s << p;
+}
+
+template<class Unsigned>
+Unsigned ceil_merge_multiple(Unsigned const n, Unsigned &base, Unsigned &pow)
+{
+ Unsigned fm = floor_merge_multiple(n, base, pow);
+
+ if(fm != n){
+ if(base < AdaptiveSortInsertionSortThreshold){
+ ++base;
+ }
+ else{
+ base = AdaptiveSortInsertionSortThreshold/2 + 1;
+ ++pow;
+ }
+ }
+ return base << pow;
+}
+
+template<class Unsigned>
+Unsigned ceil_sqrt_multiple(Unsigned const n, Unsigned *pbase = 0)
+{
+ Unsigned const r = ceil_sqrt(n);
+ Unsigned pow = 0;
+ Unsigned base = 0;
+ Unsigned const res = ceil_merge_multiple(r, base, pow);
+ if(pbase) *pbase = base;
+ return res;
+}
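+
+// Rough example, assuming AdaptiveSortInsertionSortThreshold is 16 (its actual
+// value is defined elsewhere): floor_merge_multiple(100) halves 100 -> 50 -> 25 -> 12,
+// so base == 12, pow == 3 and 12 << 3 == 96 is returned; ceil_merge_multiple(100)
+// then bumps base to 13 and yields 13 << 3 == 104, the first such multiple that is
+// not smaller than 100.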
+
+struct less
+{
+ template<class T>
+ bool operator()(const T &l, const T &r)
+ { return l < r; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// MERGE BLOCKS
+//
+///////////////////////////////////////////////////////////////////////////////
+
+//#define ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+
+#if defined ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+template<class RandIt, class Compare>
+void slow_stable_sort
+ ( RandIt const first, RandIt const last, Compare comp)
+{
+ boost::movelib::inplace_stable_sort(first, last, comp);
+}
+
+#else //ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+
+template<class RandIt, class Compare>
+void slow_stable_sort
+ ( RandIt const first, RandIt const last, Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type L = size_type(last - first);
+   { //Insertion-sort each chunk of at most AdaptiveSortInsertionSortThreshold elements
+ size_type m = 0;
+ while((L - m) > size_type(AdaptiveSortInsertionSortThreshold)){
+ insertion_sort(first+m, first+m+size_type(AdaptiveSortInsertionSortThreshold), comp);
+ m += AdaptiveSortInsertionSortThreshold;
+ }
+ insertion_sort(first+m, last, comp);
+ }
+
+ size_type h = AdaptiveSortInsertionSortThreshold;
+ for(bool do_merge = L > h; do_merge; h*=2){
+ do_merge = (L - h) > h;
+ size_type p0 = 0;
+ if(do_merge){
+ size_type const h_2 = 2*h;
+ while((L-p0) > h_2){
+ merge_bufferless(first+p0, first+p0+h, first+p0+h_2, comp);
+ p0 += h_2;
+ }
+ }
+ if((L-p0) > h){
+ merge_bufferless(first+p0, first+p0+h, last, comp);
+ }
+ }
+}
+
+#endif //ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+
+//Returns new l_block and updates use_buf
+template<class Unsigned>
+Unsigned lblock_for_combine
+ (Unsigned const l_block, Unsigned const n_keys, Unsigned const l_data, bool &use_buf)
+{
+ BOOST_ASSERT(l_data > 1);
+
+ //We need to guarantee lblock >= l_merged/(n_keys/2) keys for the combination.
+ //We have at least 4 keys guaranteed (which are the minimum to merge 2 ranges)
+ //If l_block != 0, then n_keys is already enough to merge all blocks in all
+ //phases as we've found all needed keys for that buffer and length before.
+   //If l_block == 0 then see if half of the keys can be used as buffer and the
+   //rest as keys, guaranteeing that the remaining keys can cover the data
+   //(new_keys >= l_data/new_buf).
+ if(!l_block){
+ //If l_block == 0 then n_keys is power of two
+ //(guaranteed by build_params(...))
+ BOOST_ASSERT(n_keys >= 4);
+ //BOOST_ASSERT(0 == (n_keys &(n_keys-1)));
+
+      //See if half of the keys are at least 4 and enough to cover the data length
+ Unsigned const new_buf = n_keys/2;
+ Unsigned const new_keys = n_keys-new_buf;
+ use_buf = new_keys >= 4 && new_keys >= l_data/new_buf;
+ if(use_buf){
+ return new_buf;
+ }
+ else{
+ return l_data/n_keys;
+ }
+ }
+ else{
+ use_buf = true;
+ return l_block;
+ }
+}
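+
+// Illustrative values: lblock_for_combine(0u, 64u, 100u, use_buf) splits the 64 keys
+// into 32 buffer slots and 32 keys; since 32 >= 4 and 32 >= 100/32, use_buf becomes
+// true and 32 is returned. With only 8 keys for the same data length, 4 keys cannot
+// cover 100/4 == 25 blocks, so use_buf is set to false and l_data/n_keys == 12 is
+// returned instead.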
+
+template<class RandIt, class Compare, class XBuf>
+void stable_sort( RandIt first, RandIt last, Compare comp, XBuf & xbuf)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const len = size_type(last - first);
+ size_type const half_len = len/2 + (len&1);
+ if(std::size_t(xbuf.capacity() - xbuf.size()) >= half_len) {
+ merge_sort(first, last, comp, xbuf.data()+xbuf.size());
+ }
+ else{
+ slow_stable_sort(first, last, comp);
+ }
+}
+
+template<class RandIt, class Comp, class XBuf>
+void unstable_sort( RandIt first, RandIt last
+ , Comp comp
+ , XBuf & xbuf)
+{
+ heap_sort(first, last, comp);(void)xbuf;
+}
+
+template<class RandIt, class Compare, class XBuf>
+void stable_merge
+ ( RandIt first, RandIt const middle, RandIt last
+ , Compare comp
+ , XBuf &xbuf)
+{
+ BOOST_ASSERT(xbuf.empty());
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const len1 = size_type(middle-first);
+ size_type const len2 = size_type(last-middle);
+ size_type const l_min = min_value(len1, len2);
+ if(xbuf.capacity() >= l_min){
+ buffered_merge(first, middle, last, comp, xbuf);
+ xbuf.clear();
+ }
+ else{
+ merge_bufferless(first, middle, last, comp);
+ }
+}
+
+template<class RandIt, class Comp, class XBuf>
+void initialize_keys( RandIt first, RandIt last
+ , Comp comp
+ , XBuf & xbuf)
+{
+ unstable_sort(first, last, comp, xbuf);
+ BOOST_ASSERT(boost::movelib::is_sorted_and_unique(first, last, comp));
+}
+
+template<class RandIt, class U>
+void initialize_keys( RandIt first, RandIt last
+ , less
+ , U &)
+{
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+ std::size_t count = std::size_t(last - first);
+ for(std::size_t i = 0; i != count; ++i){
+ *first = static_cast<value_type>(i);
+ ++first;
+ }
+}
+
+template <class Unsigned>
+Unsigned calculate_total_combined(Unsigned const len, Unsigned const l_prev_merged, Unsigned *pl_irreg_combined = 0)
+{
+ typedef Unsigned size_type;
+
+ size_type const l_combined = 2*l_prev_merged;
+ size_type l_irreg_combined = len%l_combined;
+ size_type l_total_combined = len;
+ if(l_irreg_combined <= l_prev_merged){
+ l_total_combined -= l_irreg_combined;
+ l_irreg_combined = 0;
+ }
+ if(pl_irreg_combined)
+ *pl_irreg_combined = l_irreg_combined;
+ return l_total_combined;
+}
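+
+// Illustrative values: with len == 100 and l_prev_merged == 16 the combined length
+// is 32 and the trailing remainder 100 % 32 == 4 fits inside one half, so it is
+// dropped and 96 is returned with a zero irregular length. With l_prev_merged == 3
+// the remainder 100 % 6 == 4 exceeds 3, so 100 is returned and the last 4 elements
+// form the irregular combined range.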
+
+template<class RandItKeys, class KeyCompare, class SizeType, class XBuf>
+void combine_params
+ ( RandItKeys const keys
+ , KeyCompare key_comp
+ , SizeType l_combined
+ , SizeType const l_prev_merged
+ , SizeType const l_block
+ , XBuf & xbuf
+ //Output
+ , SizeType &n_block_a
+ , SizeType &n_block_b
+ , SizeType &l_irreg1
+ , SizeType &l_irreg2
+ //Options
+ , bool do_initialize_keys = true)
+{
+ typedef SizeType size_type;
+
+ //Initial parameters for selection sort blocks
+ l_irreg1 = l_prev_merged%l_block;
+ l_irreg2 = (l_combined-l_irreg1)%l_block;
+ BOOST_ASSERT(((l_combined-l_irreg1-l_irreg2)%l_block) == 0);
+ size_type const n_reg_block = (l_combined-l_irreg1-l_irreg2)/l_block;
+ n_block_a = l_prev_merged/l_block;
+ n_block_b = n_reg_block - n_block_a;
+ BOOST_ASSERT(n_reg_block>=n_block_a);
+
+ //Key initialization
+ if (do_initialize_keys) {
+ initialize_keys(keys, keys + needed_keys_count(n_block_a, n_block_b), key_comp, xbuf);
+ }
+}
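+
+// Illustrative values: for l_prev_merged == 16, l_block == 4 and l_combined == 37
+// the outputs are l_irreg1 == 0, l_irreg2 == 1, n_block_a == 4 and n_block_b == 5
+// (nine regular blocks plus a single leftover element in the irregular tail).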
+
+
+
+//////////////////////////////////
+//
+// partial_merge
+//
+//////////////////////////////////
+template<class InputIt1, class InputIt2, class OutputIt, class Compare, class Op>
+OutputIt op_partial_merge_impl
+ (InputIt1 &r_first1, InputIt1 const last1, InputIt2 &r_first2, InputIt2 const last2, OutputIt d_first, Compare comp, Op op)
+{
+ InputIt1 first1(r_first1);
+ InputIt2 first2(r_first2);
+ if(first2 != last2 && last1 != first1)
+ while(1){
+ if(comp(*first2, *first1)) {
+ op(first2++, d_first++);
+ if(first2 == last2){
+ break;
+ }
+ }
+ else{
+ op(first1++, d_first++);
+ if(first1 == last1){
+ break;
+ }
+ }
+ }
+ r_first1 = first1;
+ r_first2 = first2;
+ return d_first;
+}
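+
+// Illustrative trace: merging range1 == {1, 4, 6} with range2 == {2, 3} writes
+// 1, 2, 3 to the destination and stops once range2 is exhausted; on return
+// r_first1 points at 4, r_first2 equals last2 and the returned iterator is the
+// destination advanced by three positions.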
+
+template<class InputIt1, class InputIt2, class OutputIt, class Compare, class Op>
+OutputIt op_partial_merge
+ (InputIt1 &r_first1, InputIt1 const last1, InputIt2 &r_first2, InputIt2 const last2, OutputIt d_first, Compare comp, Op op, bool is_stable)
+{
+ return is_stable ? op_partial_merge_impl(r_first1, last1, r_first2, last2, d_first, comp, op)
+ : op_partial_merge_impl(r_first1, last1, r_first2, last2, d_first, antistable<Compare>(comp), op);
+}
+
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+//
+// op_partial_merge_and_save
+//
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+template<class InputIt1, class InputIt2, class OutputIt, class Compare, class Op>
+OutputIt op_partial_merge_and_swap_impl
+ (InputIt1 &r_first1, InputIt1 const last1, InputIt2 &r_first2, InputIt2 const last2, InputIt2 &r_first_min, OutputIt d_first, Compare comp, Op op)
+{
+ InputIt1 first1(r_first1);
+ InputIt2 first2(r_first2);
+
+ if(first2 != last2 && last1 != first1) {
+ InputIt2 first_min(r_first_min);
+ bool non_empty_ranges = true;
+ do{
+ if(comp(*first_min, *first1)) {
+ op(three_way_t(), first2++, first_min++, d_first++);
+ non_empty_ranges = first2 != last2;
+ }
+ else{
+ op(first1++, d_first++);
+ non_empty_ranges = first1 != last1;
+ }
+ } while(non_empty_ranges);
+ r_first_min = first_min;
+ r_first1 = first1;
+ r_first2 = first2;
+ }
+ return d_first;
+}
+
+template<class RandIt, class InputIt2, class OutputIt, class Compare, class Op>
+OutputIt op_partial_merge_and_swap
+ (RandIt &r_first1, RandIt const last1, InputIt2 &r_first2, InputIt2 const last2, InputIt2 &r_first_min, OutputIt d_first, Compare comp, Op op, bool is_stable)
+{
+ return is_stable ? op_partial_merge_and_swap_impl(r_first1, last1, r_first2, last2, r_first_min, d_first, comp, op)
+ : op_partial_merge_and_swap_impl(r_first1, last1, r_first2, last2, r_first_min, d_first, antistable<Compare>(comp), op);
+}
+
+template<class RandIt1, class RandIt2, class RandItB, class Compare, class Op>
+RandItB op_buffered_partial_merge_and_swap_to_range1_and_buffer
+ ( RandIt1 first1, RandIt1 const last1
+ , RandIt2 &rfirst2, RandIt2 const last2, RandIt2 &rfirst_min
+ , RandItB &rfirstb, Compare comp, Op op )
+{
+ RandItB firstb = rfirstb;
+ RandItB lastb = firstb;
+ RandIt2 first2 = rfirst2;
+
+ //Move to buffer while merging
+   //Three-way moves need fewer moves when op is swap_op, so use them
+   //when merging elements from range2 into the destination occupied by range1
+ if(first1 != last1 && first2 != last2){
+ RandIt2 first_min = rfirst_min;
+ op(four_way_t(), first2++, first_min++, first1++, lastb++);
+
+ while(first1 != last1){
+ if(first2 == last2){
+ lastb = op(forward_t(), first1, last1, firstb);
+ break;
+ }
+
+ if(comp(*first_min, *firstb)){
+ op( four_way_t(), first2++, first_min++, first1++, lastb++);
+ }
+ else{
+ op(three_way_t(), firstb++, first1++, lastb++);
+ }
+ }
+ rfirst2 = first2;
+ rfirstb = firstb;
+ rfirst_min = first_min;
+ }
+
+ return lastb;
+}
+
+template<class RandIt1, class RandIt2, class RandItB, class Compare, class Op>
+RandItB op_buffered_partial_merge_to_range1_and_buffer
+ ( RandIt1 first1, RandIt1 const last1
+ , RandIt2 &rfirst2, RandIt2 const last2
+ , RandItB &rfirstb, Compare comp, Op op )
+{
+ RandItB firstb = rfirstb;
+ RandItB lastb = firstb;
+ RandIt2 first2 = rfirst2;
+
+ //Move to buffer while merging
+   //Three-way moves need fewer moves when op is swap_op, so use them
+   //when merging elements from range2 into the destination occupied by range1
+ if(first1 != last1 && first2 != last2){
+ op(three_way_t(), first2++, first1++, lastb++);
+
+ while(true){
+ if(first1 == last1){
+ break;
+ }
+ if(first2 == last2){
+ lastb = op(forward_t(), first1, last1, firstb);
+ break;
+ }
+ if (comp(*first2, *firstb)) {
+ op(three_way_t(), first2++, first1++, lastb++);
+ }
+ else {
+ op(three_way_t(), firstb++, first1++, lastb++);
+ }
+ }
+ rfirst2 = first2;
+ rfirstb = firstb;
+ }
+
+ return lastb;
+}
+
+template<class RandIt, class RandItBuf, class Compare, class Op>
+RandIt op_partial_merge_and_save_impl
+ ( RandIt first1, RandIt const last1, RandIt &rfirst2, RandIt last2, RandIt first_min
+ , RandItBuf &buf_first1_in_out, RandItBuf &buf_last1_in_out
+ , Compare comp, Op op
+ )
+{
+ RandItBuf buf_first1 = buf_first1_in_out;
+ RandItBuf buf_last1 = buf_last1_in_out;
+ RandIt first2(rfirst2);
+
+ bool const do_swap = first2 != first_min;
+ if(buf_first1 == buf_last1){
+ //Skip any element that does not need to be moved
+ RandIt new_first1 = skip_until_merge(first1, last1, *first_min, comp);
+ buf_first1 += (new_first1-first1);
+ first1 = new_first1;
+ buf_last1 = do_swap ? op_buffered_partial_merge_and_swap_to_range1_and_buffer(first1, last1, first2, last2, first_min, buf_first1, comp, op)
+ : op_buffered_partial_merge_to_range1_and_buffer (first1, last1, first2, last2, buf_first1, comp, op);
+ first1 = last1;
+ }
+ else{
+ BOOST_ASSERT((last1-first1) == (buf_last1 - buf_first1));
+ }
+
+ //Now merge from buffer
+ first1 = do_swap ? op_partial_merge_and_swap_impl(buf_first1, buf_last1, first2, last2, first_min, first1, comp, op)
+ : op_partial_merge_impl (buf_first1, buf_last1, first2, last2, first1, comp, op);
+ buf_first1_in_out = buf_first1;
+ buf_last1_in_out = buf_last1;
+ rfirst2 = first2;
+ return first1;
+}
+
+template<class RandIt, class RandItBuf, class Compare, class Op>
+RandIt op_partial_merge_and_save
+ ( RandIt first1, RandIt const last1, RandIt &rfirst2, RandIt last2, RandIt first_min
+ , RandItBuf &buf_first1_in_out
+ , RandItBuf &buf_last1_in_out
+ , Compare comp
+ , Op op
+ , bool is_stable)
+{
+ return is_stable
+ ? op_partial_merge_and_save_impl
+ (first1, last1, rfirst2, last2, first_min, buf_first1_in_out, buf_last1_in_out, comp, op)
+ : op_partial_merge_and_save_impl
+ (first1, last1, rfirst2, last2, first_min, buf_first1_in_out, buf_last1_in_out, antistable<Compare>(comp), op)
+ ;
+}
+
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+//
+// op_merge_blocks_with_irreg
+//
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+
+template<class RandItKeys, class KeyCompare, class RandIt, class RandIt2, class OutputIt, class Compare, class Op>
+OutputIt op_merge_blocks_with_irreg
+ ( RandItKeys key_first
+ , RandItKeys key_mid
+ , KeyCompare key_comp
+ , RandIt first_reg
+ , RandIt2 &first_irr
+ , RandIt2 const last_irr
+ , OutputIt dest
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , typename iterator_traits<RandIt>::size_type n_block_left
+ , typename iterator_traits<RandIt>::size_type min_check
+ , typename iterator_traits<RandIt>::size_type max_check
+ , Compare comp, bool const is_stable, Op op)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+
+ for(; n_block_left; --n_block_left, ++key_first, min_check -= min_check != 0, max_check -= max_check != 0){
+ size_type next_key_idx = find_next_block(key_first, key_comp, first_reg, l_block, min_check, max_check, comp);
+ max_check = min_value(max_value(max_check, next_key_idx+2), n_block_left);
+ RandIt const last_reg = first_reg + l_block;
+ RandIt first_min = first_reg + next_key_idx*l_block;
+ RandIt const last_min = first_min + l_block; (void)last_min;
+
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first_reg, last_reg, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!next_key_idx || is_sorted(first_min, last_min, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT((!next_key_idx || !comp(*first_reg, *first_min )));
+
+ OutputIt orig_dest = dest; (void)orig_dest;
+ dest = next_key_idx ? op_partial_merge_and_swap(first_irr, last_irr, first_reg, last_reg, first_min, dest, comp, op, is_stable)
+ : op_partial_merge (first_irr, last_irr, first_reg, last_reg, dest, comp, op, is_stable);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(orig_dest, dest, comp));
+
+ if(first_reg == dest){
+ dest = next_key_idx ? ::boost::adl_move_swap_ranges(first_min, last_min, first_reg)
+ : last_reg;
+ }
+ else{
+ dest = next_key_idx ? op(three_way_forward_t(), first_reg, last_reg, first_min, dest)
+ : op(forward_t(), first_reg, last_reg, dest);
+ }
+
+ RandItKeys const key_next(key_first + next_key_idx);
+ swap_and_update_key(key_next, key_first, key_mid, last_reg, last_reg, first_min);
+
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(orig_dest, dest, comp));
+ first_reg = last_reg;
+ }
+ return dest;
+}
+
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+//
+// op_merge_blocks_left/right
+//
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class Op>
+void op_merge_blocks_left
+ ( RandItKeys const key_first
+ , KeyCompare key_comp
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , typename iterator_traits<RandIt>::size_type const l_irreg1
+ , typename iterator_traits<RandIt>::size_type const n_block_a
+ , typename iterator_traits<RandIt>::size_type const n_block_b
+ , typename iterator_traits<RandIt>::size_type const l_irreg2
+ , Compare comp, Op op)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const key_count = needed_keys_count(n_block_a, n_block_b); (void)key_count;
+// BOOST_ASSERT(n_block_a || n_block_b);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(boost::movelib::is_sorted_and_unique(key_first, key_first + key_count, key_comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!n_block_b || n_block_a == count_if_with(key_first, key_first + key_count, key_comp, key_first[n_block_a]));
+
+ size_type n_block_b_left = n_block_b;
+ size_type n_block_a_left = n_block_a;
+ size_type n_block_left = n_block_b + n_block_a;
+ RandItKeys key_mid(key_first + n_block_a);
+
+ RandIt buffer = first - l_block;
+ RandIt first1 = first;
+ RandIt last1 = first1 + l_irreg1;
+ RandIt first2 = last1;
+ RandIt const irreg2 = first2 + n_block_left*l_block;
+ bool is_range1_A = true;
+
+ RandItKeys key_range2(key_first);
+
+ ////////////////////////////////////////////////////////////////////////////
+ //Process all regular blocks before the irregular B block
+ ////////////////////////////////////////////////////////////////////////////
+ size_type min_check = n_block_a == n_block_left ? 0u : n_block_a;
+ size_type max_check = min_value(min_check+1, n_block_left);
+ for (; n_block_left; --n_block_left, ++key_range2, min_check -= min_check != 0, max_check -= max_check != 0) {
+ size_type const next_key_idx = find_next_block(key_range2, key_comp, first2, l_block, min_check, max_check, comp);
+ max_check = min_value(max_value(max_check, next_key_idx+2), n_block_left);
+ RandIt const first_min = first2 + next_key_idx*l_block;
+ RandIt const last_min = first_min + l_block; (void)last_min;
+ RandIt const last2 = first2 + l_block;
+
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first1, last1, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first2, last2, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!n_block_left || is_sorted(first_min, last_min, comp));
+
+ //Check if irregular b block should go here.
+ //If so, break to the special code handling the irregular block
+ if (!n_block_b_left &&
+ ( (l_irreg2 && comp(*irreg2, *first_min)) || (!l_irreg2 && is_range1_A)) ){
+ break;
+ }
+
+ RandItKeys const key_next(key_range2 + next_key_idx);
+ bool const is_range2_A = key_mid == (key_first+key_count) || key_comp(*key_next, *key_mid);
+
+ bool const is_buffer_middle = last1 == buffer;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT( ( is_buffer_middle && size_type(first2-buffer) == l_block && buffer == last1) ||
+ (!is_buffer_middle && size_type(first1-buffer) == l_block && first2 == last1));
+
+ if(is_range1_A == is_range2_A){
+ BOOST_ASSERT((first1 == last1) || !comp(*first_min, last1[-1]));
+ if(!is_buffer_middle){
+ buffer = op(forward_t(), first1, last1, buffer);
+ }
+ swap_and_update_key(key_next, key_range2, key_mid, first2, last2, first_min);
+ first1 = first2;
+ last1 = last2;
+ }
+ else {
+ RandIt unmerged;
+ RandIt buf_beg;
+ RandIt buf_end;
+ if(is_buffer_middle){
+ buf_end = buf_beg = first2 - (last1-first1);
+ unmerged = op_partial_merge_and_save( first1, last1, first2, last2, first_min
+ , buf_beg, buf_end, comp, op, is_range1_A);
+ }
+ else{
+ buf_beg = first1;
+ buf_end = last1;
+ unmerged = op_partial_merge_and_save
+ (buffer, buffer+(last1-first1), first2, last2, first_min, buf_beg, buf_end, comp, op, is_range1_A);
+ }
+ (void)unmerged;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first-l_block, unmerged, comp));
+
+ swap_and_update_key( key_next, key_range2, key_mid, first2, last2
+ , last_min - size_type(last2 - first2));
+
+ if(buf_beg != buf_end){ //range2 exhausted: is_buffer_middle for the next iteration
+ first1 = buf_beg;
+ last1 = buf_end;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(buf_end == (last2-l_block));
+ buffer = last1;
+ }
+ else{ //range1 exhausted: !is_buffer_middle for the next iteration
+ first1 = first2;
+ last1 = last2;
+ buffer = first2 - l_block;
+ is_range1_A = is_range2_A;
+ }
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT( (is_range2_A && n_block_a_left) || (!is_range2_A && n_block_b_left));
+ is_range2_A ? --n_block_a_left : --n_block_b_left;
+ first2 = last2;
+ }
+
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!n_block_b || n_block_a == count_if_with(key_first, key_range2 + n_block_left, key_comp, *key_mid));
+ BOOST_ASSERT(!n_block_b_left);
+
+ ////////////////////////////////////////////////////////////////////////////
+ //Process remaining range 1 left before the irregular B block
+ ////////////////////////////////////////////////////////////////////////////
+ bool const is_buffer_middle = last1 == buffer;
+ RandIt first_irr2 = irreg2;
+ RandIt const last_irr2 = first_irr2 + l_irreg2;
+ if(l_irreg2 && is_range1_A){
+ if(is_buffer_middle){
+ first1 = skip_until_merge(first1, last1, *first_irr2, comp);
+         //Even though this looks like a case for a backward copy, no overlapping
+         //occurs, so a forward copy is used; it can be faster, especially with
+         //trivial types
+ RandIt const new_first1 = first2 - (last1 - first1);
+ op(forward_t(), first1, last1, new_first1);
+ first1 = new_first1;
+ last1 = first2;
+ buffer = first1 - l_block;
+ }
+ buffer = op_partial_merge_impl(first1, last1, first_irr2, last_irr2, buffer, comp, op);
+ buffer = op(forward_t(), first1, last1, buffer);
+ }
+ else if(!is_buffer_middle){
+ buffer = op(forward_t(), first1, last1, buffer);
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first-l_block, buffer, comp));
+
+ ////////////////////////////////////////////////////////////////////////////
+ //Process irregular B block and remaining A blocks
+ ////////////////////////////////////////////////////////////////////////////
+ buffer = op_merge_blocks_with_irreg
+ ( key_range2, key_mid, key_comp, first2, first_irr2, last_irr2
+ , buffer, l_block, n_block_left, min_check, max_check, comp, false, op);
+ buffer = op(forward_t(), first_irr2, last_irr2, buffer);(void)buffer;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first-l_block, buffer, comp));
+}
+
+// first - first element to merge.
+// first[-l_block, 0) - buffer (if use_buf == true)
+// l_block - length of the regular blocks. The first blocks are stably sorted by their first elements and key-coded
+// keys - sequence of keys, in the same order as the blocks; key < midkey means stream A
+// n_bef_irreg2/n_aft_irreg2 are counts of regular blocks
+// l_irreg2 is an irregular block that is combined after the n_bef_irreg2 blocks and before the n_aft_irreg2 blocks
+// If l_irreg2==0 then n_aft_irreg2==0 (no irregular blocks).
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
+void merge_blocks_left
+ ( RandItKeys const key_first
+ , KeyCompare key_comp
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , typename iterator_traits<RandIt>::size_type const l_irreg1
+ , typename iterator_traits<RandIt>::size_type const n_block_a
+ , typename iterator_traits<RandIt>::size_type const n_block_b
+ , typename iterator_traits<RandIt>::size_type const l_irreg2
+ , Compare comp
+ , bool const xbuf_used)
+{
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!n_block_b || n_block_a == count_if_with(key_first, key_first + needed_keys_count(n_block_a, n_block_b), key_comp, key_first[n_block_a]));
+ if(xbuf_used){
+ op_merge_blocks_left
+ (key_first, key_comp, first, l_block, l_irreg1, n_block_a, n_block_b, l_irreg2, comp, move_op());
+ }
+ else{
+ op_merge_blocks_left
+ (key_first, key_comp, first, l_block, l_irreg1, n_block_a, n_block_b, l_irreg2, comp, swap_op());
+ }
+}
+
+// first - first element to merge.
+// [first+l_block*(n_bef_irreg2+n_aft_irreg2)+l_irreg2, first+l_block*(n_bef_irreg2+n_aft_irreg2+1)+l_irreg2) - buffer
+// l_block - length of the regular blocks. The first blocks are stably sorted by their first elements and key-coded
+// keys - sequence of keys, in the same order as the blocks; key < midkey means stream A
+// n_bef_irreg2/n_aft_irreg2 are counts of regular blocks
+// l_irreg2 is an irregular block that is combined after the n_bef_irreg2 blocks and before the n_aft_irreg2 blocks
+// If l_irreg2==0 then n_aft_irreg2==0 (no irregular blocks).
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
+void merge_blocks_right
+ ( RandItKeys const key_first
+ , KeyCompare key_comp
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , typename iterator_traits<RandIt>::size_type const n_block_a
+ , typename iterator_traits<RandIt>::size_type const n_block_b
+ , typename iterator_traits<RandIt>::size_type const l_irreg2
+ , Compare comp
+ , bool const xbuf_used)
+{
+ merge_blocks_left
+ ( (make_reverse_iterator)(key_first + needed_keys_count(n_block_a, n_block_b))
+ , inverse<KeyCompare>(key_comp)
+ , (make_reverse_iterator)(first + ((n_block_a+n_block_b)*l_block+l_irreg2))
+ , l_block
+ , l_irreg2
+ , n_block_b
+ , n_block_a
+ , 0
+ , inverse<Compare>(comp), xbuf_used);
+}
+
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+//
+// op_merge_blocks_with_buf
+//
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class Op, class RandItBuf>
+void op_merge_blocks_with_buf
+ ( RandItKeys key_first
+ , KeyCompare key_comp
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_block
+ , typename iterator_traits<RandIt>::size_type const l_irreg1
+ , typename iterator_traits<RandIt>::size_type const n_block_a
+ , typename iterator_traits<RandIt>::size_type const n_block_b
+ , typename iterator_traits<RandIt>::size_type const l_irreg2
+ , Compare comp
+ , Op op
+ , RandItBuf const buf_first)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const key_count = needed_keys_count(n_block_a, n_block_b); (void)key_count;
+ //BOOST_ASSERT(n_block_a || n_block_b);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(boost::movelib::is_sorted_and_unique(key_first, key_first + key_count, key_comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!n_block_b || n_block_a == count_if_with(key_first, key_first + key_count, key_comp, key_first[n_block_a]));
+
+ size_type n_block_b_left = n_block_b;
+ size_type n_block_a_left = n_block_a;
+ size_type n_block_left = n_block_b + n_block_a;
+ RandItKeys key_mid(key_first + n_block_a);
+
+ RandItBuf buffer = buf_first;
+ RandItBuf buffer_end = buffer;
+ RandIt first1 = first;
+ RandIt last1 = first1 + l_irreg1;
+ RandIt first2 = last1;
+ RandIt const first_irr2 = first2 + n_block_left*l_block;
+ bool is_range1_A = true;
+
+ RandItKeys key_range2(key_first);
+
+ ////////////////////////////////////////////////////////////////////////////
+ //Process all regular blocks before the irregular B block
+ ////////////////////////////////////////////////////////////////////////////
+ size_type min_check = n_block_a == n_block_left ? 0u : n_block_a;
+ size_type max_check = min_value(min_check+1, n_block_left);
+ for (; n_block_left; --n_block_left, ++key_range2, min_check -= min_check != 0, max_check -= max_check != 0) {
+ size_type const next_key_idx = find_next_block(key_range2, key_comp, first2, l_block, min_check, max_check, comp);
+ max_check = min_value(max_value(max_check, next_key_idx+2), n_block_left);
+ RandIt first_min = first2 + next_key_idx*l_block;
+ RandIt const last_min = first_min + l_block; (void)last_min;
+ RandIt const last2 = first2 + l_block;
+
+ bool const buffer_empty = buffer == buffer_end; (void)buffer_empty;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(buffer_empty ? is_sorted(first1, last1, comp) : is_sorted(buffer, buffer_end, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first2, last2, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!n_block_left || is_sorted(first_min, last_min, comp));
+
+ //Check if irregular b block should go here.
+ //If so, break to the special code handling the irregular block
+ if (!n_block_b_left &&
+ ( (l_irreg2 && comp(*first_irr2, *first_min)) || (!l_irreg2 && is_range1_A)) ){
+ break;
+ }
+
+ RandItKeys const key_next(key_range2 + next_key_idx);
+ bool const is_range2_A = key_mid == (key_first+key_count) || key_comp(*key_next, *key_mid);
+
+ if(is_range1_A == is_range2_A){
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT((first1 == last1) || (buffer_empty ? !comp(*first_min, last1[-1]) : !comp(*first_min, buffer_end[-1])));
+ //If buffered, put those elements in place
+ RandIt res = op(forward_t(), buffer, buffer_end, first1);
+ buffer = buffer_end = buf_first;
+ BOOST_ASSERT(buffer_empty || res == last1); (void)res;
+ swap_and_update_key(key_next, key_range2, key_mid, first2, last2, first_min);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first2, last2, comp));
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first_min, last_min, comp));
+ first1 = first2;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first, first1, comp));
+ }
+ else {
+ RandIt const unmerged = op_partial_merge_and_save(first1, last1, first2, last2, first_min, buffer, buffer_end, comp, op, is_range1_A);
+ bool const is_range_1_empty = buffer == buffer_end;
+ BOOST_ASSERT(is_range_1_empty || (buffer_end-buffer) == (last1+l_block-unmerged));
+ if(is_range_1_empty){
+ buffer = buffer_end = buf_first;
+ first_min = last_min - (last2 - first2);
+ }
+ else{
+ first_min = last_min;
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(!is_range_1_empty || (last_min-first_min) == (last2-unmerged));
+ swap_and_update_key(key_next, key_range2, key_mid, first2, last2, first_min);
+
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first_min, last_min, comp));
+ is_range1_A ^= is_range_1_empty;
+ first1 = unmerged;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first, unmerged, comp));
+ }
+ BOOST_ASSERT( (is_range2_A && n_block_a_left) || (!is_range2_A && n_block_b_left));
+ is_range2_A ? --n_block_a_left : --n_block_b_left;
+ last1 += l_block;
+ first2 = last2;
+ }
+
+ RandIt res = op(forward_t(), buffer, buffer_end, first1); (void)res;
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first, res, comp));
+
+ ////////////////////////////////////////////////////////////////////////////
+ //Process irregular B block and remaining A blocks
+ ////////////////////////////////////////////////////////////////////////////
+ RandIt const last_irr2 = first_irr2 + l_irreg2;
+ op(forward_t(), first_irr2, first_irr2+l_irreg2, buf_first);
+ buffer = buf_first;
+ buffer_end = buffer+l_irreg2;
+
+ reverse_iterator<RandItBuf> rbuf_beg(buffer_end);
+ RandIt dest = op_merge_blocks_with_irreg
+ ((make_reverse_iterator)(key_first + n_block_b + n_block_a), (make_reverse_iterator)(key_mid), inverse<KeyCompare>(key_comp)
+ , (make_reverse_iterator)(first_irr2), rbuf_beg
+ , (make_reverse_iterator)(buffer), (make_reverse_iterator)(last_irr2)
+ , l_block, n_block_left, 0, n_block_left
+ , inverse<Compare>(comp), true, op).base();
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(dest, last_irr2, comp));
+
+ buffer_end = rbuf_beg.base();
+ BOOST_ASSERT((dest-last1) == (buffer_end-buffer));
+ op_merge_with_left_placed(is_range1_A ? first1 : last1, last1, dest, buffer, buffer_end, comp, op);
+
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(first, last_irr2, comp));
+}
+
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+//
+// op_insertion_sort_step_left/right
+//
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+
+template<class RandIt, class Compare, class Op>
+typename iterator_traits<RandIt>::size_type
+ op_insertion_sort_step_left
+ ( RandIt const first
+ , typename iterator_traits<RandIt>::size_type const length
+ , typename iterator_traits<RandIt>::size_type const step
+ , Compare comp, Op op)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const s = min_value<size_type>(step, AdaptiveSortInsertionSortThreshold);
+ size_type m = 0;
+
+ while((length - m) > s){
+ insertion_sort_op(first+m, first+m+s, first+m-s, comp, op);
+ m += s;
+ }
+ insertion_sort_op(first+m, first+length, first+m-s, comp, op);
+ return s;
+}
+
+template<class RandIt, class Compare, class Op>
+void op_merge_right_step_once
+ ( RandIt first_block
+ , typename iterator_traits<RandIt>::size_type const elements_in_blocks
+ , typename iterator_traits<RandIt>::size_type const l_build_buf
+ , Compare comp
+ , Op op)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type restk = elements_in_blocks%(2*l_build_buf);
+ size_type p = elements_in_blocks - restk;
+ BOOST_ASSERT(0 == (p%(2*l_build_buf)));
+
+ if(restk <= l_build_buf){
+ op(backward_t(),first_block+p, first_block+p+restk, first_block+p+restk+l_build_buf);
+ }
+ else{
+ op_merge_right(first_block+p, first_block+p+l_build_buf, first_block+p+restk, first_block+p+restk+l_build_buf, comp, op);
+ }
+ while(p>0){
+ p -= 2*l_build_buf;
+ op_merge_right(first_block+p, first_block+p+l_build_buf, first_block+p+2*l_build_buf, first_block+p+3*l_build_buf, comp, op);
+ }
+}
+
+
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+//
+// insertion_sort_step
+//
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+template<class RandIt, class Compare>
+typename iterator_traits<RandIt>::size_type
+ insertion_sort_step
+ ( RandIt const first
+ , typename iterator_traits<RandIt>::size_type const length
+ , typename iterator_traits<RandIt>::size_type const step
+ , Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const s = min_value<size_type>(step, AdaptiveSortInsertionSortThreshold);
+ size_type m = 0;
+
+ while((length - m) > s){
+ insertion_sort(first+m, first+m+s, comp);
+ m += s;
+ }
+ insertion_sort(first+m, first+length, comp);
+ return s;
+}
+
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+//
+// op_merge_left_step_multiple
+//
+//////////////////////////////////
+//////////////////////////////////
+//////////////////////////////////
+template<class RandIt, class Compare, class Op>
+typename iterator_traits<RandIt>::size_type
+ op_merge_left_step_multiple
+ ( RandIt first_block
+ , typename iterator_traits<RandIt>::size_type const elements_in_blocks
+ , typename iterator_traits<RandIt>::size_type l_merged
+ , typename iterator_traits<RandIt>::size_type const l_build_buf
+ , typename iterator_traits<RandIt>::size_type l_left_space
+ , Compare comp
+ , Op op)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ for(; l_merged < l_build_buf && l_left_space >= l_merged; l_merged*=2){
+ size_type p0=0;
+ RandIt pos = first_block;
+ while((elements_in_blocks - p0) > 2*l_merged) {
+ op_merge_left(pos-l_merged, pos, pos+l_merged, pos+2*l_merged, comp, op);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(pos-l_merged, pos+l_merged, comp));
+ p0 += 2*l_merged;
+ pos = first_block+p0;
+ }
+ if((elements_in_blocks-p0) > l_merged) {
+ op_merge_left(pos-l_merged, pos, pos+l_merged, first_block+elements_in_blocks, comp, op);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(pos-l_merged, pos-l_merged+(first_block+elements_in_blocks-pos), comp));
+ }
+ else {
+ op(forward_t(), pos, first_block+elements_in_blocks, pos-l_merged);
+ BOOST_MOVE_ADAPTIVE_SORT_INVARIANT(is_sorted(pos-l_merged, first_block+elements_in_blocks-l_merged, comp));
+ }
+ first_block -= l_merged;
+ l_left_space -= l_merged;
+ }
+ return l_merged;
+}
+
+
+} //namespace detail_adaptive {
+} //namespace movelib {
+} //namespace boost {
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //#define BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP
diff --git a/include/boost/move/algo/detail/basic_op.hpp b/include/boost/move/algo/detail/basic_op.hpp
new file mode 100644
index 0000000..ea5faf0
--- /dev/null
+++ b/include/boost/move/algo/detail/basic_op.hpp
@@ -0,0 +1,121 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2015-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+#ifndef BOOST_MOVE_ALGO_BASIC_OP
+#define BOOST_MOVE_ALGO_BASIC_OP
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+#
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <boost/move/utility_core.hpp>
+#include <boost/move/adl_move_swap.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+
+namespace boost {
+namespace movelib {
+
+struct forward_t{};
+struct backward_t{};
+struct three_way_t{};
+struct three_way_forward_t{};
+struct four_way_t{};
+
+struct move_op
+{
+ template <class SourceIt, class DestinationIt>
+ BOOST_MOVE_FORCEINLINE void operator()(SourceIt source, DestinationIt dest)
+ { *dest = ::boost::move(*source); }
+
+ template <class SourceIt, class DestinationIt>
+ BOOST_MOVE_FORCEINLINE DestinationIt operator()(forward_t, SourceIt first, SourceIt last, DestinationIt dest_begin)
+ { return ::boost::move(first, last, dest_begin); }
+
+ template <class SourceIt, class DestinationIt>
+ BOOST_MOVE_FORCEINLINE DestinationIt operator()(backward_t, SourceIt first, SourceIt last, DestinationIt dest_last)
+ { return ::boost::move_backward(first, last, dest_last); }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2>
+ BOOST_MOVE_FORCEINLINE void operator()(three_way_t, SourceIt srcit, DestinationIt1 dest1it, DestinationIt2 dest2it)
+ {
+ *dest2it = boost::move(*dest1it);
+ *dest1it = boost::move(*srcit);
+ }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2>
+ DestinationIt2 operator()(three_way_forward_t, SourceIt srcit, SourceIt srcitend, DestinationIt1 dest1it, DestinationIt2 dest2it)
+ {
+ //Destination2 range can overlap SourceIt range so avoid boost::move
+ while(srcit != srcitend){
+ this->operator()(three_way_t(), srcit++, dest1it++, dest2it++);
+ }
+ return dest2it;
+ }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2, class DestinationIt3>
+ BOOST_MOVE_FORCEINLINE void operator()(four_way_t, SourceIt srcit, DestinationIt1 dest1it, DestinationIt2 dest2it, DestinationIt3 dest3it)
+ {
+ *dest3it = boost::move(*dest2it);
+ *dest2it = boost::move(*dest1it);
+ *dest1it = boost::move(*srcit);
+ }
+};
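+
+// Note on the tagged overloads above: three_way_t performs a chained move
+// (*dest2it receives *dest1it, which in turn receives *srcit) and four_way_t
+// extends the same chain by one more destination, so a whole rotation of values
+// is expressed without a temporary when plain moves are enough.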
+
+struct swap_op
+{
+ template <class SourceIt, class DestinationIt>
+ BOOST_MOVE_FORCEINLINE void operator()(SourceIt source, DestinationIt dest)
+ { boost::adl_move_swap(*dest, *source); }
+
+ template <class SourceIt, class DestinationIt>
+ BOOST_MOVE_FORCEINLINE DestinationIt operator()(forward_t, SourceIt first, SourceIt last, DestinationIt dest_begin)
+ { return boost::adl_move_swap_ranges(first, last, dest_begin); }
+
+ template <class SourceIt, class DestinationIt>
+ BOOST_MOVE_FORCEINLINE DestinationIt operator()(backward_t, SourceIt first, SourceIt last, DestinationIt dest_begin)
+ { return boost::adl_move_swap_ranges_backward(first, last, dest_begin); }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2>
+ BOOST_MOVE_FORCEINLINE void operator()(three_way_t, SourceIt srcit, DestinationIt1 dest1it, DestinationIt2 dest2it)
+ {
+ typename ::boost::movelib::iterator_traits<SourceIt>::value_type tmp(boost::move(*dest2it));
+ *dest2it = boost::move(*dest1it);
+ *dest1it = boost::move(*srcit);
+ *srcit = boost::move(tmp);
+ }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2>
+ DestinationIt2 operator()(three_way_forward_t, SourceIt srcit, SourceIt srcitend, DestinationIt1 dest1it, DestinationIt2 dest2it)
+ {
+ while(srcit != srcitend){
+ this->operator()(three_way_t(), srcit++, dest1it++, dest2it++);
+ }
+ return dest2it;
+ }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2, class DestinationIt3>
+ BOOST_MOVE_FORCEINLINE void operator()(four_way_t, SourceIt srcit, DestinationIt1 dest1it, DestinationIt2 dest2it, DestinationIt3 dest3it)
+ {
+ typename ::boost::movelib::iterator_traits<SourceIt>::value_type tmp(boost::move(*dest3it));
+ *dest3it = boost::move(*dest2it);
+ *dest2it = boost::move(*dest1it);
+ *dest1it = boost::move(*srcit);
+ *srcit = boost::move(tmp);
+ }
+};
+
+
+}} //namespace boost::movelib
+
+#endif //BOOST_MOVE_ALGO_BASIC_OP
diff --git a/include/boost/move/algo/detail/heap_sort.hpp b/include/boost/move/algo/detail/heap_sort.hpp
new file mode 100644
index 0000000..5474d9f
--- /dev/null
+++ b/include/boost/move/algo/detail/heap_sort.hpp
@@ -0,0 +1,111 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2017-2018.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+//! \file
+
+#ifndef BOOST_MOVE_DETAIL_HEAP_SORT_HPP
+#define BOOST_MOVE_DETAIL_HEAP_SORT_HPP
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+#
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/detail/workaround.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+#include <boost/move/algo/detail/is_sorted.hpp>
+#include <boost/move/utility_core.hpp>
+
+namespace boost { namespace movelib{
+
+template <class RandomAccessIterator, class Compare>
+class heap_sort_helper
+{
+ typedef typename boost::movelib::iterator_traits<RandomAccessIterator>::size_type size_type;
+ typedef typename boost::movelib::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ static void adjust_heap(RandomAccessIterator first, size_type hole_index, size_type const len, value_type &value, Compare comp)
+ {
+ size_type const top_index = hole_index;
+ size_type second_child = 2 * (hole_index + 1);
+
+ while (second_child < len) {
+ if (comp(*(first + second_child), *(first + (second_child - 1))))
+ second_child--;
+ *(first + hole_index) = boost::move(*(first + second_child));
+ hole_index = second_child;
+ second_child = 2 * (second_child + 1);
+ }
+ if (second_child == len) {
+ *(first + hole_index) = boost::move(*(first + (second_child - 1)));
+ hole_index = second_child - 1;
+ }
+
+ { //push_heap-like ending
+ size_type parent = (hole_index - 1) / 2;
+ while (hole_index > top_index && comp(*(first + parent), value)) {
+ *(first + hole_index) = boost::move(*(first + parent));
+ hole_index = parent;
+ parent = (hole_index - 1) / 2;
+ }
+ *(first + hole_index) = boost::move(value);
+ }
+ }
+
+ static void make_heap(RandomAccessIterator first, RandomAccessIterator last, Compare comp)
+ {
+ size_type const len = size_type(last - first);
+ if (len > 1) {
+ size_type parent = len/2u - 1u;
+
+ do {
+ value_type v(boost::move(*(first + parent)));
+ adjust_heap(first, parent, len, v, comp);
+ }while (parent--);
+ }
+ }
+
+ static void sort_heap(RandomAccessIterator first, RandomAccessIterator last, Compare comp)
+ {
+ size_type len = size_type(last - first);
+ while (len > 1) {
+ //move biggest to the safe zone
+ --last;
+ value_type v(boost::move(*last));
+ *last = boost::move(*first);
+ adjust_heap(first, size_type(0), --len, v, comp);
+ }
+ }
+
+ public:
+ static void sort(RandomAccessIterator first, RandomAccessIterator last, Compare comp)
+ {
+ make_heap(first, last, comp);
+ sort_heap(first, last, comp);
+ BOOST_ASSERT(boost::movelib::is_sorted(first, last, comp));
+ }
+};
+
+template <class RandomAccessIterator, class Compare>
+BOOST_MOVE_FORCEINLINE void heap_sort(RandomAccessIterator first, RandomAccessIterator last, Compare comp)
+{
+ heap_sort_helper<RandomAccessIterator, Compare>::sort(first, last, comp);
+}
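+
+// Usage sketch (illustrative only): heap_sort accepts any random-access range and a
+// strict weak ordering, e.g.
+//    int a[] = {3, 1, 2};
+//    boost::movelib::heap_sort(a, a + 3, std::less<int>());
+// leaves a holding {1, 2, 3}; the sort is in-place but not stable.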
+
+}} //namespace boost { namespace movelib{
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //#ifndef BOOST_MOVE_DETAIL_HEAP_SORT_HPP
diff --git a/include/boost/move/algo/detail/insertion_sort.hpp b/include/boost/move/algo/detail/insertion_sort.hpp
new file mode 100644
index 0000000..5c378c3
--- /dev/null
+++ b/include/boost/move/algo/detail/insertion_sort.hpp
@@ -0,0 +1,128 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2014-2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+//! \file
+
+#ifndef BOOST_MOVE_DETAIL_INSERT_SORT_HPP
+#define BOOST_MOVE_DETAIL_INSERT_SORT_HPP
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+#
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <boost/move/utility_core.hpp>
+#include <boost/move/algo/move.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+#include <boost/move/adl_move_swap.hpp>
+#include <boost/move/utility_core.hpp>
+#include <boost/move/detail/placement_new.hpp>
+#include <boost/move/detail/destruct_n.hpp>
+#include <boost/move/algo/detail/basic_op.hpp>
+#include <boost/move/detail/placement_new.hpp>
+#include <boost/move/detail/iterator_to_raw_pointer.hpp>
+
+namespace boost { namespace movelib{
+
+// @cond
+
+template <class Compare, class ForwardIterator, class BirdirectionalIterator, class Op>
+void insertion_sort_op(ForwardIterator first1, ForwardIterator last1, BirdirectionalIterator first2, Compare comp, Op op)
+{
+ if (first1 != last1){
+ BirdirectionalIterator last2 = first2;
+ op(first1, last2);
+ for (++last2; ++first1 != last1; ++last2){
+ BirdirectionalIterator j2 = last2;
+ BirdirectionalIterator i2 = j2;
+ if (comp(*first1, *--i2)){
+ op(i2, j2);
+ for (--j2; i2 != first2 && comp(*first1, *--i2); --j2) {
+ op(i2, j2);
+ }
+ }
+ op(first1, j2);
+ }
+ }
+}
+
+template <class Compare, class ForwardIterator, class BirdirectionalIterator>
+void insertion_sort_swap(ForwardIterator first1, ForwardIterator last1, BirdirectionalIterator first2, Compare comp)
+{
+ insertion_sort_op(first1, last1, first2, comp, swap_op());
+}
+
+
+template <class Compare, class ForwardIterator, class BirdirectionalIterator>
+void insertion_sort_copy(ForwardIterator first1, ForwardIterator last1, BirdirectionalIterator first2, Compare comp)
+{
+ insertion_sort_op(first1, last1, first2, comp, move_op());
+}
+
+// @endcond
+
+template <class Compare, class BirdirectionalIterator>
+void insertion_sort(BirdirectionalIterator first, BirdirectionalIterator last, Compare comp)
+{
+ typedef typename boost::movelib::iterator_traits<BirdirectionalIterator>::value_type value_type;
+ if (first != last){
+ BirdirectionalIterator i = first;
+ for (++i; i != last; ++i){
+ BirdirectionalIterator j = i;
+ if (comp(*i, *--j)) {
+ value_type tmp(::boost::move(*i));
+ *i = ::boost::move(*j);
+ for (BirdirectionalIterator k = j; k != first && comp(tmp, *--k); --j) {
+ *j = ::boost::move(*k);
+ }
+ *j = ::boost::move(tmp);
+ }
+ }
+ }
+}
+
+template <class Compare, class BirdirectionalIterator, class BirdirectionalRawIterator>
+void insertion_sort_uninitialized_copy
+ (BirdirectionalIterator first1, BirdirectionalIterator const last1
+ , BirdirectionalRawIterator const first2
+ , Compare comp)
+{
+ typedef typename iterator_traits<BirdirectionalIterator>::value_type value_type;
+ if (first1 != last1){
+ BirdirectionalRawIterator last2 = first2;
+ ::new((iterator_to_raw_pointer)(last2), boost_move_new_t()) value_type(::boost::move(*first1));
+ destruct_n<value_type, BirdirectionalRawIterator> d(first2);
+ d.incr();
+ for (++last2; ++first1 != last1; ++last2){
+ BirdirectionalRawIterator j2 = last2;
+ BirdirectionalRawIterator k2 = j2;
+ if (comp(*first1, *--k2)){
+ ::new((iterator_to_raw_pointer)(j2), boost_move_new_t()) value_type(::boost::move(*k2));
+ d.incr();
+ for (--j2; k2 != first2 && comp(*first1, *--k2); --j2)
+ *j2 = ::boost::move(*k2);
+ *j2 = ::boost::move(*first1);
+ }
+ else{
+ ::new((iterator_to_raw_pointer)(j2), boost_move_new_t()) value_type(::boost::move(*first1));
+ d.incr();
+ }
+ }
+ d.release();
+ }
+}
+
+}} //namespace boost { namespace movelib{
+
+#endif //#ifndef BOOST_MOVE_DETAIL_INSERT_SORT_HPP
diff --git a/include/boost/move/algo/detail/is_sorted.hpp b/include/boost/move/algo/detail/is_sorted.hpp
new file mode 100644
index 0000000..d3dccfc
--- /dev/null
+++ b/include/boost/move/algo/detail/is_sorted.hpp
@@ -0,0 +1,55 @@
+#ifndef BOOST_MOVE_DETAIL_IS_SORTED_HPP
+#define BOOST_MOVE_DETAIL_IS_SORTED_HPP
+///////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2017-2018. Distributed under the Boost
+// Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/container for documentation.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+namespace boost {
+namespace movelib {
+
+template<class ForwardIt, class Pred>
+bool is_sorted(ForwardIt const first, ForwardIt last, Pred pred)
+{
+ if (first != last) {
+ ForwardIt next = first, cur(first);
+ while (++next != last) {
+ if (pred(*next, *cur))
+ return false;
+ cur = next;
+ }
+ }
+ return true;
+}
+
+template<class ForwardIt, class Pred>
+bool is_sorted_and_unique(ForwardIt first, ForwardIt last, Pred pred)
+{
+ if (first != last) {
+ ForwardIt next = first;
+ while (++next != last) {
+ if (!pred(*first, *next))
+ return false;
+ first = next;
+ }
+ }
+ return true;
+}
+
+} //namespace movelib {
+} //namespace boost {
+
+#endif //BOOST_MOVE_DETAIL_IS_SORTED_HPP
diff --git a/include/boost/move/algo/detail/merge.hpp b/include/boost/move/algo/detail/merge.hpp
new file mode 100644
index 0000000..8607735
--- /dev/null
+++ b/include/boost/move/algo/detail/merge.hpp
@@ -0,0 +1,553 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2015-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+#ifndef BOOST_MOVE_MERGE_HPP
+#define BOOST_MOVE_MERGE_HPP
+
+#include <boost/move/algo/move.hpp>
+#include <boost/move/adl_move_swap.hpp>
+#include <boost/move/algo/detail/basic_op.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+#include <boost/move/detail/destruct_n.hpp>
+#include <boost/move/algo/predicate.hpp>
+#include <boost/move/detail/iterator_to_raw_pointer.hpp>
+#include <boost/assert.hpp>
+
+namespace boost {
+namespace movelib {
+
+// @cond
+
+/*
+template<typename Unsigned>
+inline Unsigned gcd(Unsigned x, Unsigned y)
+{
+ if(0 == ((x &(x-1)) | (y & (y-1)))){
+ return x < y ? x : y;
+ }
+ else{
+ do
+ {
+ Unsigned t = x % y;
+ x = y;
+ y = t;
+ } while (y);
+ return x;
+ }
+}
+*/
+
+//Modified version from "An Optimal In-Place Array Rotation Algorithm", Ching-Kuang Shene
+template<typename Unsigned>
+Unsigned gcd(Unsigned x, Unsigned y)
+{
+ if(0 == ((x &(x-1)) | (y & (y-1)))){
+ return x < y ? x : y;
+ }
+ else{
+ Unsigned z = 1;
+ while((!(x&1)) & (!(y&1))){
+ z <<=1, x>>=1, y>>=1;
+ }
+ while(x && y){
+ if(!(x&1))
+ x >>=1;
+ else if(!(y&1))
+ y >>=1;
+ else if(x >=y)
+ x = (x-y) >> 1;
+ else
+ y = (y-x) >> 1;
+ }
+ return z*(x+y);
+ }
+}
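+
+// Editorial note: e.g. gcd(12, 18) extracts the common factor 2 into z (z = 2, x = 6, y = 9),
+// the odd-reduction loop ends with x = 0, y = 3, and the result is z*(x+y) = 6. When both
+// arguments are powers of two the first branch returns the smaller one directly.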
+
+template<typename RandIt>
+RandIt rotate_gcd(RandIt first, RandIt middle, RandIt last)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+
+ if(first == middle)
+ return last;
+ if(middle == last)
+ return first;
+ const size_type middle_pos = size_type(middle - first);
+ RandIt ret = last - middle_pos;
+ if (middle == ret){
+ boost::adl_move_swap_ranges(first, middle, middle);
+ }
+ else{
+ const size_type length = size_type(last - first);
+ for( RandIt it_i(first), it_gcd(it_i + gcd(length, middle_pos))
+ ; it_i != it_gcd
+ ; ++it_i){
+ value_type temp(boost::move(*it_i));
+ RandIt it_j = it_i;
+ RandIt it_k = it_j+middle_pos;
+ do{
+ *it_j = boost::move(*it_k);
+ it_j = it_k;
+ size_type const left = size_type(last - it_j);
+ it_k = left > middle_pos ? it_j + middle_pos : first + (middle_pos - left);
+ } while(it_k != it_i);
+ *it_j = boost::move(temp);
+ }
+ }
+ return ret;
+}
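+
+// Editorial sketch (not part of this header): rotate_gcd behaves like std::rotate and returns
+// an iterator to the new position of the element that was at first. Values are invented.
+//
+//   #include <boost/move/algo/detail/merge.hpp>
+//   #include <vector>
+//
+//   void rotate_gcd_example()
+//   {
+//      std::vector<int> v = {4, 5, 6, 1, 2, 3};
+//      std::vector<int>::iterator it =
+//         boost::movelib::rotate_gcd(v.begin(), v.begin() + 3, v.end());
+//      // v is now {1, 2, 3, 4, 5, 6} and it points to the 4
+//   }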
+
+template <class RandIt, class T, class Compare>
+RandIt lower_bound
+ (RandIt first, const RandIt last, const T& key, Compare comp)
+{
+ typedef typename iterator_traits
+ <RandIt>::size_type size_type;
+ size_type len = size_type(last - first);
+ RandIt middle;
+
+ while (len) {
+ size_type step = len >> 1;
+ middle = first;
+ middle += step;
+
+ if (comp(*middle, key)) {
+ first = ++middle;
+ len -= step + 1;
+ }
+ else{
+ len = step;
+ }
+ }
+ return first;
+}
+
+template <class RandIt, class T, class Compare>
+RandIt upper_bound
+ (RandIt first, const RandIt last, const T& key, Compare comp)
+{
+ typedef typename iterator_traits
+ <RandIt>::size_type size_type;
+ size_type len = size_type(last - first);
+ RandIt middle;
+
+ while (len) {
+ size_type step = len >> 1;
+ middle = first;
+ middle += step;
+
+ if (!comp(key, *middle)) {
+ first = ++middle;
+ len -= step + 1;
+ }
+ else{
+ len = step;
+ }
+ }
+ return first;
+}
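+
+// Editorial sketch (not part of this header): these mirror std::lower_bound/std::upper_bound.
+// Values are invented.
+//
+//   #include <boost/move/algo/detail/merge.hpp>
+//   #include <functional>
+//
+//   void bound_example()
+//   {
+//      const int a[] = {1, 3, 3, 5};
+//      const int *lo = boost::movelib::lower_bound(a, a + 4, 3, std::less<int>()); // a + 1
+//      const int *hi = boost::movelib::upper_bound(a, a + 4, 3, std::less<int>()); // a + 3
+//      (void)lo; (void)hi;
+//   }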
+
+
+template<class RandIt, class Compare, class Op>
+void op_merge_left( RandIt buf_first
+ , RandIt first1
+ , RandIt const last1
+ , RandIt const last2
+ , Compare comp
+ , Op op)
+{
+ for(RandIt first2=last1; first2 != last2; ++buf_first){
+ if(first1 == last1){
+ op(forward_t(), first2, last2, buf_first);
+ return;
+ }
+ else if(comp(*first2, *first1)){
+ op(first2, buf_first);
+ ++first2;
+ }
+ else{
+ op(first1, buf_first);
+ ++first1;
+ }
+ }
+ if(buf_first != first1){//In case all remaining elements are in the same place
+ //(e.g. buffer is exactly the size of the second half
+ //and all elements from the second half are less)
+ op(forward_t(), first1, last1, buf_first);
+ }
+}
+
+// [buf_first, first1) -> buffer
+// [first1, last1) merge [last1,last2) -> [buf_first,buf_first+(last2-first1))
+// Elements from buffer are moved to [last2 - (first1-buf_first), last2)
+// Note: distance(buf_first, first1) >= distance(last1, last2), so no overlapping occurs
+template<class RandIt, class Compare>
+void merge_left
+ (RandIt buf_first, RandIt first1, RandIt const last1, RandIt const last2, Compare comp)
+{
+ op_merge_left(buf_first, first1, last1, last2, comp, move_op());
+}
+
+// [buf_first, first1) -> buffer
+// [first1, last1) merge [last1,last2) -> [buf_first,buf_first+(last2-first1))
+// Elements from buffer are swapped to [last2 - (first1-buf_first), last2)
+// Note: distance(buf_first, first1) >= distance(last1, last2), so no overlapping occurs
+template<class RandIt, class Compare>
+void swap_merge_left
+ (RandIt buf_first, RandIt first1, RandIt const last1, RandIt const last2, Compare comp)
+{
+ op_merge_left(buf_first, first1, last1, last2, comp, swap_op());
+}
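+
+// Editorial example (invented values): given the layout {x, x, 2, 4, 1, 3}, where the two x
+// slots are scratch, buf_first -> x, first1 -> 2, last1 == first2 -> 1 and last2 is the end,
+// merge_left writes the merged run {1, 2, 3, 4} starting at buf_first; swap_merge_left does
+// the same but additionally preserves the scratch values by swapping them into the trailing slots.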
+
+template<class RandIt, class Compare, class Op>
+void op_merge_right
+ (RandIt const first1, RandIt last1, RandIt last2, RandIt buf_last, Compare comp, Op op)
+{
+ RandIt const first2 = last1;
+ while(first1 != last1){
+ if(last2 == first2){
+ op(backward_t(), first1, last1, buf_last);
+ return;
+ }
+ --last2;
+ --last1;
+ --buf_last;
+ if(comp(*last2, *last1)){
+ op(last1, buf_last);
+ ++last2;
+ }
+ else{
+ op(last2, buf_last);
+ ++last1;
+ }
+ }
+ if(last2 != buf_last){ //In case all remaining elements are in the same place
+ //(e.g. buffer is exactly the size of the first half
+ //and all elements from the second half are less)
+ op(backward_t(), first2, last2, buf_last);
+ }
+}
+
+// [last2, buf_last) - buffer
+// [first1, last1) merge [last1,last2) -> [first1+(buf_last-last2), buf_last)
+// Note: distance[last2, buf_last) >= distance[first1, last1), so no overlapping occurs
+template<class RandIt, class Compare>
+void merge_right
+ (RandIt first1, RandIt last1, RandIt last2, RandIt buf_last, Compare comp)
+{
+ op_merge_right(first1, last1, last2, buf_last, comp, move_op());
+}
+
+// [last2, buf_last) - buffer
+// [first1, last1) merge [last1,last2) -> [first1+(buf_last-last2), buf_last)
+// Note: distance[last2, buf_last) >= distance[first1, last1), so no overlapping occurs
+template<class RandIt, class Compare>
+void swap_merge_right
+ (RandIt first1, RandIt last1, RandIt last2, RandIt buf_last, Compare comp)
+{
+ op_merge_right(first1, last1, last2, buf_last, comp, swap_op());
+}
+
+//Complexity: min(len1,len2)^2 + max(len1,len2)
+template<class RandIt, class Compare>
+void merge_bufferless_ON2(RandIt first, RandIt middle, RandIt last, Compare comp)
+{
+ if((middle - first) < (last - middle)){
+ while(first != middle){
+ RandIt const old_last1 = middle;
+ middle = boost::movelib::lower_bound(middle, last, *first, comp);
+ first = rotate_gcd(first, old_last1, middle);
+ if(middle == last){
+ break;
+ }
+ do{
+ ++first;
+ } while(first != middle && !comp(*middle, *first));
+ }
+ }
+ else{
+ while(middle != last){
+ RandIt p = boost::movelib::upper_bound(first, middle, last[-1], comp);
+ last = rotate_gcd(p, middle, last);
+ middle = p;
+ if(middle == first){
+ break;
+ }
+ --p;
+ do{
+ --last;
+ } while(middle != last && !comp(last[-1], *p));
+ }
+ }
+}
+
+static const std::size_t MergeBufferlessONLogNRotationThreshold = 32;
+
+template <class RandIt, class Distance, class Compare>
+void merge_bufferless_ONlogN_recursive
+ (RandIt first, RandIt middle, RandIt last, Distance len1, Distance len2, Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+
+ while(1) {
+ //trivial cases
+ if (!len2) {
+ return;
+ }
+ else if (!len1) {
+ return;
+ }
+ else if (size_type(len1 | len2) == 1u) {
+ if (comp(*middle, *first))
+ adl_move_swap(*first, *middle);
+ return;
+ }
+ else if(size_type(len1+len2) < MergeBufferlessONLogNRotationThreshold){
+ merge_bufferless_ON2(first, middle, last, comp);
+ return;
+ }
+
+ RandIt first_cut = first;
+ RandIt second_cut = middle;
+ Distance len11 = 0;
+ Distance len22 = 0;
+ if (len1 > len2) {
+ len11 = len1 / 2;
+ first_cut += len11;
+ second_cut = boost::movelib::lower_bound(middle, last, *first_cut, comp);
+ len22 = size_type(second_cut - middle);
+ }
+ else {
+ len22 = len2 / 2;
+ second_cut += len22;
+ first_cut = boost::movelib::upper_bound(first, middle, *second_cut, comp);
+ len11 = size_type(first_cut - first);
+ }
+ RandIt new_middle = rotate_gcd(first_cut, middle, second_cut);
+
+ //Avoid one recursive call by doing manual tail-call elimination on the biggest range
+ const Distance len_internal = len11+len22;
+ if( len_internal < (len1 + len2 - len_internal) ) {
+ merge_bufferless_ONlogN_recursive(first, first_cut, new_middle, len11, len22, comp);
+ first = new_middle;
+ middle = second_cut;
+ len1 -= len11;
+ len2 -= len22;
+ }
+ else {
+ merge_bufferless_ONlogN_recursive(new_middle, second_cut, last, len1 - len11, len2 - len22, comp);
+ middle = first_cut;
+ last = new_middle;
+ len1 = len11;
+ len2 = len22;
+ }
+ }
+}
+
+//Complexity: NlogN
+template<class RandIt, class Compare>
+void merge_bufferless_ONlogN(RandIt first, RandIt middle, RandIt last, Compare comp)
+{
+ merge_bufferless_ONlogN_recursive
+ (first, middle, last, middle - first, last - middle, comp);
+}
+
+template<class RandIt, class Compare>
+void merge_bufferless(RandIt first, RandIt middle, RandIt last, Compare comp)
+{
+ #define BOOST_ADAPTIVE_MERGE_NLOGN_MERGE
+ #ifdef BOOST_ADAPTIVE_MERGE_NLOGN_MERGE
+ merge_bufferless_ONlogN(first, middle, last, comp);
+ #else
+ merge_bufferless_ON2(first, middle, last, comp);
+ #endif //BOOST_ADAPTIVE_MERGE_NLOGN_MERGE
+}
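+
+// Editorial sketch (not part of this header): merging two sorted halves of one range in place,
+// with no auxiliary buffer. Values are invented.
+//
+//   #include <boost/move/algo/detail/merge.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   void merge_bufferless_example()
+//   {
+//      std::vector<int> v = {1, 4, 7, 2, 3, 9};   // [0, 3) and [3, 6) are each sorted
+//      boost::movelib::merge_bufferless(v.begin(), v.begin() + 3, v.end(), std::less<int>());
+//      // v is now {1, 2, 3, 4, 7, 9}
+//   }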
+
+// [r_first, r_last) are already in the right part of the destination range.
+template <class Compare, class InputIterator, class InputOutIterator, class Op>
+void op_merge_with_right_placed
+ ( InputIterator first, InputIterator last
+ , InputOutIterator dest_first, InputOutIterator r_first, InputOutIterator r_last
+ , Compare comp, Op op)
+{
+ BOOST_ASSERT((last - first) == (r_first - dest_first));
+ while ( first != last ) {
+ if (r_first == r_last) {
+ InputOutIterator end = op(forward_t(), first, last, dest_first);
+ BOOST_ASSERT(end == r_last);
+ (void)end;
+ return;
+ }
+ else if (comp(*r_first, *first)) {
+ op(r_first, dest_first);
+ ++r_first;
+ }
+ else {
+ op(first, dest_first);
+ ++first;
+ }
+ ++dest_first;
+ }
+ // Remaining [r_first, r_last) already in the correct place
+}
+
+template <class Compare, class InputIterator, class InputOutIterator>
+void swap_merge_with_right_placed
+ ( InputIterator first, InputIterator last
+ , InputOutIterator dest_first, InputOutIterator r_first, InputOutIterator r_last
+ , Compare comp)
+{
+ op_merge_with_right_placed(first, last, dest_first, r_first, r_last, comp, swap_op());
+}
+
+// [first, last) are already in the right part of the destination range.
+template <class Compare, class Op, class BidirIterator, class BidirOutIterator>
+void op_merge_with_left_placed
+ ( BidirOutIterator const first, BidirOutIterator last, BidirOutIterator dest_last
+ , BidirIterator const r_first, BidirIterator r_last
+ , Compare comp, Op op)
+{
+ BOOST_ASSERT((dest_last - last) == (r_last - r_first));
+ while( r_first != r_last ) {
+ if(first == last) {
+ BidirOutIterator res = op(backward_t(), r_first, r_last, dest_last);
+ BOOST_ASSERT(last == res);
+ (void)res;
+ return;
+ }
+ --r_last;
+ --last;
+ if(comp(*r_last, *last)){
+ ++r_last;
+ --dest_last;
+ op(last, dest_last);
+ }
+ else{
+ ++last;
+ --dest_last;
+ op(r_last, dest_last);
+ }
+ }
+ // Remaining [first, last) already in the correct place
+}
+
+// @endcond
+
+// [first, last) are already in the right part of the destination range.
+template <class Compare, class BidirIterator, class BidirOutIterator>
+void merge_with_left_placed
+ ( BidirOutIterator const first, BidirOutIterator last, BidirOutIterator dest_last
+ , BidirIterator const r_first, BidirIterator r_last
+ , Compare comp)
+{
+ op_merge_with_left_placed(first, last, dest_last, r_first, r_last, comp, move_op());
+}
+
+// [r_first, r_last) are already in the right part of the destination range.
+template <class Compare, class InputIterator, class InputOutIterator>
+void merge_with_right_placed
+ ( InputIterator first, InputIterator last
+ , InputOutIterator dest_first, InputOutIterator r_first, InputOutIterator r_last
+ , Compare comp)
+{
+ op_merge_with_right_placed(first, last, dest_first, r_first, r_last, comp, move_op());
+}
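+
+// Editorial sketch (not part of this header): the "right placed" merge assumes the second run
+// already occupies the tail of the destination and the gap before it equals the size of the
+// other run. Values are invented.
+//
+//   #include <boost/move/algo/detail/merge.hpp>
+//   #include <functional>
+//
+//   void merge_right_placed_example()
+//   {
+//      int src[]  = {1, 6, 7};
+//      int dest[] = {0, 0, 0, 2, 5, 8};   // the last three slots hold the already-placed run
+//      boost::movelib::merge_with_right_placed
+//         (src, src + 3, dest, dest + 3, dest + 6, std::less<int>());
+//      // dest is now {1, 2, 5, 6, 7, 8}
+//   }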
+
+// [r_first, r_last) are already in the right part of the destination range.
+// [dest_first, r_first) is uninitialized memory
+template <class Compare, class InputIterator, class InputOutIterator>
+void uninitialized_merge_with_right_placed
+ ( InputIterator first, InputIterator last
+ , InputOutIterator dest_first, InputOutIterator r_first, InputOutIterator r_last
+ , Compare comp)
+{
+ BOOST_ASSERT((last - first) == (r_first - dest_first));
+ typedef typename iterator_traits<InputOutIterator>::value_type value_type;
+ InputOutIterator const original_r_first = r_first;
+
+ destruct_n<value_type, InputOutIterator> d(dest_first);
+
+ while ( first != last && dest_first != original_r_first ) {
+ if (r_first == r_last) {
+ for(; dest_first != original_r_first; ++dest_first, ++first){
+ ::new((iterator_to_raw_pointer)(dest_first)) value_type(::boost::move(*first));
+ d.incr();
+ }
+ d.release();
+ InputOutIterator end = ::boost::move(first, last, original_r_first);
+ BOOST_ASSERT(end == r_last);
+ (void)end;
+ return;
+ }
+ else if (comp(*r_first, *first)) {
+ ::new((iterator_to_raw_pointer)(dest_first)) value_type(::boost::move(*r_first));
+ d.incr();
+ ++r_first;
+ }
+ else {
+ ::new((iterator_to_raw_pointer)(dest_first)) value_type(::boost::move(*first));
+ d.incr();
+ ++first;
+ }
+ ++dest_first;
+ }
+ d.release();
+ merge_with_right_placed(first, last, original_r_first, r_first, r_last, comp);
+}
+
+/*
+// [r_first, r_last) are already in the right part of the destination range.
+// [dest_first, r_first) is uninitialized memory
+template <class Compare, class BidirOutIterator, class BidirIterator>
+void uninitialized_merge_with_left_placed
+ ( BidirOutIterator dest_first, BidirOutIterator r_first, BidirOutIterator r_last
+ , BidirIterator first, BidirIterator last
+ , Compare comp)
+{
+ BOOST_ASSERT((last - first) == (r_last - r_first));
+ typedef typename iterator_traits<BidirOutIterator>::value_type value_type;
+ BidirOutIterator const original_r_last = r_last;
+
+ destruct_n<value_type> d(&*dest_last);
+
+ while ( first != last && dest_first != original_r_first ) {
+ if (r_first == r_last) {
+ for(; dest_first != original_r_first; ++dest_first, ++first){
+ ::new(&*dest_first) value_type(::boost::move(*first));
+ d.incr();
+ }
+ d.release();
+ BidirOutIterator end = ::boost::move(first, last, original_r_first);
+ BOOST_ASSERT(end == r_last);
+ (void)end;
+ return;
+ }
+ else if (comp(*r_first, *first)) {
+ ::new(&*dest_first) value_type(::boost::move(*r_first));
+ d.incr();
+ ++r_first;
+ }
+ else {
+ ::new(&*dest_first) value_type(::boost::move(*first));
+ d.incr();
+ ++first;
+ }
+ ++dest_first;
+ }
+ d.release();
+ merge_with_right_placed(first, last, original_r_first, r_first, r_last, comp);
+}
+*/
+
+} //namespace movelib {
+} //namespace boost {
+
+#endif //#define BOOST_MOVE_MERGE_HPP
diff --git a/include/boost/move/algo/detail/merge_sort.hpp b/include/boost/move/algo/detail/merge_sort.hpp
new file mode 100644
index 0000000..62d185a
--- /dev/null
+++ b/include/boost/move/algo/detail/merge_sort.hpp
@@ -0,0 +1,139 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2015-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+//! \file
+
+#ifndef BOOST_MOVE_DETAIL_MERGE_SORT_HPP
+#define BOOST_MOVE_DETAIL_MERGE_SORT_HPP
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+#
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/detail/workaround.hpp>
+
+#include <boost/move/utility_core.hpp>
+#include <boost/move/algo/move.hpp>
+#include <boost/move/algo/detail/merge.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+#include <boost/move/adl_move_swap.hpp>
+#include <boost/move/detail/destruct_n.hpp>
+#include <boost/move/algo/detail/insertion_sort.hpp>
+#include <cassert>
+
+namespace boost {
+namespace movelib {
+
+// @cond
+
+static const unsigned MergeSortInsertionSortThreshold = 16;
+
+template <class RandIt, class Compare>
+void inplace_stable_sort(RandIt first, RandIt last, Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ if (size_type(last - first) <= size_type(MergeSortInsertionSortThreshold)) {
+ insertion_sort(first, last, comp);
+ return;
+ }
+ RandIt middle = first + (last - first) / 2;
+ inplace_stable_sort(first, middle, comp);
+ inplace_stable_sort(middle, last, comp);
+ merge_bufferless_ONlogN_recursive
+ (first, middle, last, size_type(middle - first), size_type(last - middle), comp);
+}
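+
+// Editorial sketch (not part of this header): inplace_stable_sort sorts without auxiliary
+// memory (insertion sort for small ranges, rotation-based merging otherwise). Values invented.
+//
+//   #include <boost/move/algo/detail/merge_sort.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   void inplace_stable_sort_example()
+//   {
+//      std::vector<int> v = {5, 1, 4, 1, 3};
+//      boost::movelib::inplace_stable_sort(v.begin(), v.end(), std::less<int>());
+//      // v is now {1, 1, 3, 4, 5}
+//   }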
+
+// @endcond
+
+template<class RandIt, class RandIt2, class Compare>
+void merge_sort_copy( RandIt first, RandIt last
+ , RandIt2 dest, Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+
+ size_type const count = size_type(last - first);
+ if(count <= MergeSortInsertionSortThreshold){
+ insertion_sort_copy(first, last, dest, comp);
+ }
+ else{
+ size_type const half = count/2;
+ merge_sort_copy(first + half, last , dest+half , comp);
+ merge_sort_copy(first , first + half, first + half, comp);
+ merge_with_right_placed
+ ( first + half, first + half + half
+ , dest, dest+half, dest + count
+ , comp);
+ }
+}
+
+template<class RandIt, class RandItRaw, class Compare>
+void merge_sort_uninitialized_copy( RandIt first, RandIt last
+ , RandItRaw uninitialized
+ , Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+
+ size_type const count = size_type(last - first);
+ if(count <= MergeSortInsertionSortThreshold){
+ insertion_sort_uninitialized_copy(first, last, uninitialized, comp);
+ }
+ else{
+ size_type const half = count/2;
+ merge_sort_uninitialized_copy(first + half, last, uninitialized + half, comp);
+ destruct_n<value_type, RandItRaw> d(uninitialized+half);
+ d.incr(count-half);
+ merge_sort_copy(first, first + half, first + half, comp);
+ uninitialized_merge_with_right_placed
+ ( first + half, first + half + half
+ , uninitialized, uninitialized+half, uninitialized+count
+ , comp);
+ d.release();
+ }
+}
+
+template<class RandIt, class RandItRaw, class Compare>
+void merge_sort( RandIt first, RandIt last, Compare comp
+ , RandItRaw uninitialized)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ typedef typename iterator_traits<RandIt>::value_type value_type;
+
+ size_type const count = size_type(last - first);
+ if(count <= MergeSortInsertionSortThreshold){
+ insertion_sort(first, last, comp);
+ }
+ else{
+ size_type const half = count/2;
+ size_type const rest = count - half;
+ RandIt const half_it = first + half;
+ RandIt const rest_it = first + rest;
+
+ merge_sort_uninitialized_copy(half_it, last, uninitialized, comp);
+ destruct_n<value_type, RandItRaw> d(uninitialized);
+ d.incr(rest);
+ merge_sort_copy(first, half_it, rest_it, comp);
+ merge_with_right_placed
+ ( uninitialized, uninitialized + rest
+ , first, rest_it, last, antistable<Compare>(comp));
+ }
+}
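+
+// Editorial sketch (not part of this header): merge_sort needs raw storage for at least
+// count - count/2 elements; for a trivially constructible type a plain array can serve as
+// that storage. Values are invented.
+//
+//   #include <boost/move/algo/detail/merge_sort.hpp>
+//   #include <functional>
+//
+//   void merge_sort_example()
+//   {
+//      int v[] = {3, 1, 5, 2, 4};
+//      int scratch[3];   // count - count/2 == 3 for count == 5
+//      boost::movelib::merge_sort(v, v + 5, std::less<int>(), scratch);
+//      // v is now {1, 2, 3, 4, 5}
+//   }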
+
+}} //namespace boost { namespace movelib{
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //#ifndef BOOST_MOVE_DETAIL_MERGE_SORT_HPP
diff --git a/include/boost/move/algo/detail/pdqsort.hpp b/include/boost/move/algo/detail/pdqsort.hpp
new file mode 100644
index 0000000..b6a1278
--- /dev/null
+++ b/include/boost/move/algo/detail/pdqsort.hpp
@@ -0,0 +1,334 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Orson Peters 2017.
+// (C) Copyright Ion Gaztanaga 2017-2018.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+//
+// This implementation of Pattern-defeating quicksort (pdqsort) was written
+// by Orson Peters, and discussed in the Boost mailing list:
+// http://boost.2283326.n4.nabble.com/sort-pdqsort-td4691031.html
+//
+// This implementation is the adaptation by Ion Gaztanaga of the code originally published on GitHub
+// with permission from the author to relicense it under the Boost Software License
+// (see the Boost mailing list for details).
+//
+// The original copyright statement is pasted here for completeness:
+//
+// pdqsort.h - Pattern-defeating quicksort.
+// Copyright (c) 2015 Orson Peters
+// This software is provided 'as-is', without any express or implied warranty. In no event will the
+// authors be held liable for any damages arising from the use of this software.
+// Permission is granted to anyone to use this software for any purpose, including commercial
+// applications, and to alter it and redistribute it freely, subject to the following restrictions:
+// 1. The origin of this software must not be misrepresented; you must not claim that you wrote the
+// original software. If you use this software in a product, an acknowledgment in the product
+// documentation would be appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be misrepresented as
+// being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_MOVE_ALGO_PDQSORT_HPP
+#define BOOST_MOVE_ALGO_PDQSORT_HPP
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+#
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/detail/workaround.hpp>
+#include <boost/move/utility_core.hpp>
+#include <boost/move/algo/detail/insertion_sort.hpp>
+#include <boost/move/algo/detail/heap_sort.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+
+#include <boost/move/adl_move_swap.hpp>
+#include <cstddef>
+
+namespace boost {
+namespace movelib {
+
+namespace pdqsort_detail {
+
+ //A simple pair implementation to avoid including <utility>
+ template<class T1, class T2>
+ struct pair
+ {
+ pair()
+ {}
+
+ pair(const T1 &t1, const T2 &t2)
+ : first(t1), second(t2)
+ {}
+
+ T1 first;
+ T2 second;
+ };
+
+ enum {
+ // Partitions below this size are sorted using insertion sort.
+ insertion_sort_threshold = 24,
+
+ // Partitions above this size use Tukey's ninther to select the pivot.
+ ninther_threshold = 128,
+
+ // When we detect an already sorted partition, attempt an insertion sort that allows this
+ // amount of element moves before giving up.
+ partial_insertion_sort_limit = 8,
+
+ // Must be multiple of 8 due to loop unrolling, and < 256 to fit in unsigned char.
+ block_size = 64,
+
+ // Cacheline size, assumes power of two.
+ cacheline_size = 64
+
+ };
+
+ // Returns floor(log2(n)), assumes n > 0.
+ template<class Unsigned>
+ Unsigned log2(Unsigned n) {
+ Unsigned log = 0;
+ while (n >>= 1) ++log;
+ return log;
+ }
+
+ // Attempts to use insertion sort on [begin, end). Will return false if more than
+ // partial_insertion_sort_limit elements were moved, and abort sorting. Otherwise it will
+ // successfully sort and return true.
+ template<class Iter, class Compare>
+ inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) {
+ typedef typename boost::movelib::iterator_traits<Iter>::value_type T;
+ typedef typename boost::movelib::iterator_traits<Iter>::size_type size_type;
+ if (begin == end) return true;
+
+ size_type limit = 0;
+ for (Iter cur = begin + 1; cur != end; ++cur) {
+ if (limit > partial_insertion_sort_limit) return false;
+
+ Iter sift = cur;
+ Iter sift_1 = cur - 1;
+
+ // Compare first so we can avoid 2 moves for an element already positioned correctly.
+ if (comp(*sift, *sift_1)) {
+ T tmp = boost::move(*sift);
+
+ do { *sift-- = boost::move(*sift_1); }
+ while (sift != begin && comp(tmp, *--sift_1));
+
+ *sift = boost::move(tmp);
+ limit += size_type(cur - sift);
+ }
+ }
+
+ return true;
+ }
+
+ template<class Iter, class Compare>
+ inline void sort2(Iter a, Iter b, Compare comp) {
+ if (comp(*b, *a)) boost::adl_move_iter_swap(a, b);
+ }
+
+ // Sorts the elements *a, *b and *c using comparison function comp.
+ template<class Iter, class Compare>
+ inline void sort3(Iter a, Iter b, Iter c, Compare comp) {
+ sort2(a, b, comp);
+ sort2(b, c, comp);
+ sort2(a, b, comp);
+ }
+
+ // Partitions [begin, end) around pivot *begin using comparison function comp. Elements equal
+ // to the pivot are put in the right-hand partition. Returns the position of the pivot after
+ // partitioning and whether the passed sequence already was correctly partitioned. Assumes the
+ // pivot is a median of at least 3 elements and that [begin, end) is at least
+ // insertion_sort_threshold long.
+ template<class Iter, class Compare>
+ pdqsort_detail::pair<Iter, bool> partition_right(Iter begin, Iter end, Compare comp) {
+ typedef typename boost::movelib::iterator_traits<Iter>::value_type T;
+
+ // Move pivot into local for speed.
+ T pivot(boost::move(*begin));
+
+ Iter first = begin;
+ Iter last = end;
+
+ // Find the first element greater than or equal to the pivot (the median of 3 guarantees
+ // this exists).
+ while (comp(*++first, pivot));
+
+ // Find the first element strictly smaller than the pivot. We have to guard this search if
+ // there was no element before *first.
+ if (first - 1 == begin) while (first < last && !comp(*--last, pivot));
+ else while ( !comp(*--last, pivot));
+
+ // If the first pair of elements that should be swapped to partition are the same element,
+ // the passed in sequence already was correctly partitioned.
+ bool already_partitioned = first >= last;
+
+ // Keep swapping pairs of elements that are on the wrong side of the pivot. Previously
+ // swapped pairs guard the searches, which is why the first iteration is special-cased
+ // above.
+ while (first < last) {
+ boost::adl_move_iter_swap(first, last);
+ while (comp(*++first, pivot));
+ while (!comp(*--last, pivot));
+ }
+
+ // Put the pivot in the right place.
+ Iter pivot_pos = first - 1;
+ *begin = boost::move(*pivot_pos);
+ *pivot_pos = boost::move(pivot);
+
+ return pdqsort_detail::pair<Iter, bool>(pivot_pos, already_partitioned);
+ }
+
+ // Similar function to the one above, except elements equal to the pivot are put to the left of
+ // the pivot and it doesn't check or return if the passed sequence already was partitioned.
+ // Since this is rarely used (the many equal case), and in that case pdqsort already has O(n)
+ // performance, no block quicksort is applied here for simplicity.
+ template<class Iter, class Compare>
+ inline Iter partition_left(Iter begin, Iter end, Compare comp) {
+ typedef typename boost::movelib::iterator_traits<Iter>::value_type T;
+
+ T pivot(boost::move(*begin));
+ Iter first = begin;
+ Iter last = end;
+
+ while (comp(pivot, *--last));
+
+ if (last + 1 == end) while (first < last && !comp(pivot, *++first));
+ else while ( !comp(pivot, *++first));
+
+ while (first < last) {
+ boost::adl_move_iter_swap(first, last);
+ while (comp(pivot, *--last));
+ while (!comp(pivot, *++first));
+ }
+
+ Iter pivot_pos = last;
+ *begin = boost::move(*pivot_pos);
+ *pivot_pos = boost::move(pivot);
+
+ return pivot_pos;
+ }
+
+
+ template<class Iter, class Compare>
+ void pdqsort_loop( Iter begin, Iter end, Compare comp
+ , typename boost::movelib::iterator_traits<Iter>::size_type bad_allowed
+ , bool leftmost = true)
+ {
+ typedef typename boost::movelib::iterator_traits<Iter>::size_type size_type;
+
+ // Use a while loop for tail recursion elimination.
+ while (true) {
+ size_type size = size_type(end - begin);
+
+ // Insertion sort is faster for small arrays.
+ if (size < insertion_sort_threshold) {
+ insertion_sort(begin, end, comp);
+ return;
+ }
+
+ // Choose pivot as median of 3 or pseudomedian of 9.
+ size_type s2 = size / 2;
+ if (size > ninther_threshold) {
+ sort3(begin, begin + s2, end - 1, comp);
+ sort3(begin + 1, begin + (s2 - 1), end - 2, comp);
+ sort3(begin + 2, begin + (s2 + 1), end - 3, comp);
+ sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp);
+ boost::adl_move_iter_swap(begin, begin + s2);
+ } else sort3(begin + s2, begin, end - 1, comp);
+
+ // If *(begin - 1) is the end of the right partition of a previous partition operation
+ // there is no element in [begin, end) that is smaller than *(begin - 1). Then if our
+ // pivot compares equal to *(begin - 1) we change strategy, putting equal elements in
+ // the left partition, greater elements in the right partition. We do not have to
+ // recurse on the left partition, since it's sorted (all equal).
+ if (!leftmost && !comp(*(begin - 1), *begin)) {
+ begin = partition_left(begin, end, comp) + 1;
+ continue;
+ }
+
+ // Partition and get results.
+ pdqsort_detail::pair<Iter, bool> part_result = partition_right(begin, end, comp);
+ Iter pivot_pos = part_result.first;
+ bool already_partitioned = part_result.second;
+
+ // Check for a highly unbalanced partition.
+ size_type l_size = size_type(pivot_pos - begin);
+ size_type r_size = size_type(end - (pivot_pos + 1));
+ bool highly_unbalanced = l_size < size / 8 || r_size < size / 8;
+
+ // If we got a highly unbalanced partition we shuffle elements to break many patterns.
+ if (highly_unbalanced) {
+ // If we had too many bad partitions, switch to heapsort to guarantee O(n log n).
+ if (--bad_allowed == 0) {
+ boost::movelib::heap_sort(begin, end, comp);
+ return;
+ }
+
+ if (l_size >= insertion_sort_threshold) {
+ boost::adl_move_iter_swap(begin, begin + l_size / 4);
+ boost::adl_move_iter_swap(pivot_pos - 1, pivot_pos - l_size / 4);
+
+ if (l_size > ninther_threshold) {
+ boost::adl_move_iter_swap(begin + 1, begin + (l_size / 4 + 1));
+ boost::adl_move_iter_swap(begin + 2, begin + (l_size / 4 + 2));
+ boost::adl_move_iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1));
+ boost::adl_move_iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2));
+ }
+ }
+
+ if (r_size >= insertion_sort_threshold) {
+ boost::adl_move_iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4));
+ boost::adl_move_iter_swap(end - 1, end - r_size / 4);
+
+ if (r_size > ninther_threshold) {
+ boost::adl_move_iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4));
+ boost::adl_move_iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4));
+ boost::adl_move_iter_swap(end - 2, end - (1 + r_size / 4));
+ boost::adl_move_iter_swap(end - 3, end - (2 + r_size / 4));
+ }
+ }
+ } else {
+ // If we were decently balanced and we tried to sort an already partitioned
+ // sequence, try to use insertion sort.
+ if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp)
+ && partial_insertion_sort(pivot_pos + 1, end, comp)) return;
+ }
+
+ // Sort the left partition first using recursion and do tail recursion elimination for
+ // the right-hand partition.
+ pdqsort_loop<Iter, Compare>(begin, pivot_pos, comp, bad_allowed, leftmost);
+ begin = pivot_pos + 1;
+ leftmost = false;
+ }
+ }
+}
+
+
+template<class Iter, class Compare>
+void pdqsort(Iter begin, Iter end, Compare comp)
+{
+ if (begin == end) return;
+ typedef typename boost::movelib::iterator_traits<Iter>::size_type size_type;
+ pdqsort_detail::pdqsort_loop<Iter, Compare>(begin, end, comp, pdqsort_detail::log2(size_type(end - begin)));
+}
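+
+// Editorial sketch (not part of this header): pdqsort is an unstable comparison sort with an
+// O(n log n) worst case (it falls back to heap_sort after too many bad partitions). Values
+// are invented.
+//
+//   #include <boost/move/algo/detail/pdqsort.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   void pdqsort_example()
+//   {
+//      std::vector<int> v = {9, 2, 7, 2, 1};
+//      boost::movelib::pdqsort(v.begin(), v.end(), std::less<int>());
+//      // v is now {1, 2, 2, 7, 9}
+//   }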
+
+} //namespace movelib {
+} //namespace boost {
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //BOOST_MOVE_ALGO_PDQSORT_HPP
diff --git a/include/boost/move/algo/detail/set_difference.hpp b/include/boost/move/algo/detail/set_difference.hpp
new file mode 100644
index 0000000..51d0475
--- /dev/null
+++ b/include/boost/move/algo/detail/set_difference.hpp
@@ -0,0 +1,207 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2017-2017.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+#ifndef BOOST_MOVE_SET_DIFFERENCE_HPP
+#define BOOST_MOVE_SET_DIFFERENCE_HPP
+
+#include <boost/move/algo/move.hpp>
+#include <boost/move/iterator.hpp>
+#include <boost/move/utility_core.hpp>
+
+namespace boost {
+
+namespace move_detail{
+
+template<class InputIt, class OutputIt>
+OutputIt copy(InputIt first, InputIt last, OutputIt result)
+{
+ while (first != last) {
+ *result = *first;
+ ++result;
+ ++first;
+ }
+ return result;
+}
+
+} //namespace move_detail{
+
+namespace movelib {
+
+//Moves the elements from the sorted range [first1, last1) which are not found in the sorted
+//range [first2, last2) to the range beginning at result.
+//The resulting range is also sorted. Equivalent elements are treated individually,
+//that is, if some element is found m times in [first1, last1) and n times in [first2, last2),
+//it will be moved to result exactly max(m-n, 0) times.
+//The resulting range cannot overlap with either of the input ranges.
+template<class InputIt1, class InputIt2,
+ class OutputIt, class Compare>
+OutputIt set_difference
+ (InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt result, Compare comp)
+{
+ while (first1 != last1) {
+ if (first2 == last2)
+ return boost::move_detail::copy(first1, last1, result);
+
+ if (comp(*first1, *first2)) {
+ *result = *first1;
+ ++result;
+ ++first1;
+ }
+ else {
+ if (!comp(*first2, *first1)) {
+ ++first1;
+ }
+ ++first2;
+ }
+ }
+ return result;
+}
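+
+// Editorial sketch (not part of this header): classic set difference over two sorted ranges
+// written to a separate output range; wrap the inputs in make_move_iterator when elements
+// should be moved rather than copied. Values are invented.
+//
+//   #include <boost/move/algo/detail/set_difference.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   void set_difference_example()
+//   {
+//      const int a[] = {1, 2, 2, 5};
+//      const int b[] = {2, 3};
+//      std::vector<int> out(4);
+//      std::vector<int>::iterator end = boost::movelib::set_difference
+//         (a, a + 4, b, b + 2, out.begin(), std::less<int>());
+//      // [out.begin(), end) is {1, 2, 5}
+//   }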
+
+//Moves the elements from the sorted range [first1, last1) which are not found in the sorted
+//range [first2, last2) to the range beginning at first1 (in place operation in range1).
+//The resulting range is also sorted. Equivalent elements are treated individually,
+//that is, if some element is found m times in [first1, last1) and n times in [first2, last2),
+//it will be moved to result exactly max(m-n, 0) times.
+template<class InputOutputIt1, class InputIt2, class Compare>
+InputOutputIt1 inplace_set_difference
+ (InputOutputIt1 first1, InputOutputIt1 last1, InputIt2 first2, InputIt2 last2, Compare comp )
+{
+ while (first1 != last1) {
+ //Skip copying from range 1 if no element has to be skipped
+ if (first2 == last2){
+ return last1;
+ }
+ else if (comp(*first1, *first2)){
+ ++first1;
+ }
+ else{
+ if (!comp(*first2, *first1)) {
+ InputOutputIt1 result = first1;
+ //An element from range 1 must be skipped, no longer an inplace operation
+ return boost::movelib::set_difference
+ ( boost::make_move_iterator(++first1)
+ , boost::make_move_iterator(last1)
+ , ++first2, last2, result, comp);
+ }
+ ++first2;
+ }
+ }
+ return first1;
+}
+
+//Moves the elements from the sorted range [first1, last1) which are not found in the sorted
+//range [first2, last2) to the range beginning at result.
+//The resulting range is also sorted. Equivalent elements from range 1 are collapsed,
+//that is, each distinct value from [first1, last1) that is not found in [first2, last2)
+//is moved to result exactly once.
+//The resulting range cannot overlap with either of the input ranges.
+template<class ForwardIt1, class InputIt2,
+ class OutputIt, class Compare>
+OutputIt set_unique_difference
+ (ForwardIt1 first1, ForwardIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt result, Compare comp)
+{
+ while (first1 != last1) {
+ if (first2 == last2){
+ //unique_copy-like sequence with forward iterators but don't write i
+ //to result before comparing as moving *i could alter the value in i.
+ ForwardIt1 i = first1;
+ while (++first1 != last1) {
+ if (comp(*i, *first1)) {
+ *result = *i;
+ ++result;
+ i = first1;
+ }
+ }
+ *result = *i;
+ ++result;
+ break;
+ }
+
+ if (comp(*first1, *first2)) {
+ //Skip equivalent elements in range1 but don't write i
+ //to result before comparing as moving *i could alter the value in i.
+ ForwardIt1 i = first1;
+ while (++first1 != last1) {
+ if (comp(*i, *first1)) {
+ break;
+ }
+ }
+ *result = *i;
+ ++result;
+ }
+ else {
+ if (comp(*first2, *first1)) {
+ ++first2;
+ }
+ else{
+ ++first1;
+ }
+ }
+ }
+ return result;
+}
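+
+// Editorial sketch (not part of this header): set_unique_difference additionally collapses
+// equivalent elements from the first range. Values are invented.
+//
+//   #include <boost/move/algo/detail/set_difference.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   void set_unique_difference_example()
+//   {
+//      const int a[] = {1, 1, 2, 5, 5};
+//      const int b[] = {2};
+//      std::vector<int> out(5);
+//      std::vector<int>::iterator end = boost::movelib::set_unique_difference
+//         (a, a + 5, b, b + 1, out.begin(), std::less<int>());
+//      // [out.begin(), end) is {1, 5}
+//   }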
+
+//Moves the elements from the sorted range [first1, last1) which are not found in the sorted
+//range [first2, last2) to the range beginning at first1 (in place operation in range1).
+//The resulting range is also sorted. Equivalent elements from range 1 are collapsed,
+//that is, each distinct value from [first1, last1) that is not found in [first2, last2)
+//is kept exactly once.
+template<class ForwardOutputIt1, class ForwardIt2, class Compare>
+ForwardOutputIt1 inplace_set_unique_difference
+ (ForwardOutputIt1 first1, ForwardOutputIt1 last1, ForwardIt2 first2, ForwardIt2 last2, Compare comp )
+{
+ while (first1 != last1) {
+ //Skip copying from range 1 if no element has to be skipped
+ if (first2 == last2){
+ //unique-like algorithm for the remaining range 1
+ ForwardOutputIt1 result = first1;
+ while (++first1 != last1) {
+ if (comp(*result, *first1) && ++result != first1) {
+ *result = boost::move(*first1);
+ }
+ }
+ return ++result;
+ }
+ else if (comp(*first2, *first1)) {
+ ++first2;
+ }
+ else if (comp(*first1, *first2)){
+ //skip any adjacent equivalent element in range 1
+ ForwardOutputIt1 result = first1;
+ if (++first1 != last1 && !comp(*result, *first1)) {
+ //Some elements from range 1 must be skipped, no longer an inplace operation
+ while (++first1 != last1 && !comp(*result, *first1)){}
+ return boost::movelib::set_unique_difference
+ ( boost::make_move_iterator(first1)
+ , boost::make_move_iterator(last1)
+ , first2, last2, ++result, comp);
+ }
+ }
+ else{
+ ForwardOutputIt1 result = first1;
+ //Some elements from range 1 must be skipped, no longer an inplace operation
+ while (++first1 != last1 && !comp(*result, *first1)){}
+ //An element from range 1 must be skipped, no longer an inplace operation
+ return boost::movelib::set_unique_difference
+ ( boost::make_move_iterator(first1)
+ , boost::make_move_iterator(last1)
+ , first2, last2, result, comp);
+ }
+ }
+ return first1;
+}
+
+
+
+} //namespace movelib {
+} //namespace boost {
+
+#endif //#define BOOST_MOVE_SET_DIFFERENCE_HPP
diff --git a/include/boost/move/algo/move.hpp b/include/boost/move/algo/move.hpp
new file mode 100644
index 0000000..2390877
--- /dev/null
+++ b/include/boost/move/algo/move.hpp
@@ -0,0 +1,156 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2012-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+//! \file
+
+#ifndef BOOST_MOVE_ALGO_MOVE_HPP
+#define BOOST_MOVE_ALGO_MOVE_HPP
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+#
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <boost/move/detail/config_begin.hpp>
+
+#include <boost/move/utility_core.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+#include <boost/move/detail/iterator_to_raw_pointer.hpp>
+#include <boost/detail/no_exceptions_support.hpp>
+
+namespace boost {
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// move
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#if !defined(BOOST_MOVE_USE_STANDARD_LIBRARY_MOVE)
+
+ //! <b>Effects</b>: Moves elements in the range [first,last) into the range [result,result + (last -
+ //! first)) starting from first and proceeding to last. For each non-negative integer n < (last-first),
+ //! performs *(result + n) = ::boost::move (*(first + n)).
+ //!
+ //! <b>Returns</b>: result + (last - first).
+ //!
+ //! <b>Requires</b>: result shall not be in the range [first,last).
+ //!
+ //! <b>Complexity</b>: Exactly last - first move assignments.
+ template <typename I, // I models InputIterator
+ typename O> // O models OutputIterator
+ O move(I f, I l, O result)
+ {
+ while (f != l) {
+ *result = ::boost::move(*f);
+ ++f; ++result;
+ }
+ return result;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+ //
+ // move_backward
+ //
+ //////////////////////////////////////////////////////////////////////////////
+
+ //! <b>Effects</b>: Moves elements in the range [first,last) into the range
+ //! [result - (last-first),result) starting from last - 1 and proceeding to
+ //! first. For each positive integer n <= (last - first),
+ //! performs *(result - n) = ::boost::move(*(last - n)).
+ //!
+ //! <b>Requires</b>: result shall not be in the range [first,last).
+ //!
+ //! <b>Returns</b>: result - (last - first).
+ //!
+ //! <b>Complexity</b>: Exactly last - first assignments.
+ template <typename I, // I models BidirectionalIterator
+ typename O> // O models BidirectionalIterator
+ O move_backward(I f, I l, O result)
+ {
+ while (f != l) {
+ --l; --result;
+ *result = ::boost::move(*l);
+ }
+ return result;
+ }
+
+#else
+
+ using ::std::move_backward;
+
+#endif //!defined(BOOST_MOVE_USE_STANDARD_LIBRARY_MOVE)
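+
+// Editorial sketch (not part of this header): boost::move and boost::move_backward mirror the
+// std:: algorithms of the same names. The element type and values are invented.
+//
+//   #include <boost/move/algo/move.hpp>
+//   #include <string>
+//   #include <vector>
+//
+//   void move_example()
+//   {
+//      std::vector<std::string> src(3, std::string("payload"));
+//      std::vector<std::string> dst(3);
+//      boost::move(src.begin(), src.end(), dst.begin());
+//      // dst now holds the three strings (moved when the type and compiler support it)
+//   }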
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// uninitialized_move
+//
+//////////////////////////////////////////////////////////////////////////////
+
+//! <b>Effects</b>:
+//! \code
+//! for (; first != last; ++result, ++first)
+//! new (static_cast<void*>(&*result))
+//! typename iterator_traits<ForwardIterator>::value_type(boost::move(*first));
+//! \endcode
+//!
+//! <b>Returns</b>: result
+template
+ <typename I, // I models InputIterator
+ typename F> // F models ForwardIterator
+F uninitialized_move(I f, I l, F r
+ /// @cond
+// ,typename ::boost::move_detail::enable_if<has_move_emulation_enabled<typename boost::movelib::iterator_traits<I>::value_type> >::type* = 0
+ /// @endcond
+ )
+{
+ typedef typename boost::movelib::iterator_traits<I>::value_type input_value_type;
+
+ F back = r;
+ BOOST_TRY{
+ while (f != l) {
+ void * const addr = static_cast<void*>(::boost::move_detail::addressof(*r));
+ ::new(addr) input_value_type(::boost::move(*f));
+ ++f; ++r;
+ }
+ }
+ BOOST_CATCH(...){
+ for (; back != r; ++back){
+ boost::movelib::iterator_to_raw_pointer(back)->~input_value_type();
+ }
+ BOOST_RETHROW;
+ }
+ BOOST_CATCH_END
+ return r;
+}
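+
+// Editorial sketch (not part of this header): uninitialized_move constructs elements in raw
+// memory, destroying any already-constructed prefix if a constructor throws. The storage and
+// values are invented.
+//
+//   #include <boost/move/algo/move.hpp>
+//   #include <string>
+//
+//   void uninitialized_move_example()
+//   {
+//      typedef std::string string_t;
+//      string_t src[2] = {string_t("abc"), string_t("def")};
+//      void *raw = ::operator new(2 * sizeof(string_t));     // raw, unconstructed storage
+//      string_t *dst = static_cast<string_t*>(raw);
+//      string_t *end = boost::uninitialized_move(src, src + 2, dst);
+//      // dst[0] == "abc" and dst[1] == "def"
+//      while (end != dst) { --end; end->~string_t(); }       // destroy before releasing
+//      ::operator delete(raw);
+//   }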
+
+/// @cond
+/*
+template
+ <typename I, // I models InputIterator
+ typename F> // F models ForwardIterator
+F uninitialized_move(I f, I l, F r,
+ typename ::boost::move_detail::disable_if<has_move_emulation_enabled<typename boost::movelib::iterator_traits<I>::value_type> >::type* = 0)
+{
+ return std::uninitialized_copy(f, l, r);
+}
+*/
+
+/// @endcond
+
+} //namespace boost {
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //#ifndef BOOST_MOVE_ALGO_MOVE_HPP
diff --git a/include/boost/move/algo/predicate.hpp b/include/boost/move/algo/predicate.hpp
new file mode 100644
index 0000000..0287d66
--- /dev/null
+++ b/include/boost/move/algo/predicate.hpp
@@ -0,0 +1,86 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2015-2016.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+#ifndef BOOST_MOVE_ALGO_PREDICATE_HPP
+#define BOOST_MOVE_ALGO_PREDICATE_HPP
+
+#include <boost/move/algo/move.hpp>
+#include <boost/move/adl_move_swap.hpp>
+#include <boost/move/algo/detail/basic_op.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+#include <boost/move/detail/destruct_n.hpp>
+#include <boost/assert.hpp>
+
+namespace boost {
+namespace movelib {
+
+template<class Comp>
+struct antistable
+{
+ explicit antistable(Comp &comp)
+ : m_comp(comp)
+ {}
+
+ template<class U, class V>
+ bool operator()(const U &u, const V & v)
+ { return !m_comp(v, u); }
+
+ private:
+ antistable & operator=(const antistable &);
+ Comp &m_comp;
+};
+
+template <class Comp>
+class negate
+{
+ public:
+ negate()
+ {}
+
+ explicit negate(Comp comp)
+ : m_comp(comp)
+ {}
+
+ template <class T1, class T2>
+ bool operator()(const T1& l, const T2& r)
+ {
+ return !m_comp(l, r);
+ }
+
+ private:
+ Comp m_comp;
+};
+
+
+template <class Comp>
+class inverse
+{
+ public:
+ inverse()
+ {}
+
+ explicit inverse(Comp comp)
+ : m_comp(comp)
+ {}
+
+ template <class T1, class T2>
+ bool operator()(const T1& l, const T2& r)
+ {
+ return m_comp(r, l);
+ }
+
+ private:
+ Comp m_comp;
+};
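+
+// Editorial note: with Comp = std::less<int>, antistable(l)(a, b) == !l(b, a) (i.e. a <= b),
+// negate(l)(a, b) == !l(a, b) (i.e. a >= b) and inverse(l)(a, b) == l(b, a) (i.e. a > b).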
+
+} //namespace movelib {
+} //namespace boost {
+
+#endif //#define BOOST_MOVE_ALGO_PREDICATE_HPP
diff --git a/include/boost/move/algo/unique.hpp b/include/boost/move/algo/unique.hpp
new file mode 100644
index 0000000..8022a65
--- /dev/null
+++ b/include/boost/move/algo/unique.hpp
@@ -0,0 +1,55 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2017-2017.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_MOVE_ALGO_UNIQUE_HPP
+#define BOOST_MOVE_ALGO_UNIQUE_HPP
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/utility_core.hpp>
+
+namespace boost {
+namespace movelib {
+
+//! <b>Requires</b>: The comparison function shall be an equivalence relation. The type of *first shall satisfy
+//! the MoveAssignable requirements
+//!
+//! <b>Effects</b>: For a nonempty range, eliminates all but the first element from every consecutive group
+//! of equivalent elements referred to by the iterator i in the range [first + 1, last) for which the
+//! following conditions hold: pred(*(i - 1), *i) != false.
+//!
+//! <b>Returns</b>: The end of the resulting range.
+//!
+//! <b>Complexity</b>: For nonempty ranges, exactly (last - first) - 1 applications of the corresponding predicate.
+template<class ForwardIterator, class BinaryPredicate>
+ForwardIterator unique(ForwardIterator first, ForwardIterator last, BinaryPredicate pred)
+{
+ if (first != last) {
+ ForwardIterator next(first);
+ ++next;
+ for (; next != last; ++next, ++first) {
+ if (pred(*first, *next)) { //Find first equal element
+ while (++next != last)
+ if (!pred(*first, *next))
+ *++first = ::boost::move(*next);
+ break;
+ }
+ }
+ ++first;
+ }
+ return first;
+}
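+
+// Editorial sketch (not part of this header): like std::unique, but surviving elements are
+// move-assigned into place. Values are invented.
+//
+//   #include <boost/move/algo/unique.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   void unique_example()
+//   {
+//      std::vector<int> v = {1, 1, 2, 2, 3};
+//      std::vector<int>::iterator end =
+//         boost::movelib::unique(v.begin(), v.end(), std::equal_to<int>());
+//      v.erase(end, v.end());   // v is now {1, 2, 3}
+//   }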
+
+} //namespace movelib {
+} //namespace boost {
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //#define BOOST_MOVE_ALGO_UNIQUE_HPP