Squashed 'third_party/eigen/' changes from 61d72f6..cf794d3
Change-Id: I9b814151b01f49af6337a8605d0c42a3a1ed4c72
git-subtree-dir: third_party/eigen
git-subtree-split: cf794d3b741a6278df169e58461f8529f43bce5d
diff --git a/bench/BenchTimer.h b/bench/BenchTimer.h
index 28e2bca..ea28496 100644
--- a/bench/BenchTimer.h
+++ b/bench/BenchTimer.h
@@ -22,12 +22,19 @@
# endif
# include <windows.h>
#elif defined(__APPLE__)
-#include <CoreServices/CoreServices.h>
#include <mach/mach_time.h>
#else
# include <unistd.h>
#endif
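+// Empty asm statements used as optimization barriers while benchmarking:
+// escape() makes the data pointed to by p appear to be used, and clobber()
+// makes the compiler assume all memory may have been written, so that
+// benchmarked computations are not optimized away.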
+static void escape(void *p) {
+ asm volatile("" : : "g"(p) : "memory");
+}
+
+static void clobber() {
+ asm volatile("" : : : "memory");
+}
+
#include <Eigen/Core>
namespace Eigen
@@ -168,6 +175,7 @@
CODE; \
} \
TIMER.stop(); \
+ clobber(); \
} \
}
diff --git a/bench/analyze-blocking-sizes.cpp b/bench/analyze-blocking-sizes.cpp
new file mode 100644
index 0000000..d563a1d
--- /dev/null
+++ b/bench/analyze-blocking-sizes.cpp
@@ -0,0 +1,876 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Jacob <benoitjacob@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include <iostream>
+#include <cstdint>
+#include <cstdlib>
+#include <vector>
+#include <algorithm>
+#include <fstream>
+#include <string>
+#include <cmath>
+#include <cassert>
+#include <cstring>
+#include <cstdio>   // for sscanf
+#include <memory>
+
+#include <Eigen/Core>
+
+using namespace std;
+
+const int default_precision = 4;
+
+// see --only-cubic-sizes
+bool only_cubic_sizes = false;
+
+// see --dump-tables
+bool dump_tables = false;
+
+uint8_t log2_pot(size_t x) {
+ size_t l = 0;
+ while (x >>= 1) l++;
+ return l;
+}
+
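+// Packs a (K, M, N) size triple, each size a power of two, into 12 bits:
+// 4 bits per log2(size). Mirrors the encoding decoded by size_triple_t below.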
+uint16_t compact_size_triple(size_t k, size_t m, size_t n)
+{
+ return (log2_pot(k) << 8) | (log2_pot(m) << 4) | log2_pot(n);
+}
+
+// just a helper to store a triple of K,M,N sizes for matrix product
+struct size_triple_t
+{
+ uint16_t k, m, n;
+ size_triple_t() : k(0), m(0), n(0) {}
+ size_triple_t(size_t _k, size_t _m, size_t _n) : k(_k), m(_m), n(_n) {}
+ size_triple_t(const size_triple_t& o) : k(o.k), m(o.m), n(o.n) {}
+ size_triple_t(uint16_t compact)
+ {
+ k = 1 << ((compact & 0xf00) >> 8);
+ m = 1 << ((compact & 0x0f0) >> 4);
+ n = 1 << ((compact & 0x00f) >> 0);
+ }
+ bool is_cubic() const { return k == m && m == n; }
+};
+
+ostream& operator<<(ostream& s, const size_triple_t& t)
+{
+ return s << "(" << t.k << ", " << t.m << ", " << t.n << ")";
+}
+
+struct inputfile_entry_t
+{
+ uint16_t product_size;
+ uint16_t pot_block_size;
+ size_triple_t nonpot_block_size;
+ float gflops;
+};
+
+struct inputfile_t
+{
+ enum class type_t {
+ unknown,
+ all_pot_sizes,
+ default_sizes
+ };
+
+ string filename;
+ vector<inputfile_entry_t> entries;
+ type_t type;
+
+ inputfile_t(const string& fname)
+ : filename(fname)
+ , type(type_t::unknown)
+ {
+ ifstream stream(filename);
+ if (!stream.is_open()) {
+ cerr << "couldn't open input file: " << filename << endl;
+ exit(1);
+ }
+ string line;
+ while (getline(stream, line)) {
+ if (line.empty()) continue;
+ if (line.find("BEGIN MEASUREMENTS ALL POT SIZES") == 0) {
+ if (type != type_t::unknown) {
+        cerr << "Input file " << filename << " contains redundant BEGIN MEASUREMENTS lines" << endl;
+ exit(1);
+ }
+ type = type_t::all_pot_sizes;
+ continue;
+ }
+ if (line.find("BEGIN MEASUREMENTS DEFAULT SIZES") == 0) {
+ if (type != type_t::unknown) {
+        cerr << "Input file " << filename << " contains redundant BEGIN MEASUREMENTS lines" << endl;
+ exit(1);
+ }
+ type = type_t::default_sizes;
+ continue;
+ }
+
+ if (type == type_t::unknown) {
+ continue;
+ }
+ switch(type) {
+ case type_t::all_pot_sizes: {
+ unsigned int product_size, block_size;
+ float gflops;
+ int sscanf_result =
+ sscanf(line.c_str(), "%x %x %f",
+ &product_size,
+ &block_size,
+ &gflops);
+ if (3 != sscanf_result ||
+ !product_size ||
+ product_size > 0xfff ||
+ !block_size ||
+ block_size > 0xfff ||
+ !isfinite(gflops))
+ {
+ cerr << "ill-formed input file: " << filename << endl;
+ cerr << "offending line:" << endl << line << endl;
+ exit(1);
+ }
+ if (only_cubic_sizes && !size_triple_t(product_size).is_cubic()) {
+ continue;
+ }
+ inputfile_entry_t entry;
+ entry.product_size = uint16_t(product_size);
+ entry.pot_block_size = uint16_t(block_size);
+ entry.gflops = gflops;
+ entries.push_back(entry);
+ break;
+ }
+ case type_t::default_sizes: {
+ unsigned int product_size;
+ float gflops;
+ int bk, bm, bn;
+ int sscanf_result =
+ sscanf(line.c_str(), "%x default(%d, %d, %d) %f",
+ &product_size,
+ &bk, &bm, &bn,
+ &gflops);
+ if (5 != sscanf_result ||
+ !product_size ||
+ product_size > 0xfff ||
+ !isfinite(gflops))
+ {
+ cerr << "ill-formed input file: " << filename << endl;
+ cerr << "offending line:" << endl << line << endl;
+ exit(1);
+ }
+ if (only_cubic_sizes && !size_triple_t(product_size).is_cubic()) {
+ continue;
+ }
+ inputfile_entry_t entry;
+ entry.product_size = uint16_t(product_size);
+ entry.pot_block_size = 0;
+ entry.nonpot_block_size = size_triple_t(bk, bm, bn);
+ entry.gflops = gflops;
+ entries.push_back(entry);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ stream.close();
+ if (type == type_t::unknown) {
+ cerr << "Unrecognized input file " << filename << endl;
+ exit(1);
+ }
+ if (entries.empty()) {
+ cerr << "didn't find any measurements in input file: " << filename << endl;
+ exit(1);
+ }
+ }
+};
+
+struct preprocessed_inputfile_entry_t
+{
+ uint16_t product_size;
+ uint16_t block_size;
+
+ float efficiency;
+};
+
+bool lower_efficiency(const preprocessed_inputfile_entry_t& e1, const preprocessed_inputfile_entry_t& e2)
+{
+ return e1.efficiency < e2.efficiency;
+}
+
+struct preprocessed_inputfile_t
+{
+ string filename;
+ vector<preprocessed_inputfile_entry_t> entries;
+
+ preprocessed_inputfile_t(const inputfile_t& inputfile)
+ : filename(inputfile.filename)
+ {
+ if (inputfile.type != inputfile_t::type_t::all_pot_sizes) {
+ abort();
+ }
+ auto it = inputfile.entries.begin();
+ auto it_first_with_given_product_size = it;
+ while (it != inputfile.entries.end()) {
+ ++it;
+ if (it == inputfile.entries.end() ||
+ it->product_size != it_first_with_given_product_size->product_size)
+ {
+ import_input_file_range_one_product_size(it_first_with_given_product_size, it);
+ it_first_with_given_product_size = it;
+ }
+ }
+ }
+
+private:
+ void import_input_file_range_one_product_size(
+ const vector<inputfile_entry_t>::const_iterator& begin,
+ const vector<inputfile_entry_t>::const_iterator& end)
+ {
+ uint16_t product_size = begin->product_size;
+ float max_gflops = 0.0f;
+ for (auto it = begin; it != end; ++it) {
+ if (it->product_size != product_size) {
+ cerr << "Unexpected ordering of entries in " << filename << endl;
+ cerr << "(Expected all entries for product size " << hex << product_size << dec << " to be grouped)" << endl;
+ exit(1);
+ }
+ max_gflops = max(max_gflops, it->gflops);
+ }
+ for (auto it = begin; it != end; ++it) {
+ preprocessed_inputfile_entry_t entry;
+ entry.product_size = it->product_size;
+ entry.block_size = it->pot_block_size;
+ entry.efficiency = it->gflops / max_gflops;
+ entries.push_back(entry);
+ }
+ }
+};
+
+void check_all_files_in_same_exact_order(
+ const vector<preprocessed_inputfile_t>& preprocessed_inputfiles)
+{
+ if (preprocessed_inputfiles.empty()) {
+ return;
+ }
+
+ const preprocessed_inputfile_t& first_file = preprocessed_inputfiles[0];
+ const size_t num_entries = first_file.entries.size();
+
+ for (size_t i = 0; i < preprocessed_inputfiles.size(); i++) {
+ if (preprocessed_inputfiles[i].entries.size() != num_entries) {
+ cerr << "these files have different number of entries: "
+ << preprocessed_inputfiles[i].filename
+ << " and "
+ << first_file.filename
+ << endl;
+ exit(1);
+ }
+ }
+
+ for (size_t entry_index = 0; entry_index < num_entries; entry_index++) {
+ const uint16_t entry_product_size = first_file.entries[entry_index].product_size;
+ const uint16_t entry_block_size = first_file.entries[entry_index].block_size;
+ for (size_t file_index = 0; file_index < preprocessed_inputfiles.size(); file_index++) {
+ const preprocessed_inputfile_t& cur_file = preprocessed_inputfiles[file_index];
+ if (cur_file.entries[entry_index].product_size != entry_product_size ||
+ cur_file.entries[entry_index].block_size != entry_block_size)
+ {
+ cerr << "entries not in same order between these files: "
+ << first_file.filename
+ << " and "
+ << cur_file.filename
+ << endl;
+ exit(1);
+ }
+ }
+ }
+}
+
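+// Efficiency of a subset of files: for each product size, take the best (over
+// block sizes) of the worst (over the subset's files) per-entry efficiency,
+// then return the worst of these per-product-size values. In other words, how
+// well a single block-size choice per product size can serve all files at once.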
+float efficiency_of_subset(
+ const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
+ const vector<size_t>& subset)
+{
+ if (subset.size() <= 1) {
+ return 1.0f;
+ }
+ const preprocessed_inputfile_t& first_file = preprocessed_inputfiles[subset[0]];
+ const size_t num_entries = first_file.entries.size();
+ float efficiency = 1.0f;
+ size_t entry_index = 0;
+ size_t first_entry_index_with_this_product_size = 0;
+ uint16_t product_size = first_file.entries[0].product_size;
+ while (entry_index < num_entries) {
+ ++entry_index;
+ if (entry_index == num_entries ||
+ first_file.entries[entry_index].product_size != product_size)
+ {
+ float efficiency_this_product_size = 0.0f;
+ for (size_t e = first_entry_index_with_this_product_size; e < entry_index; e++) {
+ float efficiency_this_entry = 1.0f;
+ for (auto i = subset.begin(); i != subset.end(); ++i) {
+ efficiency_this_entry = min(efficiency_this_entry, preprocessed_inputfiles[*i].entries[e].efficiency);
+ }
+ efficiency_this_product_size = max(efficiency_this_product_size, efficiency_this_entry);
+ }
+ efficiency = min(efficiency, efficiency_this_product_size);
+ if (entry_index < num_entries) {
+ first_entry_index_with_this_product_size = entry_index;
+ product_size = first_file.entries[entry_index].product_size;
+ }
+ }
+ }
+
+ return efficiency;
+}
+
+void dump_table_for_subset(
+ const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
+ const vector<size_t>& subset)
+{
+ const preprocessed_inputfile_t& first_file = preprocessed_inputfiles[subset[0]];
+ const size_t num_entries = first_file.entries.size();
+ size_t entry_index = 0;
+ size_t first_entry_index_with_this_product_size = 0;
+ uint16_t product_size = first_file.entries[0].product_size;
+ size_t i = 0;
+ size_triple_t min_product_size(first_file.entries.front().product_size);
+ size_triple_t max_product_size(first_file.entries.back().product_size);
+ if (!min_product_size.is_cubic() || !max_product_size.is_cubic()) {
+ abort();
+ }
+ if (only_cubic_sizes) {
+ cerr << "Can't generate tables with --only-cubic-sizes." << endl;
+ abort();
+ }
+ cout << "struct LookupTable {" << endl;
+ cout << " static const size_t BaseSize = " << min_product_size.k << ";" << endl;
+ const size_t NumSizes = log2_pot(max_product_size.k / min_product_size.k) + 1;
+ const size_t TableSize = NumSizes * NumSizes * NumSizes;
+ cout << " static const size_t NumSizes = " << NumSizes << ";" << endl;
+ cout << " static const unsigned short* Data() {" << endl;
+ cout << " static const unsigned short data[" << TableSize << "] = {";
+ while (entry_index < num_entries) {
+ ++entry_index;
+ if (entry_index == num_entries ||
+ first_file.entries[entry_index].product_size != product_size)
+ {
+ float best_efficiency_this_product_size = 0.0f;
+ uint16_t best_block_size_this_product_size = 0;
+ for (size_t e = first_entry_index_with_this_product_size; e < entry_index; e++) {
+ float efficiency_this_entry = 1.0f;
+ for (auto i = subset.begin(); i != subset.end(); ++i) {
+ efficiency_this_entry = min(efficiency_this_entry, preprocessed_inputfiles[*i].entries[e].efficiency);
+ }
+ if (efficiency_this_entry > best_efficiency_this_product_size) {
+ best_efficiency_this_product_size = efficiency_this_entry;
+ best_block_size_this_product_size = first_file.entries[e].block_size;
+ }
+ }
+ if ((i++) % NumSizes) {
+ cout << " ";
+ } else {
+ cout << endl << " ";
+ }
+ cout << "0x" << hex << best_block_size_this_product_size << dec;
+ if (entry_index < num_entries) {
+ cout << ",";
+ first_entry_index_with_this_product_size = entry_index;
+ product_size = first_file.entries[entry_index].product_size;
+ }
+ }
+ }
+ if (i != TableSize) {
+ cerr << endl << "Wrote " << i << " table entries, expected " << TableSize << endl;
+ abort();
+ }
+ cout << endl << " };" << endl;
+ cout << " return data;" << endl;
+ cout << " }" << endl;
+ cout << "};" << endl;
+}
+
+float efficiency_of_partition(
+ const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
+ const vector<vector<size_t>>& partition)
+{
+ float efficiency = 1.0f;
+ for (auto s = partition.begin(); s != partition.end(); ++s) {
+ efficiency = min(efficiency, efficiency_of_subset(preprocessed_inputfiles, *s));
+ }
+ return efficiency;
+}
+
+void make_first_subset(size_t subset_size, vector<size_t>& out_subset, size_t set_size)
+{
+ assert(subset_size >= 1 && subset_size <= set_size);
+ out_subset.resize(subset_size);
+ for (size_t i = 0; i < subset_size; i++) {
+ out_subset[i] = i;
+ }
+}
+
+bool is_last_subset(const vector<size_t>& subset, size_t set_size)
+{
+ return subset[0] == set_size - subset.size();
+}
+
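+// Advances inout_subset to the next subset of the same size, enumerating the
+// subsets of {0, ..., set_size-1} in lexicographic order.
+// For example, with set_size == 5: {0,1,4} -> {0,2,3}.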
+void next_subset(vector<size_t>& inout_subset, size_t set_size)
+{
+ if (is_last_subset(inout_subset, set_size)) {
+ cerr << "iterating past the last subset" << endl;
+ abort();
+ }
+ size_t i = 1;
+ while (inout_subset[inout_subset.size() - i] == set_size - i) {
+ i++;
+ assert(i <= inout_subset.size());
+ }
+ size_t first_index_to_change = inout_subset.size() - i;
+ inout_subset[first_index_to_change]++;
+ size_t p = inout_subset[first_index_to_change];
+ for (size_t j = first_index_to_change + 1; j < inout_subset.size(); j++) {
+ inout_subset[j] = ++p;
+ }
+}
+
+const size_t number_of_subsets_limit = 100;
+const size_t always_search_subsets_of_size_at_least = 2;
+
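+// Returns false as soon as an intermediate binomial coefficient C(n, i+1)
+// exceeds number_of_subsets_limit; this bounds the cost of the subset search.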
+bool is_number_of_subsets_feasible(size_t n, size_t p)
+{
+ assert(n>0 && p>0 && p<=n);
+ uint64_t numerator = 1, denominator = 1;
+ for (size_t i = 0; i < p; i++) {
+ numerator *= n - i;
+ denominator *= i + 1;
+ if (numerator > denominator * number_of_subsets_limit) {
+ return false;
+ }
+ }
+ return true;
+}
+
+size_t max_feasible_subset_size(size_t n)
+{
+ assert(n > 0);
+ const size_t minresult = min<size_t>(n-1, always_search_subsets_of_size_at_least);
+ for (size_t p = 1; p <= n - 1; p++) {
+ if (!is_number_of_subsets_feasible(n, p+1)) {
+ return max(p, minresult);
+ }
+ }
+ return n - 1;
+}
+
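+// Greedily moves files from inout_remainder into out_subset one at a time, for
+// as long as the subset's efficiency stays above required_efficiency_to_beat.
+// At each step, a bounded search over small subsets of the remaining candidates
+// is used to pick a promising file to add next.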
+void find_subset_with_efficiency_higher_than(
+ const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
+ float required_efficiency_to_beat,
+ vector<size_t>& inout_remainder,
+ vector<size_t>& out_subset)
+{
+ out_subset.resize(0);
+
+ if (required_efficiency_to_beat >= 1.0f) {
+ cerr << "can't beat efficiency 1." << endl;
+ abort();
+ }
+
+ while (!inout_remainder.empty()) {
+
+ vector<size_t> candidate_indices(inout_remainder.size());
+ for (size_t i = 0; i < candidate_indices.size(); i++) {
+ candidate_indices[i] = i;
+ }
+
+ size_t candidate_indices_subset_size = max_feasible_subset_size(candidate_indices.size());
+ while (candidate_indices_subset_size >= 1) {
+ vector<size_t> candidate_indices_subset;
+ make_first_subset(candidate_indices_subset_size,
+ candidate_indices_subset,
+ candidate_indices.size());
+
+ vector<size_t> best_candidate_indices_subset;
+ float best_efficiency = 0.0f;
+ vector<size_t> trial_subset = out_subset;
+ trial_subset.resize(out_subset.size() + candidate_indices_subset_size);
+ while (true)
+ {
+ for (size_t i = 0; i < candidate_indices_subset_size; i++) {
+ trial_subset[out_subset.size() + i] = inout_remainder[candidate_indices_subset[i]];
+ }
+
+ float trial_efficiency = efficiency_of_subset(preprocessed_inputfiles, trial_subset);
+ if (trial_efficiency > best_efficiency) {
+ best_efficiency = trial_efficiency;
+ best_candidate_indices_subset = candidate_indices_subset;
+ }
+ if (is_last_subset(candidate_indices_subset, candidate_indices.size())) {
+ break;
+ }
+ next_subset(candidate_indices_subset, candidate_indices.size());
+ }
+
+ if (best_efficiency > required_efficiency_to_beat) {
+ for (size_t i = 0; i < best_candidate_indices_subset.size(); i++) {
+ candidate_indices[i] = candidate_indices[best_candidate_indices_subset[i]];
+ }
+ candidate_indices.resize(best_candidate_indices_subset.size());
+ }
+ candidate_indices_subset_size--;
+ }
+
+ size_t candidate_index = candidate_indices[0];
+ auto candidate_iterator = inout_remainder.begin() + candidate_index;
+ vector<size_t> trial_subset = out_subset;
+
+ trial_subset.push_back(*candidate_iterator);
+ float trial_efficiency = efficiency_of_subset(preprocessed_inputfiles, trial_subset);
+ if (trial_efficiency > required_efficiency_to_beat) {
+ out_subset.push_back(*candidate_iterator);
+ inout_remainder.erase(candidate_iterator);
+ } else {
+ break;
+ }
+ }
+}
+
+void find_partition_with_efficiency_higher_than(
+ const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
+ float required_efficiency_to_beat,
+ vector<vector<size_t>>& out_partition)
+{
+ out_partition.resize(0);
+
+ vector<size_t> remainder;
+ for (size_t i = 0; i < preprocessed_inputfiles.size(); i++) {
+ remainder.push_back(i);
+ }
+
+ while (!remainder.empty()) {
+ vector<size_t> new_subset;
+ find_subset_with_efficiency_higher_than(
+ preprocessed_inputfiles,
+ required_efficiency_to_beat,
+ remainder,
+ new_subset);
+ out_partition.push_back(new_subset);
+ }
+}
+
+void print_partition(
+ const vector<preprocessed_inputfile_t>& preprocessed_inputfiles,
+ const vector<vector<size_t>>& partition)
+{
+ float efficiency = efficiency_of_partition(preprocessed_inputfiles, partition);
+ cout << "Partition into " << partition.size() << " subsets for " << efficiency * 100.0f << "% efficiency" << endl;
+ for (auto subset = partition.begin(); subset != partition.end(); ++subset) {
+ cout << " Subset " << (subset - partition.begin())
+ << ", efficiency " << efficiency_of_subset(preprocessed_inputfiles, *subset) * 100.0f << "%:"
+ << endl;
+ for (auto file = subset->begin(); file != subset->end(); ++file) {
+ cout << " " << preprocessed_inputfiles[*file].filename << endl;
+ }
+ if (dump_tables) {
+ cout << " Table:" << endl;
+ dump_table_for_subset(preprocessed_inputfiles, *subset);
+ }
+ }
+ cout << endl;
+}
+
+struct action_t
+{
+ virtual const char* invokation_name() const { abort(); return nullptr; }
+ virtual void run(const vector<string>&) const { abort(); }
+ virtual ~action_t() {}
+};
+
+struct partition_action_t : action_t
+{
+ virtual const char* invokation_name() const override { return "partition"; }
+ virtual void run(const vector<string>& input_filenames) const override
+ {
+ vector<preprocessed_inputfile_t> preprocessed_inputfiles;
+
+ if (input_filenames.empty()) {
+ cerr << "The " << invokation_name() << " action needs a list of input files." << endl;
+ exit(1);
+ }
+
+ for (auto it = input_filenames.begin(); it != input_filenames.end(); ++it) {
+ inputfile_t inputfile(*it);
+ switch (inputfile.type) {
+ case inputfile_t::type_t::all_pot_sizes:
+ preprocessed_inputfiles.emplace_back(inputfile);
+ break;
+ case inputfile_t::type_t::default_sizes:
+ cerr << "The " << invokation_name() << " action only uses measurements for all pot sizes, and "
+ << "has no use for " << *it << " which contains measurements for default sizes." << endl;
+ exit(1);
+ break;
+ default:
+ cerr << "Unrecognized input file: " << *it << endl;
+ exit(1);
+ }
+ }
+
+ check_all_files_in_same_exact_order(preprocessed_inputfiles);
+
+ float required_efficiency_to_beat = 0.0f;
+ vector<vector<vector<size_t>>> partitions;
+ cerr << "searching for partitions...\r" << flush;
+ while (true)
+ {
+ vector<vector<size_t>> partition;
+ find_partition_with_efficiency_higher_than(
+ preprocessed_inputfiles,
+ required_efficiency_to_beat,
+ partition);
+ float actual_efficiency = efficiency_of_partition(preprocessed_inputfiles, partition);
+ cerr << "partition " << preprocessed_inputfiles.size() << " files into " << partition.size()
+ << " subsets for " << 100.0f * actual_efficiency
+ << " % efficiency"
+ << " \r" << flush;
+ partitions.push_back(partition);
+ if (partition.size() == preprocessed_inputfiles.size() || actual_efficiency == 1.0f) {
+ break;
+ }
+ required_efficiency_to_beat = actual_efficiency;
+ }
+ cerr << " " << endl;
+ while (true) {
+ bool repeat = false;
+ for (size_t i = 0; i < partitions.size() - 1; i++) {
+ if (partitions[i].size() >= partitions[i+1].size()) {
+ partitions.erase(partitions.begin() + i);
+ repeat = true;
+ break;
+ }
+ }
+ if (!repeat) {
+ break;
+ }
+ }
+ for (auto it = partitions.begin(); it != partitions.end(); ++it) {
+ print_partition(preprocessed_inputfiles, *it);
+ }
+ }
+};
+
+struct evaluate_defaults_action_t : action_t
+{
+ struct results_entry_t {
+ uint16_t product_size;
+ size_triple_t default_block_size;
+ uint16_t best_pot_block_size;
+ float default_gflops;
+ float best_pot_gflops;
+ float default_efficiency;
+ };
+ friend ostream& operator<<(ostream& s, const results_entry_t& entry)
+ {
+ return s
+ << "Product size " << size_triple_t(entry.product_size)
+ << ": default block size " << entry.default_block_size
+ << " -> " << entry.default_gflops
+ << " GFlop/s = " << entry.default_efficiency * 100.0f << " %"
+ << " of best POT block size " << size_triple_t(entry.best_pot_block_size)
+ << " -> " << entry.best_pot_gflops
+ << " GFlop/s" << dec;
+ }
+ static bool lower_efficiency(const results_entry_t& e1, const results_entry_t& e2) {
+ return e1.default_efficiency < e2.default_efficiency;
+ }
+ virtual const char* invokation_name() const override { return "evaluate-defaults"; }
+ void show_usage_and_exit() const
+ {
+ cerr << "usage: " << invokation_name() << " default-sizes-data all-pot-sizes-data" << endl;
+ cerr << "checks how well the performance with default sizes compares to the best "
+ << "performance measured over all POT sizes." << endl;
+ exit(1);
+ }
+ virtual void run(const vector<string>& input_filenames) const override
+ {
+ if (input_filenames.size() != 2) {
+ show_usage_and_exit();
+ }
+ inputfile_t inputfile_default_sizes(input_filenames[0]);
+ inputfile_t inputfile_all_pot_sizes(input_filenames[1]);
+ if (inputfile_default_sizes.type != inputfile_t::type_t::default_sizes) {
+ cerr << inputfile_default_sizes.filename << " is not an input file with default sizes." << endl;
+ show_usage_and_exit();
+ }
+ if (inputfile_all_pot_sizes.type != inputfile_t::type_t::all_pot_sizes) {
+ cerr << inputfile_all_pot_sizes.filename << " is not an input file with all POT sizes." << endl;
+ show_usage_and_exit();
+ }
+ vector<results_entry_t> results;
+ vector<results_entry_t> cubic_results;
+
+ uint16_t product_size = 0;
+ auto it_all_pot_sizes = inputfile_all_pot_sizes.entries.begin();
+ for (auto it_default_sizes = inputfile_default_sizes.entries.begin();
+ it_default_sizes != inputfile_default_sizes.entries.end();
+ ++it_default_sizes)
+ {
+ if (it_default_sizes->product_size == product_size) {
+ continue;
+ }
+ product_size = it_default_sizes->product_size;
+ while (it_all_pot_sizes != inputfile_all_pot_sizes.entries.end() &&
+ it_all_pot_sizes->product_size != product_size)
+ {
+ ++it_all_pot_sizes;
+ }
+ if (it_all_pot_sizes == inputfile_all_pot_sizes.entries.end()) {
+ break;
+ }
+ uint16_t best_pot_block_size = 0;
+ float best_pot_gflops = 0;
+ for (auto it = it_all_pot_sizes;
+ it != inputfile_all_pot_sizes.entries.end() && it->product_size == product_size;
+ ++it)
+ {
+ if (it->gflops > best_pot_gflops) {
+ best_pot_gflops = it->gflops;
+ best_pot_block_size = it->pot_block_size;
+ }
+ }
+ results_entry_t entry;
+ entry.product_size = product_size;
+ entry.default_block_size = it_default_sizes->nonpot_block_size;
+ entry.best_pot_block_size = best_pot_block_size;
+ entry.default_gflops = it_default_sizes->gflops;
+ entry.best_pot_gflops = best_pot_gflops;
+ entry.default_efficiency = entry.default_gflops / entry.best_pot_gflops;
+ results.push_back(entry);
+
+ size_triple_t t(product_size);
+ if (t.k == t.m && t.m == t.n) {
+ cubic_results.push_back(entry);
+ }
+ }
+
+ cout << "All results:" << endl;
+ for (auto it = results.begin(); it != results.end(); ++it) {
+ cout << *it << endl;
+ }
+ cout << endl;
+
+ sort(results.begin(), results.end(), lower_efficiency);
+
+ const size_t n = min<size_t>(20, results.size());
+ cout << n << " worst results:" << endl;
+ for (size_t i = 0; i < n; i++) {
+ cout << results[i] << endl;
+ }
+ cout << endl;
+
+ cout << "cubic results:" << endl;
+ for (auto it = cubic_results.begin(); it != cubic_results.end(); ++it) {
+ cout << *it << endl;
+ }
+ cout << endl;
+
+ sort(cubic_results.begin(), cubic_results.end(), lower_efficiency);
+
+ cout.precision(2);
+ vector<float> a = {0.5f, 0.20f, 0.10f, 0.05f, 0.02f, 0.01f};
+ for (auto it = a.begin(); it != a.end(); ++it) {
+ size_t n = min(results.size() - 1, size_t(*it * results.size()));
+ cout << (100.0f * n / (results.size() - 1))
+ << " % of product sizes have default efficiency <= "
+ << 100.0f * results[n].default_efficiency << " %" << endl;
+ }
+ cout.precision(default_precision);
+ }
+};
+
+
+void show_usage_and_exit(int argc, char* argv[],
+ const vector<unique_ptr<action_t>>& available_actions)
+{
+ cerr << "usage: " << argv[0] << " <action> [options...] <input files...>" << endl;
+ cerr << "available actions:" << endl;
+ for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
+ cerr << " " << (*it)->invokation_name() << endl;
+ }
+ cerr << "the input files should each contain an output of benchmark-blocking-sizes" << endl;
+ exit(1);
+}
+
+int main(int argc, char* argv[])
+{
+ cout.precision(default_precision);
+ cerr.precision(default_precision);
+
+ vector<unique_ptr<action_t>> available_actions;
+ available_actions.emplace_back(new partition_action_t);
+ available_actions.emplace_back(new evaluate_defaults_action_t);
+
+ vector<string> input_filenames;
+
+ action_t* action = nullptr;
+
+ if (argc < 2) {
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+ for (int i = 1; i < argc; i++) {
+ bool arg_handled = false;
+ // Step 1. Try to match action invokation names.
+ for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
+ if (!strcmp(argv[i], (*it)->invokation_name())) {
+ if (!action) {
+ action = it->get();
+ arg_handled = true;
+ break;
+ } else {
+ cerr << "can't specify more than one action!" << endl;
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+ }
+ }
+ if (arg_handled) {
+ continue;
+ }
+ // Step 2. Try to match option names.
+ if (argv[i][0] == '-') {
+ if (!strcmp(argv[i], "--only-cubic-sizes")) {
+ only_cubic_sizes = true;
+ arg_handled = true;
+ }
+ if (!strcmp(argv[i], "--dump-tables")) {
+ dump_tables = true;
+ arg_handled = true;
+ }
+ if (!arg_handled) {
+ cerr << "Unrecognized option: " << argv[i] << endl;
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+ }
+ if (arg_handled) {
+ continue;
+ }
+ // Step 3. Default to interpreting args as input filenames.
+ input_filenames.emplace_back(argv[i]);
+ }
+
+ if (dump_tables && only_cubic_sizes) {
+ cerr << "Incompatible options: --only-cubic-sizes and --dump-tables." << endl;
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+
+ if (!action) {
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+
+ action->run(input_filenames);
+}
diff --git a/bench/benchCholesky.cpp b/bench/benchCholesky.cpp
index 42b3e12..9a8e7cf 100644
--- a/bench/benchCholesky.cpp
+++ b/bench/benchCholesky.cpp
@@ -31,7 +31,7 @@
int rows = m.rows();
int cols = m.cols();
- int cost = 0;
+ double cost = 0;
for (int j=0; j<rows; ++j)
{
int r = std::max(rows - j -1,0);
@@ -78,10 +78,10 @@
else
std::cout << "fixed ";
std::cout << covMat.rows() << " \t"
- << (timerNoSqrt.value() * REPEAT) / repeats << "s "
- << "(" << 1e-6 * cost*repeats/timerNoSqrt.value() << " MFLOPS)\t"
- << (timerSqrt.value() * REPEAT) / repeats << "s "
- << "(" << 1e-6 * cost*repeats/timerSqrt.value() << " MFLOPS)\n";
+ << (timerNoSqrt.best()) / repeats << "s "
+ << "(" << 1e-9 * cost*repeats/timerNoSqrt.best() << " GFLOPS)\t"
+ << (timerSqrt.best()) / repeats << "s "
+ << "(" << 1e-9 * cost*repeats/timerSqrt.best() << " GFLOPS)\n";
#ifdef BENCH_GSL
@@ -119,13 +119,13 @@
int main(int argc, char* argv[])
{
- const int dynsizes[] = {4,6,8,16,24,32,49,64,128,256,512,900,0};
- std::cout << "size no sqrt standard";
+ const int dynsizes[] = {4,6,8,16,24,32,49,64,128,256,512,900,1500,0};
+ std::cout << "size LDLT LLT";
// #ifdef BENCH_GSL
// std::cout << " GSL (standard + double + ATLAS) ";
// #endif
std::cout << "\n";
- for (uint i=0; dynsizes[i]>0; ++i)
+ for (int i=0; dynsizes[i]>0; ++i)
benchLLT(Matrix<Scalar,Dynamic,Dynamic>(dynsizes[i],dynsizes[i]));
benchLLT(Matrix<Scalar,2,2>());
diff --git a/bench/bench_gemm.cpp b/bench/bench_gemm.cpp
index 41ca8b3..8528c55 100644
--- a/bench/bench_gemm.cpp
+++ b/bench/bench_gemm.cpp
@@ -2,6 +2,14 @@
// g++-4.4 bench_gemm.cpp -I .. -O2 -DNDEBUG -lrt -fopenmp && OMP_NUM_THREADS=2 ./a.out
// icpc bench_gemm.cpp -I .. -O3 -DNDEBUG -lrt -openmp && OMP_NUM_THREADS=2 ./a.out
+// Compilation options:
+//
+// -DSCALAR=std::complex<double>
+// -DSCALARA=double or -DSCALARB=double
+// -DHAVE_BLAS
+// -DDECOUPLED
+//
+
#include <iostream>
#include <Eigen/Core>
#include <bench/BenchTimer.h>
@@ -14,10 +22,18 @@
#define SCALAR float
#endif
+#ifndef SCALARA
+#define SCALARA SCALAR
+#endif
+
+#ifndef SCALARB
+#define SCALARB SCALAR
+#endif
+
typedef SCALAR Scalar;
typedef NumTraits<Scalar>::Real RealScalar;
-typedef Matrix<RealScalar,Dynamic,Dynamic> A;
-typedef Matrix</*Real*/Scalar,Dynamic,Dynamic> B;
+typedef Matrix<SCALARA,Dynamic,Dynamic> A;
+typedef Matrix<SCALARB,Dynamic,Dynamic> B;
typedef Matrix<Scalar,Dynamic,Dynamic> C;
typedef Matrix<RealScalar,Dynamic,Dynamic> M;
@@ -129,35 +145,69 @@
int tries = 2; // number of tries, we keep the best
int s = 2048;
- int cache_size = -1;
+ int m = s;
+ int n = s;
+ int p = s;
+ int cache_size1=-1, cache_size2=l2, cache_size3 = 0;
bool need_help = false;
- for (int i=1; i<argc; ++i)
+ for (int i=1; i<argc;)
{
- if(argv[i][0]=='s')
- s = atoi(argv[i]+1);
- else if(argv[i][0]=='c')
- cache_size = atoi(argv[i]+1);
- else if(argv[i][0]=='t')
- tries = atoi(argv[i]+1);
- else if(argv[i][0]=='p')
- rep = atoi(argv[i]+1);
+ if(argv[i][0]=='-')
+ {
+ if(argv[i][1]=='s')
+ {
+ ++i;
+ s = atoi(argv[i++]);
+ m = n = p = s;
+ if(argv[i][0]!='-')
+ {
+ n = atoi(argv[i++]);
+ p = atoi(argv[i++]);
+ }
+ }
+ else if(argv[i][1]=='c')
+ {
+ ++i;
+ cache_size1 = atoi(argv[i++]);
+ if(argv[i][0]!='-')
+ {
+ cache_size2 = atoi(argv[i++]);
+ if(argv[i][0]!='-')
+ cache_size3 = atoi(argv[i++]);
+ }
+ }
+ else if(argv[i][1]=='t')
+ {
+ ++i;
+ tries = atoi(argv[i++]);
+ }
+ else if(argv[i][1]=='p')
+ {
+ ++i;
+ rep = atoi(argv[i++]);
+ }
+ }
else
+ {
need_help = true;
+ break;
+ }
}
if(need_help)
{
- std::cout << argv[0] << " s<matrix size> c<cache size> t<nb tries> p<nb repeats>\n";
+ std::cout << argv[0] << " -s <matrix sizes> -c <cache sizes> -t <nb tries> -p <nb repeats>\n";
+ std::cout << " <matrix sizes> : size\n";
+ std::cout << " <matrix sizes> : rows columns depth\n";
return 1;
}
- if(cache_size>0)
- setCpuCacheSizes(cache_size,96*cache_size);
-
- int m = s;
- int n = s;
- int p = s;
+#if EIGEN_VERSION_AT_LEAST(3,2,90)
+ if(cache_size1>0)
+ setCpuCacheSizes(cache_size1,cache_size2,cache_size3);
+#endif
+
A a(m,p); a.setRandom();
B b(p,n); b.setRandom();
C c(m,n); c.setOnes();
@@ -172,6 +222,7 @@
// check the parallel product is correct
#if defined EIGEN_HAS_OPENMP
+ Eigen::initParallel();
int procs = omp_get_max_threads();
if(procs>1)
{
@@ -188,11 +239,20 @@
#elif defined HAVE_BLAS
blas_gemm(a,b,r);
c.noalias() += a * b;
- if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
+ if(!r.isApprox(c)) {
+ std::cout << r - c << "\n";
+ std::cerr << "Warning, your product is crap!\n\n";
+ }
#else
- gemm(a,b,c);
- r.noalias() += a.cast<Scalar>() * b.cast<Scalar>();
- if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
+ if(1.*m*n*p<2000.*2000*2000)
+ {
+ gemm(a,b,c);
+ r.noalias() += a.cast<Scalar>() .lazyProduct( b.cast<Scalar>() );
+ if(!r.isApprox(c)) {
+ std::cout << r - c << "\n";
+ std::cerr << "Warning, your product is crap!\n\n";
+ }
+ }
#endif
#ifdef HAVE_BLAS
@@ -214,7 +274,7 @@
{
BenchTimer tmono;
omp_set_num_threads(1);
- Eigen::internal::setNbThreads(1);
+ Eigen::setNbThreads(1);
c = rc;
BENCH(tmono, tries, rep, gemm(a,b,c));
std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n";
@@ -223,6 +283,15 @@
}
#endif
+ if(1.*m*n*p<30*30*30)
+ {
+ BenchTimer tmt;
+ c = rc;
+ BENCH(tmt, tries, rep, c.noalias()+=a.lazyProduct(b));
+ std::cout << "lazy cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
+ std::cout << "lazy real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
+ }
+
#ifdef DECOUPLED
if((NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
{
diff --git a/bench/bench_norm.cpp b/bench/bench_norm.cpp
index 806db29..129afcf 100644
--- a/bench/bench_norm.cpp
+++ b/bench/bench_norm.cpp
@@ -6,19 +6,25 @@
using namespace std;
template<typename T>
-EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(const T& v)
+EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(T& v)
{
return v.norm();
}
template<typename T>
-EIGEN_DONT_INLINE typename T::Scalar hypotNorm(const T& v)
+EIGEN_DONT_INLINE typename T::Scalar stableNorm(T& v)
+{
+ return v.stableNorm();
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar hypotNorm(T& v)
{
return v.hypotNorm();
}
template<typename T>
-EIGEN_DONT_INLINE typename T::Scalar blueNorm(const T& v)
+EIGEN_DONT_INLINE typename T::Scalar blueNorm(T& v)
{
return v.blueNorm();
}
@@ -32,25 +38,25 @@
Scalar ssq = 1;
for (int i=0;i<n;++i)
{
- Scalar ax = internal::abs(v.coeff(i));
+ Scalar ax = std::abs(v.coeff(i));
if (scale >= ax)
{
- ssq += internal::abs2(ax/scale);
+ ssq += numext::abs2(ax/scale);
}
else
{
- ssq = Scalar(1) + ssq * internal::abs2(scale/ax);
+ ssq = Scalar(1) + ssq * numext::abs2(scale/ax);
scale = ax;
}
}
- return scale * internal::sqrt(ssq);
+ return scale * std::sqrt(ssq);
}
template<typename T>
EIGEN_DONT_INLINE typename T::Scalar twopassNorm(T& v)
{
typedef typename T::Scalar Scalar;
- Scalar s = v.cwise().abs().maxCoeff();
+ Scalar s = v.array().abs().maxCoeff();
return s*(v/s).norm();
}
@@ -73,16 +79,20 @@
v(i) = v(2*i) + v(2*i+1);
n = n/2;
}
- return internal::sqrt(v(0));
+ return std::sqrt(v(0));
}
+namespace Eigen {
+namespace internal {
#ifdef EIGEN_VECTORIZE
-Packet4f internal::plt(const Packet4f& a, Packet4f& b) { return _mm_cmplt_ps(a,b); }
-Packet2d internal::plt(const Packet2d& a, Packet2d& b) { return _mm_cmplt_pd(a,b); }
+Packet4f plt(const Packet4f& a, Packet4f& b) { return _mm_cmplt_ps(a,b); }
+Packet2d plt(const Packet2d& a, Packet2d& b) { return _mm_cmplt_pd(a,b); }
-Packet4f internal::pandnot(const Packet4f& a, Packet4f& b) { return _mm_andnot_ps(a,b); }
-Packet2d internal::pandnot(const Packet2d& a, Packet2d& b) { return _mm_andnot_pd(a,b); }
+Packet4f pandnot(const Packet4f& a, Packet4f& b) { return _mm_andnot_ps(a,b); }
+Packet2d pandnot(const Packet2d& a, Packet2d& b) { return _mm_andnot_pd(a,b); }
#endif
+}
+}
template<typename T>
EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
@@ -126,7 +136,7 @@
overfl = rbig*s2m; // overfow boundary for abig
eps = std::pow(ibeta, 1-it);
- relerr = internal::sqrt(eps); // tolerance for neglecting asml
+ relerr = std::sqrt(eps); // tolerance for neglecting asml
abig = 1.0/eps - 1.0;
if (Scalar(nbig)>abig) nmax = abig; // largest safe n
else nmax = nbig;
@@ -134,13 +144,13 @@
typedef typename internal::packet_traits<Scalar>::type Packet;
const int ps = internal::packet_traits<Scalar>::size;
- Packet pasml = internal::pset1(Scalar(0));
- Packet pamed = internal::pset1(Scalar(0));
- Packet pabig = internal::pset1(Scalar(0));
- Packet ps2m = internal::pset1(s2m);
- Packet ps1m = internal::pset1(s1m);
- Packet pb2 = internal::pset1(b2);
- Packet pb1 = internal::pset1(b1);
+ Packet pasml = internal::pset1<Packet>(Scalar(0));
+ Packet pamed = internal::pset1<Packet>(Scalar(0));
+ Packet pabig = internal::pset1<Packet>(Scalar(0));
+ Packet ps2m = internal::pset1<Packet>(s2m);
+ Packet ps1m = internal::pset1<Packet>(s1m);
+ Packet pb2 = internal::pset1<Packet>(b2);
+ Packet pb1 = internal::pset1<Packet>(b1);
for(int j=0; j<v.size(); j+=ps)
{
Packet ax = internal::pabs(v.template packet<Aligned>(j));
@@ -170,7 +180,7 @@
Scalar amed = internal::predux(pamed);
if(abig > Scalar(0))
{
- abig = internal::sqrt(abig);
+ abig = std::sqrt(abig);
if(abig > overfl)
{
eigen_assert(false && "overflow");
@@ -179,7 +189,7 @@
if(amed > Scalar(0))
{
abig = abig/s2m;
- amed = internal::sqrt(amed);
+ amed = std::sqrt(amed);
}
else
{
@@ -191,55 +201,56 @@
{
if (amed > Scalar(0))
{
- abig = internal::sqrt(amed);
- amed = internal::sqrt(asml) / s1m;
+ abig = std::sqrt(amed);
+ amed = std::sqrt(asml) / s1m;
}
else
{
- return internal::sqrt(asml)/s1m;
+ return std::sqrt(asml)/s1m;
}
}
else
{
- return internal::sqrt(amed);
+ return std::sqrt(amed);
}
asml = std::min(abig, amed);
abig = std::max(abig, amed);
if(asml <= abig*relerr)
return abig;
else
- return abig * internal::sqrt(Scalar(1) + internal::abs2(asml/abig));
+ return abig * std::sqrt(Scalar(1) + numext::abs2(asml/abig));
#endif
}
#define BENCH_PERF(NRM) { \
+ float af = 0; double ad = 0; std::complex<float> ac = 0; \
Eigen::BenchTimer tf, td, tcf; tf.reset(); td.reset(); tcf.reset();\
for (int k=0; k<tries; ++k) { \
tf.start(); \
- for (int i=0; i<iters; ++i) NRM(vf); \
+ for (int i=0; i<iters; ++i) { af += NRM(vf); } \
tf.stop(); \
} \
for (int k=0; k<tries; ++k) { \
td.start(); \
- for (int i=0; i<iters; ++i) NRM(vd); \
+ for (int i=0; i<iters; ++i) { ad += NRM(vd); } \
td.stop(); \
} \
- for (int k=0; k<std::max(1,tries/3); ++k) { \
+ /*for (int k=0; k<std::max(1,tries/3); ++k) { \
tcf.start(); \
- for (int i=0; i<iters; ++i) NRM(vcf); \
+ for (int i=0; i<iters; ++i) { ac += NRM(vcf); } \
tcf.stop(); \
- } \
+ } */\
std::cout << #NRM << "\t" << tf.value() << " " << td.value() << " " << tcf.value() << "\n"; \
}
void check_accuracy(double basef, double based, int s)
{
- double yf = basef * internal::abs(internal::random<double>());
- double yd = based * internal::abs(internal::random<double>());
+ double yf = basef * std::abs(internal::random<double>());
+ double yd = based * std::abs(internal::random<double>());
VectorXf vf = VectorXf::Ones(s) * yf;
VectorXd vd = VectorXd::Ones(s) * yd;
- std::cout << "reference\t" << internal::sqrt(double(s))*yf << "\t" << internal::sqrt(double(s))*yd << "\n";
+ std::cout << "reference\t" << std::sqrt(double(s))*yf << "\t" << std::sqrt(double(s))*yd << "\n";
std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\n";
std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\n";
std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\n";
@@ -255,8 +266,8 @@
VectorXd vd(s);
for (int i=0; i<s; ++i)
{
- vf[i] = internal::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ef0,ef1));
- vd[i] = internal::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ed0,ed1));
+ vf[i] = std::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ef0,ef1));
+ vd[i] = std::abs(internal::random<double>()) * std::pow(double(10), internal::random<int>(ed0,ed1));
}
//std::cout << "reference\t" << internal::sqrt(double(s))*yf << "\t" << internal::sqrt(double(s))*yd << "\n";
@@ -312,34 +323,38 @@
std::cout << "\n";
}
+ y = 1;
std::cout.precision(4);
- std::cerr << "Performance (out of cache):\n";
+ int s1 = 1024*1024*32;
+ std::cerr << "Performance (out of cache, " << s1 << "):\n";
{
int iters = 1;
- VectorXf vf = VectorXf::Random(1024*1024*32) * y;
- VectorXd vd = VectorXd::Random(1024*1024*32) * y;
- VectorXcf vcf = VectorXcf::Random(1024*1024*32) * y;
+ VectorXf vf = VectorXf::Random(s1) * y;
+ VectorXd vd = VectorXd::Random(s1) * y;
+ VectorXcf vcf = VectorXcf::Random(s1) * y;
BENCH_PERF(sqsumNorm);
+ BENCH_PERF(stableNorm);
BENCH_PERF(blueNorm);
-// BENCH_PERF(pblueNorm);
-// BENCH_PERF(lapackNorm);
-// BENCH_PERF(hypotNorm);
-// BENCH_PERF(twopassNorm);
+ BENCH_PERF(pblueNorm);
+ BENCH_PERF(lapackNorm);
+ BENCH_PERF(hypotNorm);
+ BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
- std::cerr << "\nPerformance (in cache):\n";
+ std::cerr << "\nPerformance (in cache, " << 512 << "):\n";
{
int iters = 100000;
VectorXf vf = VectorXf::Random(512) * y;
VectorXd vd = VectorXd::Random(512) * y;
VectorXcf vcf = VectorXcf::Random(512) * y;
BENCH_PERF(sqsumNorm);
+ BENCH_PERF(stableNorm);
BENCH_PERF(blueNorm);
-// BENCH_PERF(pblueNorm);
-// BENCH_PERF(lapackNorm);
-// BENCH_PERF(hypotNorm);
-// BENCH_PERF(twopassNorm);
+ BENCH_PERF(pblueNorm);
+ BENCH_PERF(lapackNorm);
+ BENCH_PERF(hypotNorm);
+ BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
}
diff --git a/bench/benchmark-blocking-sizes.cpp b/bench/benchmark-blocking-sizes.cpp
new file mode 100644
index 0000000..827be28
--- /dev/null
+++ b/bench/benchmark-blocking-sizes.cpp
@@ -0,0 +1,677 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Jacob <benoitjacob@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include <iostream>
+#include <cstdint>
+#include <cstdlib>
+#include <vector>
+#include <fstream>
+#include <memory>
+#include <cstdio>
+#include <algorithm>  // for sort, max, random_shuffle
+#include <unistd.h>   // for sleep, unlink
+
+bool eigen_use_specific_block_size;
+int eigen_block_size_k, eigen_block_size_m, eigen_block_size_n;
+#define EIGEN_TEST_SPECIFIC_BLOCKING_SIZES eigen_use_specific_block_size
+#define EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K eigen_block_size_k
+#define EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M eigen_block_size_m
+#define EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N eigen_block_size_n
+#include <Eigen/Core>
+
+#include <bench/BenchTimer.h>
+
+using namespace Eigen;
+using namespace std;
+
+static BenchTimer timer;
+
+// how many times we repeat each measurement.
+// measurements are randomly shuffled - we're not doing
+// all N identical measurements in a row.
+const int measurement_repetitions = 3;
+
+// Timings below this value are too short to be accurate,
+// we'll repeat measurements with more iterations until
+// we get a timing above that threshold.
+const float min_accurate_time = 1e-2f;
+
+// See --min-working-set-size command line parameter.
+size_t min_working_set_size = 0;
+
+float max_clock_speed = 0.0f;
+
+// range of sizes that we will benchmark (in all 3 K,M,N dimensions)
+const size_t maxsize = 2048;
+const size_t minsize = 16;
+
+typedef MatrixXf MatrixType;
+typedef MatrixType::Scalar Scalar;
+typedef internal::packet_traits<Scalar>::type Packet;
+
+static_assert((maxsize & (maxsize - 1)) == 0, "maxsize must be a power of two");
+static_assert((minsize & (minsize - 1)) == 0, "minsize must be a power of two");
+static_assert(maxsize > minsize, "maxsize must be larger than minsize");
+static_assert(maxsize < (minsize << 16), "maxsize must be less than (minsize<<16)");
+
+// just a helper to store a triple of K,M,N sizes for matrix product
+struct size_triple_t
+{
+ size_t k, m, n;
+ size_triple_t() : k(0), m(0), n(0) {}
+ size_triple_t(size_t _k, size_t _m, size_t _n) : k(_k), m(_m), n(_n) {}
+ size_triple_t(const size_triple_t& o) : k(o.k), m(o.m), n(o.n) {}
+ size_triple_t(uint16_t compact)
+ {
+ k = 1 << ((compact & 0xf00) >> 8);
+ m = 1 << ((compact & 0x0f0) >> 4);
+ n = 1 << ((compact & 0x00f) >> 0);
+ }
+};
+
+uint8_t log2_pot(size_t x) {
+ size_t l = 0;
+ while (x >>= 1) l++;
+ return l;
+}
+
+// Convert between size triples and a compact form fitting in 12 bits
+// where each size, which must be a POT, is encoded as its log2, on 4 bits
+// so the largest representable size is 2^15 == 32k ... big enough.
+uint16_t compact_size_triple(size_t k, size_t m, size_t n)
+{
+ return (log2_pot(k) << 8) | (log2_pot(m) << 4) | log2_pot(n);
+}
+
+uint16_t compact_size_triple(const size_triple_t& t)
+{
+ return compact_size_triple(t.k, t.m, t.n);
+}
+
+// A single benchmark. Initially only contains benchmark params.
+// Then call run(), which stores the result in the gflops field.
+struct benchmark_t
+{
+ uint16_t compact_product_size;
+ uint16_t compact_block_size;
+ bool use_default_block_size;
+ float gflops;
+ benchmark_t()
+ : compact_product_size(0)
+ , compact_block_size(0)
+ , use_default_block_size(false)
+ , gflops(0)
+ {
+ }
+ benchmark_t(size_t pk, size_t pm, size_t pn,
+ size_t bk, size_t bm, size_t bn)
+ : compact_product_size(compact_size_triple(pk, pm, pn))
+ , compact_block_size(compact_size_triple(bk, bm, bn))
+ , use_default_block_size(false)
+ , gflops(0)
+ {}
+ benchmark_t(size_t pk, size_t pm, size_t pn)
+ : compact_product_size(compact_size_triple(pk, pm, pn))
+ , compact_block_size(0)
+ , use_default_block_size(true)
+ , gflops(0)
+ {}
+
+ void run();
+};
+
+ostream& operator<<(ostream& s, const benchmark_t& b)
+{
+ s << hex << b.compact_product_size << dec;
+ if (b.use_default_block_size) {
+ size_triple_t t(b.compact_product_size);
+ Index k = t.k, m = t.m, n = t.n;
+ internal::computeProductBlockingSizes<Scalar, Scalar>(k, m, n);
+ s << " default(" << k << ", " << m << ", " << n << ")";
+ } else {
+ s << " " << hex << b.compact_block_size << dec;
+ }
+ s << " " << b.gflops;
+ return s;
+}
+
+// We sort first by increasing benchmark parameters,
+// then by decreasing performance.
+bool operator<(const benchmark_t& b1, const benchmark_t& b2)
+{
+ return b1.compact_product_size < b2.compact_product_size ||
+ (b1.compact_product_size == b2.compact_product_size && (
+ (b1.compact_block_size < b2.compact_block_size || (
+ b1.compact_block_size == b2.compact_block_size &&
+ b1.gflops > b2.gflops))));
+}
+
+void benchmark_t::run()
+{
+ size_triple_t productsizes(compact_product_size);
+
+ if (use_default_block_size) {
+ eigen_use_specific_block_size = false;
+ } else {
+ // feed eigen with our custom blocking params
+ eigen_use_specific_block_size = true;
+ size_triple_t blocksizes(compact_block_size);
+ eigen_block_size_k = blocksizes.k;
+ eigen_block_size_m = blocksizes.m;
+ eigen_block_size_n = blocksizes.n;
+ }
+
+ // set up the matrix pool
+
+ const size_t combined_three_matrices_sizes =
+ sizeof(Scalar) *
+ (productsizes.k * productsizes.m +
+ productsizes.k * productsizes.n +
+ productsizes.m * productsizes.n);
+
+ // 64 M is large enough that nobody has a cache bigger than that,
+ // while still being small enough that everybody has this much RAM,
+ // so conveniently we don't need to special-case platforms here.
+ const size_t unlikely_large_cache_size = 64 << 20;
+
+ const size_t working_set_size =
+ min_working_set_size ? min_working_set_size : unlikely_large_cache_size;
+
+ const size_t matrix_pool_size =
+ 1 + working_set_size / combined_three_matrices_sizes;
+
+ MatrixType *lhs = new MatrixType[matrix_pool_size];
+ MatrixType *rhs = new MatrixType[matrix_pool_size];
+ MatrixType *dst = new MatrixType[matrix_pool_size];
+
+ for (size_t i = 0; i < matrix_pool_size; i++) {
+ lhs[i] = MatrixType::Zero(productsizes.m, productsizes.k);
+ rhs[i] = MatrixType::Zero(productsizes.k, productsizes.n);
+ dst[i] = MatrixType::Zero(productsizes.m, productsizes.n);
+ }
+
+ // main benchmark loop
+
+ int iters_at_a_time = 1;
+ float time_per_iter = 0.0f;
+ size_t matrix_index = 0;
+ while (true) {
+
+ double starttime = timer.getCpuTime();
+ for (int i = 0; i < iters_at_a_time; i++) {
+ dst[matrix_index].noalias() = lhs[matrix_index] * rhs[matrix_index];
+ matrix_index++;
+ if (matrix_index == matrix_pool_size) {
+ matrix_index = 0;
+ }
+ }
+ double endtime = timer.getCpuTime();
+
+ const float timing = float(endtime - starttime);
+
+ if (timing >= min_accurate_time) {
+ time_per_iter = timing / iters_at_a_time;
+ break;
+ }
+
+ iters_at_a_time *= 2;
+ }
+
+ delete[] lhs;
+ delete[] rhs;
+ delete[] dst;
+
+ gflops = 2e-9 * productsizes.k * productsizes.m * productsizes.n / time_per_iter;
+}
+
+void print_cpuinfo()
+{
+#ifdef __linux__
+ cout << "contents of /proc/cpuinfo:" << endl;
+ string line;
+ ifstream cpuinfo("/proc/cpuinfo");
+ if (cpuinfo.is_open()) {
+ while (getline(cpuinfo, line)) {
+ cout << line << endl;
+ }
+ cpuinfo.close();
+ }
+ cout << endl;
+#elif defined __APPLE__
+ cout << "output of sysctl hw:" << endl;
+ system("sysctl hw");
+ cout << endl;
+#endif
+}
+
+template <typename T>
+string type_name()
+{
+ return "unknown";
+}
+
+template<>
+string type_name<float>()
+{
+ return "float";
+}
+
+template<>
+string type_name<double>()
+{
+ return "double";
+}
+
+struct action_t
+{
+ virtual const char* invokation_name() const { abort(); return nullptr; }
+ virtual void run() const { abort(); }
+ virtual ~action_t() {}
+};
+
+void show_usage_and_exit(int /*argc*/, char* argv[],
+ const vector<unique_ptr<action_t>>& available_actions)
+{
+ cerr << "usage: " << argv[0] << " <action> [options...]" << endl << endl;
+ cerr << "available actions:" << endl << endl;
+ for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
+ cerr << " " << (*it)->invokation_name() << endl;
+ }
+ cerr << endl;
+ cerr << "options:" << endl << endl;
+ cerr << " --min-working-set-size=N:" << endl;
+ cerr << " Set the minimum working set size to N bytes." << endl;
+ cerr << " This is rounded up as needed to a multiple of matrix size." << endl;
+ cerr << " A larger working set lowers the chance of a warm cache." << endl;
+ cerr << " The default value 0 means use a large enough working" << endl;
+ cerr << " set to likely outsize caches." << endl;
+ cerr << " A value of 1 (that is, 1 byte) would mean don't do anything to" << endl;
+ cerr << " avoid warm caches." << endl;
+ exit(1);
+}
+
+float measure_clock_speed()
+{
+ cerr << "Measuring clock speed... \r" << flush;
+
+ vector<float> all_gflops;
+ for (int i = 0; i < 8; i++) {
+ benchmark_t b(1024, 1024, 1024);
+ b.run();
+ all_gflops.push_back(b.gflops);
+ }
+
+ sort(all_gflops.begin(), all_gflops.end());
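+  // Sum the 4 middle values of the 8 sorted measurements: a trimmed estimate
+  // that discards the 2 lowest and 2 highest outliers.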
+ float stable_estimate = all_gflops[2] + all_gflops[3] + all_gflops[4] + all_gflops[5];
+
+  // multiply by an arbitrary constant to discourage trying to do anything with the
+ // returned values besides just comparing them with each other.
+ float result = stable_estimate * 123.456f;
+
+ return result;
+}
+
+struct human_duration_t
+{
+ int seconds;
+ human_duration_t(int s) : seconds(s) {}
+};
+
+ostream& operator<<(ostream& s, const human_duration_t& d)
+{
+ int remainder = d.seconds;
+ if (remainder > 3600) {
+ int hours = remainder / 3600;
+ s << hours << " h ";
+ remainder -= hours * 3600;
+ }
+ if (remainder > 60) {
+ int minutes = remainder / 60;
+ s << minutes << " min ";
+ remainder -= minutes * 60;
+ }
+ if (d.seconds < 600) {
+ s << remainder << " s";
+ }
+ return s;
+}
+
+const char session_filename[] = "/data/local/tmp/benchmark-blocking-sizes-session.data";
+
+void serialize_benchmarks(const char* filename, const vector<benchmark_t>& benchmarks, size_t first_benchmark_to_run)
+{
+ FILE* file = fopen(filename, "w");
+ if (!file) {
+ cerr << "Could not open file " << filename << " for writing." << endl;
+ cerr << "Do you have write permissions on the current working directory?" << endl;
+ exit(1);
+ }
+ size_t benchmarks_vector_size = benchmarks.size();
+ fwrite(&max_clock_speed, sizeof(max_clock_speed), 1, file);
+ fwrite(&benchmarks_vector_size, sizeof(benchmarks_vector_size), 1, file);
+ fwrite(&first_benchmark_to_run, sizeof(first_benchmark_to_run), 1, file);
+ fwrite(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file);
+ fclose(file);
+}
+
+bool deserialize_benchmarks(const char* filename, vector<benchmark_t>& benchmarks, size_t& first_benchmark_to_run)
+{
+ FILE* file = fopen(filename, "r");
+ if (!file) {
+ return false;
+ }
+ if (1 != fread(&max_clock_speed, sizeof(max_clock_speed), 1, file)) {
+ return false;
+ }
+ size_t benchmarks_vector_size = 0;
+ if (1 != fread(&benchmarks_vector_size, sizeof(benchmarks_vector_size), 1, file)) {
+ return false;
+ }
+ if (1 != fread(&first_benchmark_to_run, sizeof(first_benchmark_to_run), 1, file)) {
+ return false;
+ }
+ benchmarks.resize(benchmarks_vector_size);
+ if (benchmarks.size() != fread(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file)) {
+ return false;
+ }
+ unlink(filename);
+ return true;
+}
+
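+// Runs benchmarks starting at first_benchmark_to_run, re-measuring the clock
+// speed every minute and at the end. Returns early when the clock speed has
+// changed: if it increased, everything restarts from scratch; if it dropped
+// (throttling), the measurements taken since the last check are redone. The
+// caller, run_benchmarks, simply loops until all benchmarks have been run.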
+void try_run_some_benchmarks(
+ vector<benchmark_t>& benchmarks,
+ double time_start,
+ size_t& first_benchmark_to_run)
+{
+ if (first_benchmark_to_run == benchmarks.size()) {
+ return;
+ }
+
+ double time_last_progress_update = 0;
+ double time_last_clock_speed_measurement = 0;
+ double time_now = 0;
+
+ size_t benchmark_index = first_benchmark_to_run;
+
+ while (true) {
+ float ratio_done = float(benchmark_index) / benchmarks.size();
+ time_now = timer.getRealTime();
+
+ // We check clock speed every minute and at the end.
+ if (benchmark_index == benchmarks.size() ||
+ time_now > time_last_clock_speed_measurement + 60.0f)
+ {
+ time_last_clock_speed_measurement = time_now;
+
+ // Ensure that clock speed is as expected
+ float current_clock_speed = measure_clock_speed();
+
+ // The tolerance needs to be smaller than the relative difference between
+ // clock speeds that a device could operate under.
+ // It seems unlikely that a device would be throttling clock speeds by
+ // amounts smaller than 2%.
+ // With a value of 1%, I was getting within noise on a Sandy Bridge.
+ const float clock_speed_tolerance = 0.02f;
+
+ if (current_clock_speed > (1 + clock_speed_tolerance) * max_clock_speed) {
+ // Clock speed is now higher than we previously measured.
+ // Either our initial measurement was inaccurate, which won't happen
+ // too many times as we are keeping the best clock speed value and
+        // allowing some tolerance; or something really weird happened,
+ // which invalidates all benchmark results collected so far.
+ // Either way, we better restart all over again now.
+ if (benchmark_index) {
+ cerr << "Restarting at " << 100.0f * ratio_done
+ << " % because clock speed increased. " << endl;
+ }
+ max_clock_speed = current_clock_speed;
+ first_benchmark_to_run = 0;
+ return;
+ }
+
+ bool rerun_last_tests = false;
+
+ if (current_clock_speed < (1 - clock_speed_tolerance) * max_clock_speed) {
+ cerr << "Measurements completed so far: "
+ << 100.0f * ratio_done
+ << " % " << endl;
+ cerr << "Clock speed seems to be only "
+ << current_clock_speed/max_clock_speed
+ << " times what it used to be." << endl;
+
+ unsigned int seconds_to_sleep_if_lower_clock_speed = 1;
+
+ while (current_clock_speed < (1 - clock_speed_tolerance) * max_clock_speed) {
+ if (seconds_to_sleep_if_lower_clock_speed > 32) {
+ cerr << "Sleeping longer probably won't make a difference." << endl;
+ cerr << "Serializing benchmarks to " << session_filename << endl;
+ serialize_benchmarks(session_filename, benchmarks, first_benchmark_to_run);
+            cerr << "Now restart this benchmark, and it should pick up where we left off." << endl;
+ exit(2);
+ }
+ rerun_last_tests = true;
+ cerr << "Sleeping "
+ << seconds_to_sleep_if_lower_clock_speed
+ << " s... \r" << endl;
+ sleep(seconds_to_sleep_if_lower_clock_speed);
+ current_clock_speed = measure_clock_speed();
+ seconds_to_sleep_if_lower_clock_speed *= 2;
+ }
+ }
+
+ if (rerun_last_tests) {
+ cerr << "Redoing the last "
+ << 100.0f * float(benchmark_index - first_benchmark_to_run) / benchmarks.size()
+ << " % because clock speed had been low. " << endl;
+ return;
+ }
+
+ // nothing wrong with the clock speed so far, so there won't be a need to rerun
+ // benchmarks run so far in case we later encounter a lower clock speed.
+ first_benchmark_to_run = benchmark_index;
+ }
+
+ if (benchmark_index == benchmarks.size()) {
+ // We're done!
+ first_benchmark_to_run = benchmarks.size();
+ // Erase progress info
+ cerr << " " << endl;
+ return;
+ }
+
+ // Display progress info on stderr
+ if (time_now > time_last_progress_update + 1.0f) {
+ time_last_progress_update = time_now;
+ cerr << "Measurements... " << 100.0f * ratio_done
+ << " %, ETA "
+ << human_duration_t(float(time_now - time_start) * (1.0f - ratio_done) / ratio_done)
+ << " \r" << flush;
+ }
+
+ // This is where we actually run a benchmark!
+ benchmarks[benchmark_index].run();
+ benchmark_index++;
+ }
+}
+
+void run_benchmarks(vector<benchmark_t>& benchmarks)
+{
+ size_t first_benchmark_to_run;
+ vector<benchmark_t> deserialized_benchmarks;
+ bool use_deserialized_benchmarks = false;
+ if (deserialize_benchmarks(session_filename, deserialized_benchmarks, first_benchmark_to_run)) {
+ cerr << "Found serialized session with "
+ << 100.0f * first_benchmark_to_run / deserialized_benchmarks.size()
+ << " % already done" << endl;
+ if (deserialized_benchmarks.size() == benchmarks.size() &&
+ first_benchmark_to_run > 0 &&
+ first_benchmark_to_run < benchmarks.size())
+ {
+ use_deserialized_benchmarks = true;
+ }
+ }
+
+ if (use_deserialized_benchmarks) {
+ benchmarks = deserialized_benchmarks;
+ } else {
+ // not using deserialized benchmarks, starting from scratch
+ first_benchmark_to_run = 0;
+
+ // Randomly shuffling benchmarks allows us to get accurate enough progress info,
+ // as now the cheap/expensive benchmarks are randomly mixed so they average out.
+ // It also means that if data is corrupted for some time span, the odds are that
+ // not all repetitions of a given benchmark will be corrupted.
+ random_shuffle(benchmarks.begin(), benchmarks.end());
+ }
+
+ for (int i = 0; i < 4; i++) {
+ max_clock_speed = max(max_clock_speed, measure_clock_speed());
+ }
+
+ double time_start = 0.0;
+ while (first_benchmark_to_run < benchmarks.size()) {
+ if (first_benchmark_to_run == 0) {
+ time_start = timer.getRealTime();
+ }
+ try_run_some_benchmarks(benchmarks,
+ time_start,
+ first_benchmark_to_run);
+ }
+
+ // Sort timings by increasing benchmark parameters, and decreasing gflops.
+ // The latter is very important. It means that we can ignore all but the first
+ // benchmark with given parameters.
+ sort(benchmarks.begin(), benchmarks.end());
+
+ // Collect the best (i.e. now first) result for each set of parameter values.
+ vector<benchmark_t> best_benchmarks;
+ for (auto it = benchmarks.begin(); it != benchmarks.end(); ++it) {
+ if (best_benchmarks.empty() ||
+ best_benchmarks.back().compact_product_size != it->compact_product_size ||
+ best_benchmarks.back().compact_block_size != it->compact_block_size)
+ {
+ best_benchmarks.push_back(*it);
+ }
+ }
+
+ // keep and return only the best benchmarks
+ benchmarks = best_benchmarks;
+}
+
+struct measure_all_pot_sizes_action_t : action_t
+{
+ virtual const char* invokation_name() const { return "all-pot-sizes"; }
+ virtual void run() const
+ {
+ vector<benchmark_t> benchmarks;
+ for (int repetition = 0; repetition < measurement_repetitions; repetition++) {
+ for (size_t ksize = minsize; ksize <= maxsize; ksize *= 2) {
+ for (size_t msize = minsize; msize <= maxsize; msize *= 2) {
+ for (size_t nsize = minsize; nsize <= maxsize; nsize *= 2) {
+ for (size_t kblock = minsize; kblock <= ksize; kblock *= 2) {
+ for (size_t mblock = minsize; mblock <= msize; mblock *= 2) {
+ for (size_t nblock = minsize; nblock <= nsize; nblock *= 2) {
+ benchmarks.emplace_back(ksize, msize, nsize, kblock, mblock, nblock);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ run_benchmarks(benchmarks);
+
+ cout << "BEGIN MEASUREMENTS ALL POT SIZES" << endl;
+ for (auto it = benchmarks.begin(); it != benchmarks.end(); ++it) {
+ cout << *it << endl;
+ }
+ }
+};
+
+struct measure_default_sizes_action_t : action_t
+{
+ virtual const char* invokation_name() const { return "default-sizes"; }
+ virtual void run() const
+ {
+ vector<benchmark_t> benchmarks;
+ for (int repetition = 0; repetition < measurement_repetitions; repetition++) {
+ for (size_t ksize = minsize; ksize <= maxsize; ksize *= 2) {
+ for (size_t msize = minsize; msize <= maxsize; msize *= 2) {
+ for (size_t nsize = minsize; nsize <= maxsize; nsize *= 2) {
+ benchmarks.emplace_back(ksize, msize, nsize);
+ }
+ }
+ }
+ }
+
+ run_benchmarks(benchmarks);
+
+ cout << "BEGIN MEASUREMENTS DEFAULT SIZES" << endl;
+ for (auto it = benchmarks.begin(); it != benchmarks.end(); ++it) {
+ cout << *it << endl;
+ }
+ }
+};
+
+int main(int argc, char* argv[])
+{
+ double time_start = timer.getRealTime();
+ cout.precision(4);
+ cerr.precision(4);
+
+ vector<unique_ptr<action_t>> available_actions;
+ available_actions.emplace_back(new measure_all_pot_sizes_action_t);
+ available_actions.emplace_back(new measure_default_sizes_action_t);
+
+ auto action = available_actions.end();
+
+ if (argc <= 1) {
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+ for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
+ if (!strcmp(argv[1], (*it)->invokation_name())) {
+ action = it;
+ break;
+ }
+ }
+
+ if (action == available_actions.end()) {
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+
+ for (int i = 2; i < argc; i++) {
+ if (argv[i] == strstr(argv[i], "--min-working-set-size=")) {
+ const char* equals_sign = strchr(argv[i], '=');
+ min_working_set_size = strtoul(equals_sign+1, nullptr, 10);
+ } else {
+ cerr << "unrecognized option: " << argv[i] << endl << endl;
+ show_usage_and_exit(argc, argv, available_actions);
+ }
+ }
+
+ print_cpuinfo();
+
+ cout << "benchmark parameters:" << endl;
+ cout << "pointer size: " << 8*sizeof(void*) << " bits" << endl;
+ cout << "scalar type: " << type_name<Scalar>() << endl;
+ cout << "packet size: " << internal::packet_traits<MatrixType::Scalar>::size << endl;
+ cout << "minsize = " << minsize << endl;
+ cout << "maxsize = " << maxsize << endl;
+ cout << "measurement_repetitions = " << measurement_repetitions << endl;
+ cout << "min_accurate_time = " << min_accurate_time << endl;
+ cout << "min_working_set_size = " << min_working_set_size;
+ if (min_working_set_size == 0) {
+ cout << " (try to outsize caches)";
+ }
+ cout << endl << endl;
+
+ (*action)->run();
+
+ double time_end = timer.getRealTime();
+ cerr << "Finished in " << human_duration_t(time_end - time_start) << endl;
+}
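For reference, here is a minimal standalone sketch (not part of the patch) of the back-off loop the harness above uses when the CPU clock appears throttled; measure_clock_speed_stub() is a hypothetical placeholder for the harness's real measurement routine:

#include <unistd.h>   // sleep()
#include <iostream>

// Hypothetical stand-in for the harness's measure_clock_speed(); always reports full speed here.
static double measure_clock_speed_stub() { return 1.0; }

// Wait, doubling the sleep time, until the clock speed is back within tolerance.
// Returns false if we give up (the caller would then serialize its progress and exit).
static bool wait_for_clock_speed_recovery(double max_clock_speed, double tolerance)
{
  unsigned int seconds_to_sleep = 1;
  double current = measure_clock_speed_stub();
  while (current < (1 - tolerance) * max_clock_speed) {
    if (seconds_to_sleep > 32) return false;           // sleeping longer probably won't help
    std::cerr << "Sleeping " << seconds_to_sleep << " s...\n";
    sleep(seconds_to_sleep);
    current = measure_clock_speed_stub();
    seconds_to_sleep *= 2;                              // exponential back-off: 1, 2, 4, ..., 32 s
  }
  return true;
}

int main()
{
  return wait_for_clock_speed_recovery(1.0, 0.02) ? 0 : 2;
}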
diff --git a/bench/btl/CMakeLists.txt b/bench/btl/CMakeLists.txt
index 119b470..38ff9f4 100644
--- a/bench/btl/CMakeLists.txt
+++ b/bench/btl/CMakeLists.txt
@@ -11,29 +11,24 @@
string(REGEX MATCH icpc IS_ICPC ${CMAKE_CXX_COMPILER})
IF(CMAKE_COMPILER_IS_GNUCXX OR IS_ICPC)
- SET(CMAKE_CXX_FLAGS "-g0 -O3 -DNDEBUG")
- SET(CMAKE_Fortran_FLAGS "-g0 -O3 -DNDEBUG")
- IF(NOT BTL_NOVEC)
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse2")
- SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -msse2")
- ELSE(NOT BTL_NOVEC)
+ SET(CMAKE_CXX_FLAGS "-g0 -O3 -DNDEBUG ${CMAKE_CXX_FLAGS}")
+ SET(CMAKE_Fortran_FLAGS "-g0 -O3 -DNDEBUG ${CMAKE_Fortran_FLAGS}")
+ IF(BTL_NOVEC)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DEIGEN_DONT_VECTORIZE")
- ENDIF(NOT BTL_NOVEC)
+ ENDIF(BTL_NOVEC)
ENDIF(CMAKE_COMPILER_IS_GNUCXX OR IS_ICPC)
IF(MSVC)
SET(CMAKE_CXX_FLAGS " /O2 /Ot /GL /fp:fast -DNDEBUG")
# SET(CMAKE_Fortran_FLAGS "-g0 -O3 -DNDEBUG")
- IF(NOT BTL_NOVEC)
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2")
- ELSE(NOT BTL_NOVEC)
+ IF(BTL_NOVEC)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DEIGEN_DONT_VECTORIZE")
- ENDIF(NOT BTL_NOVEC)
+ ENDIF(BTL_NOVEC)
ENDIF(MSVC)
if(IS_ICPC)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fast")
- set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fast")
+ set(CMAKE_CXX_FLAGS "-fast ${CMAKE_CXX_FLAGS}")
+ set(CMAKE_Fortran_FLAGS "-fast ${CMAKE_Fortran_FLAGS}")
endif(IS_ICPC)
include_directories(
@@ -48,6 +43,12 @@
# set(DEFAULT_LIBRARIES ${MKL_LIBRARIES})
# endif (MKL_FOUND)
+find_library(EIGEN_BTL_RT_LIBRARY rt)
+# if we cannot find it easily, then we don't need it!
+if(NOT EIGEN_BTL_RT_LIBRARY)
+ set(EIGEN_BTL_RT_LIBRARY "")
+endif()
+
MACRO(BTL_ADD_BENCH targetname)
foreach(_current_var ${ARGN})
@@ -70,7 +71,7 @@
IF(BUILD_${targetname})
ADD_EXECUTABLE(${targetname} ${_sources})
ADD_TEST(${targetname} "${targetname}")
- target_link_libraries(${targetname} ${DEFAULT_LIBRARIES} rt)
+ target_link_libraries(${targetname} ${DEFAULT_LIBRARIES} ${EIGEN_BTL_RT_LIBRARY})
ENDIF(BUILD_${targetname})
ENDMACRO(BTL_ADD_BENCH)
@@ -91,6 +92,7 @@
add_subdirectory(libs/eigen3)
add_subdirectory(libs/eigen2)
+add_subdirectory(libs/tensors)
add_subdirectory(libs/BLAS)
add_subdirectory(libs/ublas)
add_subdirectory(libs/gmm)
@@ -98,6 +100,7 @@
add_subdirectory(libs/blitz)
add_subdirectory(libs/tvmet)
add_subdirectory(libs/STL)
+add_subdirectory(libs/blaze)
add_subdirectory(data)
diff --git a/bench/btl/actions/action_axpby.hh b/bench/btl/actions/action_axpby.hh
index 98511ab..dadd0cc 100644
--- a/bench/btl/actions/action_axpby.hh
+++ b/bench/btl/actions/action_axpby.hh
@@ -33,7 +33,7 @@
public :
// Ctor
- Action_axpby( int size ):_size(size),_alpha(0.5),_beta(0.95)
+ Action_axpby( int size ):_alpha(0.5),_beta(0.95),_size(size)
{
MESSAGE("Action_axpby Ctor");
diff --git a/bench/btl/actions/action_axpy.hh b/bench/btl/actions/action_axpy.hh
index e4cb3a5..261be4c 100644
--- a/bench/btl/actions/action_axpy.hh
+++ b/bench/btl/actions/action_axpy.hh
@@ -35,7 +35,7 @@
// Ctor
- Action_axpy( int size ):_size(size),_coef(1.0)
+ Action_axpy( int size ):_coef(1.0),_size(size)
{
MESSAGE("Action_axpy Ctor");
diff --git a/bench/btl/cmake/FindACML.cmake b/bench/btl/cmake/FindACML.cmake
index f45ae1b..4989fa2 100644
--- a/bench/btl/cmake/FindACML.cmake
+++ b/bench/btl/cmake/FindACML.cmake
@@ -17,6 +17,7 @@
libacml_mp.so
PATHS
/usr/lib
+ /usr/lib64
$ENV{ACMLDIR}/lib
${LIB_INSTALL_DIR}
)
@@ -35,6 +36,7 @@
libacml.so libacml_mv.so
PATHS
/usr/lib
+ /usr/lib64
$ENV{ACMLDIR}/lib
${LIB_INSTALL_DIR}
)
diff --git a/bench/btl/cmake/FindATLAS.cmake b/bench/btl/cmake/FindATLAS.cmake
index 6b90652..4136a98 100644
--- a/bench/btl/cmake/FindATLAS.cmake
+++ b/bench/btl/cmake/FindATLAS.cmake
@@ -3,33 +3,25 @@
set(ATLAS_FIND_QUIETLY TRUE)
endif (ATLAS_LIBRARIES)
-find_file(ATLAS_LIB libatlas.so.3 PATHS /usr/lib $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
-find_library(ATLAS_LIB atlas PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
+find_file(ATLAS_LIB libatlas.so.3 PATHS /usr/lib /usr/lib/atlas /usr/lib64 /usr/lib64/atlas $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
+find_library(ATLAS_LIB satlas PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
-find_file(ATLAS_CBLAS libcblas.so.3 PATHS /usr/lib $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
-find_library(ATLAS_CBLAS cblas PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
+find_file(ATLAS_LAPACK NAMES liblapack_atlas.so.3 liblapack.so.3 PATHS /usr/lib /usr/lib/atlas /usr/lib64 /usr/lib64/atlas $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
+find_library(ATLAS_LAPACK NAMES lapack_atlas lapack PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
-find_file(ATLAS_LAPACK liblapack_atlas.so.3 PATHS /usr/lib $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
-find_library(ATLAS_LAPACK lapack_atlas PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
-
-if(NOT ATLAS_LAPACK)
- find_file(ATLAS_LAPACK liblapack.so.3 PATHS /usr/lib/atlas $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
- find_library(ATLAS_LAPACK lapack PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
-endif(NOT ATLAS_LAPACK)
-
-find_file(ATLAS_F77BLAS libf77blas.so.3 PATHS /usr/lib $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
+find_file(ATLAS_F77BLAS libf77blas.so.3 PATHS /usr/lib /usr/lib/atlas /usr/lib64 /usr/lib64/atlas $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
find_library(ATLAS_F77BLAS f77blas PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
if(ATLAS_LIB AND ATLAS_CBLAS AND ATLAS_LAPACK AND ATLAS_F77BLAS)
- set(ATLAS_LIBRARIES ${ATLAS_LAPACK} ${ATLAS_CBLAS} ${ATLAS_F77BLAS} ${ATLAS_LIB})
+ set(ATLAS_LIBRARIES ${ATLAS_LAPACK} ${ATLAS_LIB})
# search the default lapack lib link to it
find_file(ATLAS_REFERENCE_LAPACK liblapack.so.3 PATHS /usr/lib /usr/lib64)
find_library(ATLAS_REFERENCE_LAPACK NAMES lapack)
- if(ATLAS_REFERENCE_LAPACK)
- set(ATLAS_LIBRARIES ${ATLAS_LIBRARIES} ${ATLAS_REFERENCE_LAPACK})
- endif()
+# if(ATLAS_REFERENCE_LAPACK)
+# set(ATLAS_LIBRARIES ${ATLAS_LIBRARIES} ${ATLAS_REFERENCE_LAPACK})
+# endif()
endif(ATLAS_LIB AND ATLAS_CBLAS AND ATLAS_LAPACK AND ATLAS_F77BLAS)
diff --git a/bench/btl/cmake/FindBLAZE.cmake b/bench/btl/cmake/FindBLAZE.cmake
new file mode 100644
index 0000000..dba4c89
--- /dev/null
+++ b/bench/btl/cmake/FindBLAZE.cmake
@@ -0,0 +1,31 @@
+# - Try to find blaze headers
+# Once done this will define
+#
+# BLAZE_FOUND - system has blaze lib
+# BLAZE_INCLUDE_DIR - the blaze include directory
+#
+# Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+# Adapted from FindEigen.cmake:
+# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>
+# Redistribution and use is allowed according to the terms of the BSD license.
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
+
+if (BLAZE_INCLUDE_DIR)
+
+ # in cache already
+ set(BLAZE_FOUND TRUE)
+
+else (BLAZE_INCLUDE_DIR)
+
+find_path(BLAZE_INCLUDE_DIR NAMES blaze/Blaze.h
+ PATHS
+ ${INCLUDE_INSTALL_DIR}
+ )
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(BLAZE DEFAULT_MSG BLAZE_INCLUDE_DIR)
+
+mark_as_advanced(BLAZE_INCLUDE_DIR)
+
+endif(BLAZE_INCLUDE_DIR)
+
diff --git a/bench/btl/cmake/FindCBLAS.cmake b/bench/btl/cmake/FindCBLAS.cmake
index 554f029..ce0f2f2 100644
--- a/bench/btl/cmake/FindCBLAS.cmake
+++ b/bench/btl/cmake/FindCBLAS.cmake
@@ -23,6 +23,7 @@
libcblas.so.3
PATHS
/usr/lib
+ /usr/lib64
$ENV{CBLASDIR}/lib
${LIB_INSTALL_DIR}
)
diff --git a/bench/btl/cmake/FindGOTO.cmake b/bench/btl/cmake/FindGOTO.cmake
deleted file mode 100644
index 67ea093..0000000
--- a/bench/btl/cmake/FindGOTO.cmake
+++ /dev/null
@@ -1,15 +0,0 @@
-
-if (GOTO_LIBRARIES)
- set(GOTO_FIND_QUIETLY TRUE)
-endif (GOTO_LIBRARIES)
-
-find_library(GOTO_LIBRARIES goto PATHS $ENV{GOTODIR} ${LIB_INSTALL_DIR})
-
-if(GOTO_LIBRARIES AND CMAKE_COMPILER_IS_GNUCXX)
- set(GOTO_LIBRARIES ${GOTO_LIBRARIES} "-lpthread -lgfortran")
-endif()
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(GOTO DEFAULT_MSG GOTO_LIBRARIES)
-
-mark_as_advanced(GOTO_LIBRARIES)
diff --git a/bench/btl/cmake/FindGOTO2.cmake b/bench/btl/cmake/FindGOTO2.cmake
deleted file mode 100644
index baa68d2..0000000
--- a/bench/btl/cmake/FindGOTO2.cmake
+++ /dev/null
@@ -1,25 +0,0 @@
-
-if (GOTO2_LIBRARIES)
- set(GOTO2_FIND_QUIETLY TRUE)
-endif (GOTO2_LIBRARIES)
-#
-# find_path(GOTO_INCLUDES
-# NAMES
-# cblas.h
-# PATHS
-# $ENV{GOTODIR}/include
-# ${INCLUDE_INSTALL_DIR}
-# )
-
-find_file(GOTO2_LIBRARIES libgoto2.so PATHS /usr/lib $ENV{GOTO2DIR} ${LIB_INSTALL_DIR})
-find_library(GOTO2_LIBRARIES goto2 PATHS $ENV{GOTO2DIR} ${LIB_INSTALL_DIR})
-
-if(GOTO2_LIBRARIES AND CMAKE_COMPILER_IS_GNUCXX)
- set(GOTO2_LIBRARIES ${GOTO2_LIBRARIES} "-lpthread -lgfortran")
-endif()
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(GOTO2 DEFAULT_MSG
- GOTO2_LIBRARIES)
-
-mark_as_advanced(GOTO2_LIBRARIES)
diff --git a/bench/btl/cmake/FindOPENBLAS.cmake b/bench/btl/cmake/FindOPENBLAS.cmake
new file mode 100644
index 0000000..2a09194
--- /dev/null
+++ b/bench/btl/cmake/FindOPENBLAS.cmake
@@ -0,0 +1,17 @@
+
+if (OPENBLAS_LIBRARIES)
+ set(OPENBLAS_FIND_QUIETLY TRUE)
+endif (OPENBLAS_LIBRARIES)
+
+find_file(OPENBLAS_LIBRARIES NAMES libopenblas.so libopenblas.so.0 PATHS /usr/lib /usr/lib64 $ENV{OPENBLASDIR} ${LIB_INSTALL_DIR})
+find_library(OPENBLAS_LIBRARIES openblas PATHS $ENV{OPENBLASDIR} ${LIB_INSTALL_DIR})
+
+if(OPENBLAS_LIBRARIES AND CMAKE_COMPILER_IS_GNUCXX)
+ set(OPENBLAS_LIBRARIES ${OPENBLAS_LIBRARIES} "-lpthread -lgfortran")
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(OPENBLAS DEFAULT_MSG
+ OPENBLAS_LIBRARIES)
+
+mark_as_advanced(OPENBLAS_LIBRARIES)
diff --git a/bench/btl/data/action_settings.txt b/bench/btl/data/action_settings.txt
index e32213e..39d2b5d 100644
--- a/bench/btl/data/action_settings.txt
+++ b/bench/btl/data/action_settings.txt
@@ -1,19 +1,19 @@
-aat ; "{/*1.5 A x A^T}" ; "matrix size" ; 4:3000
-ata ; "{/*1.5 A^T x A}" ; "matrix size" ; 4:3000
-atv ; "{/*1.5 matrix^T x vector}" ; "matrix size" ; 4:3000
+aat ; "{/*1.5 A x A^T}" ; "matrix size" ; 4:5000
+ata ; "{/*1.5 A^T x A}" ; "matrix size" ; 4:5000
+atv ; "{/*1.5 matrix^T x vector}" ; "matrix size" ; 4:5000
axpby ; "{/*1.5 Y = alpha X + beta Y}" ; "vector size" ; 5:1000000
axpy ; "{/*1.5 Y += alpha X}" ; "vector size" ; 5:1000000
-matrix_matrix ; "{/*1.5 matrix matrix product}" ; "matrix size" ; 4:3000
-matrix_vector ; "{/*1.5 matrix vector product}" ; "matrix size" ; 4:3000
-trmm ; "{/*1.5 triangular matrix matrix product}" ; "matrix size" ; 4:3000
-trisolve_vector ; "{/*1.5 triangular solver - vector (X = inv(L) X)}" ; "size" ; 4:3000
-trisolve_matrix ; "{/*1.5 triangular solver - matrix (M = inv(L) M)}" ; "size" ; 4:3000
-cholesky ; "{/*1.5 Cholesky decomposition}" ; "matrix size" ; 4:3000
-complete_lu_decomp ; "{/*1.5 Complete LU decomposition}" ; "matrix size" ; 4:3000
-partial_lu_decomp ; "{/*1.5 Partial LU decomposition}" ; "matrix size" ; 4:3000
-tridiagonalization ; "{/*1.5 Tridiagonalization}" ; "matrix size" ; 4:3000
-hessenberg ; "{/*1.5 Hessenberg decomposition}" ; "matrix size" ; 4:3000
-symv ; "{/*1.5 symmetric matrix vector product}" ; "matrix size" ; 4:3000
-syr2 ; "{/*1.5 symmretric rank-2 update (A += u^T v + u v^T)}" ; "matrix size" ; 4:3000
-ger ; "{/*1.5 general rank-1 update (A += u v^T)}" ; "matrix size" ; 4:3000
-rot ; "{/*1.5 apply rotation in the plane}" ; "vector size" ; 4:1000000
\ No newline at end of file
+matrix_matrix ; "{/*1.5 matrix matrix product}" ; "matrix size" ; 4:5000
+matrix_vector ; "{/*1.5 matrix vector product}" ; "matrix size" ; 4:5000
+trmm ; "{/*1.5 triangular matrix matrix product}" ; "matrix size" ; 4:5000
+trisolve_vector ; "{/*1.5 triangular solver - vector (X = inv(L) X)}" ; "size" ; 4:5000
+trisolve_matrix ; "{/*1.5 triangular solver - matrix (M = inv(L) M)}" ; "size" ; 4:5000
+cholesky ; "{/*1.5 Cholesky decomposition}" ; "matrix size" ; 4:5000
+complete_lu_decomp ; "{/*1.5 Complete LU decomposition}" ; "matrix size" ; 4:5000
+partial_lu_decomp ; "{/*1.5 Partial LU decomposition}" ; "matrix size" ; 4:5000
+tridiagonalization ; "{/*1.5 Tridiagonalization}" ; "matrix size" ; 4:5000
+hessenberg ; "{/*1.5 Hessenberg decomposition}" ; "matrix size" ; 4:5000
+symv ; "{/*1.5 symmetric matrix vector product}" ; "matrix size" ; 4:5000
+syr2 ; "{/*1.5 symmetric rank-2 update (A += u v^T + v u^T)}" ; "matrix size" ; 4:5000
+ger ; "{/*1.5 general rank-1 update (A += u v^T)}" ; "matrix size" ; 4:5000
+rot ; "{/*1.5 apply rotation in the plane}" ; "vector size" ; 4:1000000
diff --git a/bench/btl/data/perlib_plot_settings.txt b/bench/btl/data/perlib_plot_settings.txt
index 6844bab..f023cfe 100644
--- a/bench/btl/data/perlib_plot_settings.txt
+++ b/bench/btl/data/perlib_plot_settings.txt
@@ -10,7 +10,7 @@
mtl4 ; with lines lw 3 lt 1 lc rgbcolor "#d18847"
blitz ; with lines lw 3 lt 1 lc rgbcolor "#ff00ff"
F77 ; with lines lw 3 lt 3 lc rgbcolor "#e6e64c"
-GOTO ; with lines lw 3 lt 3 lc rgbcolor "#C05600"
-GOTO2 ; with lines lw 3 lt 1 lc rgbcolor "#C05600"
+OPENBLAS ; with lines lw 3 lt 1 lc rgbcolor "#C05600"
C ; with lines lw 3 lt 3 lc rgbcolor "#e6bd96"
ACML ; with lines lw 2 lt 3 lc rgbcolor "#e6e64c"
+blaze ; with lines lw 3 lt 1 lc rgbcolor "#ff00ff"
diff --git a/bench/btl/generic_bench/bench.hh b/bench/btl/generic_bench/bench.hh
index 005c363..7b7b951 100644
--- a/bench/btl/generic_bench/bench.hh
+++ b/bench/btl/generic_bench/bench.hh
@@ -102,8 +102,8 @@
// merge the two data
std::vector<int> newSizes;
std::vector<double> newFlops;
- int i=0;
- int j=0;
+ unsigned int i=0;
+ unsigned int j=0;
while (i<tab_sizes.size() && j<oldSizes.size())
{
if (tab_sizes[i] == oldSizes[j])
diff --git a/bench/btl/generic_bench/bench_parameter.hh b/bench/btl/generic_bench/bench_parameter.hh
index 4c355cd..2b01149 100644
--- a/bench/btl/generic_bench/bench_parameter.hh
+++ b/bench/btl/generic_bench/bench_parameter.hh
@@ -29,11 +29,11 @@
// min vector size for axpy bench
#define MIN_AXPY 5
// max vector size for axpy bench
-#define MAX_AXPY 1000000
+#define MAX_AXPY 3000000
// min matrix size for matrix vector product bench
#define MIN_MV 5
// max matrix size for matrix vector product bench
-#define MAX_MV 3000
+#define MAX_MV 5000
// min matrix size for matrix matrix product bench
#define MIN_MM 5
// max matrix size for matrix matrix product bench
diff --git a/bench/btl/generic_bench/btl.hh b/bench/btl/generic_bench/btl.hh
index f1a88ff..706b00f 100644
--- a/bench/btl/generic_bench/btl.hh
+++ b/bench/btl/generic_bench/btl.hh
@@ -44,15 +44,10 @@
#define BTL_ASM_COMMENT(X)
#endif
-#if (defined __GNUC__) && (!defined __INTEL_COMPILER) && !defined(__arm__) && !defined(__powerpc__)
-#define BTL_DISABLE_SSE_EXCEPTIONS() { \
- int aux; \
- asm( \
- "stmxcsr %[aux] \n\t" \
- "orl $32832, %[aux] \n\t" \
- "ldmxcsr %[aux] \n\t" \
- : : [aux] "m" (aux)); \
-}
+#ifdef __SSE__
+#include "xmmintrin.h"
+// This enables flush to zero (FTZ) and denormals are zero (DAZ) modes:
+#define BTL_DISABLE_SSE_EXCEPTIONS() { _mm_setcsr(_mm_getcsr() | 0x8040); }
#else
#define BTL_DISABLE_SSE_EXCEPTIONS()
#endif
@@ -176,7 +171,7 @@
if (_config!=NULL)
{
std::vector<BtlString> config = BtlString(_config).split(" \t\n");
- for (int i = 0; i<config.size(); i++)
+ for (unsigned int i = 0; i<config.size(); i++)
{
if (config[i].beginsWith("-a"))
{
@@ -224,7 +219,7 @@
return false;
BtlString name(_name);
- for (int i=0; i<Instance.m_selectedActionNames.size(); ++i)
+ for (unsigned int i=0; i<Instance.m_selectedActionNames.size(); ++i)
if (name.contains(Instance.m_selectedActionNames[i]))
return false;
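As a side note on the BTL_DISABLE_SSE_EXCEPTIONS() change above: on SSE targets the new macro simply sets two MXCSR bits, which the following minimal sketch (illustrative only, not part of the patch) spells out:

#include <xmmintrin.h>

int main()
{
  // 0x8000 = FTZ (flush-to-zero), 0x0040 = DAZ (denormals-are-zero); together 0x8040.
  // Setting both keeps denormal operands/results from falling back to slow microcode paths.
  _mm_setcsr(_mm_getcsr() | 0x8040);
  // ... run SSE floating-point benchmark code here ...
  return 0;
}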
diff --git a/bench/btl/generic_bench/init/init_function.hh b/bench/btl/generic_bench/init/init_function.hh
index 7b3bdba..e467cb6 100644
--- a/bench/btl/generic_bench/init/init_function.hh
+++ b/bench/btl/generic_bench/init/init_function.hh
@@ -30,23 +30,23 @@
return index_i+index_j;
}
-double pseudo_random(int index)
+double pseudo_random(int /*index*/)
{
return std::rand()/double(RAND_MAX);
}
-double pseudo_random(int index_i, int index_j)
+double pseudo_random(int /*index_i*/, int /*index_j*/)
{
return std::rand()/double(RAND_MAX);
}
-double null_function(int index)
+double null_function(int /*index*/)
{
return 0.0;
}
-double null_function(int index_i, int index_j)
+double null_function(int /*index_i*/, int /*index_j*/)
{
return 0.0;
}
diff --git a/bench/btl/generic_bench/init/init_matrix.hh b/bench/btl/generic_bench/init/init_matrix.hh
index 67cbd20..6382d30 100644
--- a/bench/btl/generic_bench/init/init_matrix.hh
+++ b/bench/btl/generic_bench/init/init_matrix.hh
@@ -29,7 +29,7 @@
X.resize(size);
- for (int j=0;j<X.size();j++){
+ for (unsigned int j=0;j<X.size();j++){
X[j]=typename Vector::value_type(init_function(row,j));
}
}
@@ -42,7 +42,7 @@
template<double init_function(int,int),class Vector>
BTL_DONT_INLINE void init_matrix(Vector & A, int size){
A.resize(size);
- for (int row=0; row<A.size() ; row++){
+ for (unsigned int row=0; row<A.size() ; row++){
init_row<init_function>(A[row],size,row);
}
}
@@ -50,11 +50,11 @@
template<double init_function(int,int),class Matrix>
BTL_DONT_INLINE void init_matrix_symm(Matrix& A, int size){
A.resize(size);
- for (int row=0; row<A.size() ; row++)
+ for (unsigned int row=0; row<A.size() ; row++)
A[row].resize(size);
- for (int row=0; row<A.size() ; row++){
+ for (unsigned int row=0; row<A.size() ; row++){
A[row][row] = init_function(row,row);
- for (int col=0; col<row ; col++){
+ for (unsigned int col=0; col<row ; col++){
double x = init_function(row,col);
A[row][col] = A[col][row] = x;
}
diff --git a/bench/btl/generic_bench/init/init_vector.hh b/bench/btl/generic_bench/init/init_vector.hh
index efaf0c9..518e87d 100644
--- a/bench/btl/generic_bench/init/init_vector.hh
+++ b/bench/btl/generic_bench/init/init_vector.hh
@@ -29,7 +29,7 @@
X.resize(size);
- for (int i=0;i<X.size();i++){
+ for (unsigned int i=0;i<X.size();i++){
X[i]=typename Vector::value_type(init_function(i));
}
}
diff --git a/bench/btl/generic_bench/timers/portable_perf_analyzer.hh b/bench/btl/generic_bench/timers/portable_perf_analyzer.hh
index fc0f316..5e579fb 100644
--- a/bench/btl/generic_bench/timers/portable_perf_analyzer.hh
+++ b/bench/btl/generic_bench/timers/portable_perf_analyzer.hh
@@ -78,7 +78,7 @@
// time measurement
action.calculate();
_chronos.start();
- for (int ii=0;ii<_nb_calc;ii++)
+ for (unsigned int ii=0;ii<_nb_calc;ii++)
{
action.calculate();
}
diff --git a/bench/btl/generic_bench/timers/portable_timer.hh b/bench/btl/generic_bench/timers/portable_timer.hh
index e6ad309..c199811 100755
--- a/bench/btl/generic_bench/timers/portable_timer.hh
+++ b/bench/btl/generic_bench/timers/portable_timer.hh
@@ -34,7 +34,7 @@
// timer -------------------------------------------------------------------//
// A timer object measures CPU time.
-#ifdef _MSC_VER
+#if defined(_MSC_VER)
#define NOMINMAX
#include <windows.h>
@@ -87,6 +87,48 @@
}; // Portable_Timer
+#elif defined(__APPLE__)
+#include <CoreServices/CoreServices.h>
+#include <mach/mach_time.h>
+
+
+class Portable_Timer
+{
+ public:
+
+ Portable_Timer()
+ {
+ }
+
+ void start()
+ {
+ m_start_time = double(mach_absolute_time())*1e-9;
+
+ }
+
+ void stop()
+ {
+ m_stop_time = double(mach_absolute_time())*1e-9;
+
+ }
+
+ double elapsed()
+ {
+ return user_time();
+ }
+
+ double user_time()
+ {
+ return m_stop_time - m_start_time;
+ }
+
+
+private:
+
+ double m_stop_time, m_start_time;
+
+}; // Portable_Timer (Apple)
+
#else
#include <sys/time.h>
@@ -138,7 +180,7 @@
int m_clkid;
double m_stop_time, m_start_time;
-}; // Portable_Timer
+}; // Portable_Timer (Linux)
#endif
diff --git a/bench/btl/generic_bench/utils/size_lin_log.hh b/bench/btl/generic_bench/utils/size_lin_log.hh
index bca3932..bbc9f54 100644
--- a/bench/btl/generic_bench/utils/size_lin_log.hh
+++ b/bench/btl/generic_bench/utils/size_lin_log.hh
@@ -23,7 +23,7 @@
#include "size_log.hh"
template<class Vector>
-void size_lin_log(const int nb_point, const int size_min, const int size_max, Vector & X)
+void size_lin_log(const int nb_point, const int /*size_min*/, const int size_max, Vector & X)
{
int ten=10;
int nine=9;
diff --git a/bench/btl/libs/BLAS/CMakeLists.txt b/bench/btl/libs/BLAS/CMakeLists.txt
index de42fe0..0272cca 100644
--- a/bench/btl/libs/BLAS/CMakeLists.txt
+++ b/bench/btl/libs/BLAS/CMakeLists.txt
@@ -18,27 +18,14 @@
endif (MKL_FOUND)
-find_package(GOTO2)
-if (GOTO2_FOUND)
- btl_add_bench(btl_goto2 main.cpp)
- if(BUILD_btl_goto2)
- target_link_libraries(btl_goto2 ${GOTO_LIBRARIES} )
- set_target_properties(btl_goto2 PROPERTIES COMPILE_FLAGS "-DCBLASNAME=GOTO2")
- endif(BUILD_btl_goto2)
-endif (GOTO2_FOUND)
-
-find_package(GOTO)
-if (GOTO_FOUND)
- if(GOTO2_FOUND)
- btl_add_bench(btl_goto main.cpp OFF)
- else()
- btl_add_bench(btl_goto main.cpp)
- endif()
- if(BUILD_btl_goto)
- target_link_libraries(btl_goto ${GOTO_LIBRARIES} )
- set_target_properties(btl_goto PROPERTIES COMPILE_FLAGS "-DCBLASNAME=GOTO")
- endif(BUILD_btl_goto)
-endif (GOTO_FOUND)
+find_package(OPENBLAS)
+if (OPENBLAS_FOUND)
+ btl_add_bench(btl_openblas main.cpp)
+ if(BUILD_btl_openblas)
+ target_link_libraries(btl_openblas ${OPENBLAS_LIBRARIES} )
+ set_target_properties(btl_openblas PROPERTIES COMPILE_FLAGS "-DCBLASNAME=OPENBLAS")
+ endif(BUILD_btl_openblas)
+endif (OPENBLAS_FOUND)
find_package(ACML)
if (ACML_FOUND)
diff --git a/bench/btl/libs/BLAS/blas_interface_impl.hh b/bench/btl/libs/BLAS/blas_interface_impl.hh
index 0e84df0..fc4ba2a 100644
--- a/bench/btl/libs/BLAS/blas_interface_impl.hh
+++ b/bench/btl/libs/BLAS/blas_interface_impl.hh
@@ -75,7 +75,6 @@
static inline void partial_lu_decomp(const gene_matrix & X, gene_matrix & C, int N){
int N2 = N*N;
BLAS_FUNC(copy)(&N2, X, &intone, C, &intone);
- char uplo = 'L';
int info = 0;
int * ipiv = (int*)alloca(sizeof(int)*N);
BLAS_FUNC(getrf)(&N, &N, C, &N, ipiv, &info);
@@ -92,7 +91,7 @@
BLAS_FUNC(trsm)(&right, &lower, ¬rans, &nonunit, &N, &N, &fone, L, &N, X, &N);
}
- static inline void trmm(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){
+ static inline void trmm(gene_matrix & A, gene_matrix & B, gene_matrix & /*X*/, int N){
BLAS_FUNC(trmm)(&left, &lower, ¬rans,&nonunit, &N,&N,&fone,A,&N,B,&N);
}
@@ -101,7 +100,6 @@
static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int N){
int N2 = N*N;
BLAS_FUNC(copy)(&N2, X, &intone, C, &intone);
- char uplo = 'L';
int info = 0;
int * ipiv = (int*)alloca(sizeof(int)*N);
int * jpiv = (int*)alloca(sizeof(int)*N);
@@ -134,8 +132,6 @@
}
char uplo = 'U';
int info = 0;
- int ilo = 1;
- int ihi = N;
int bsize = 64;
int worksize = N*bsize;
SCALAR* d = new SCALAR[3*N+worksize];
diff --git a/bench/btl/libs/BLAS/c_interface_base.h b/bench/btl/libs/BLAS/c_interface_base.h
index 515d8dc..de61380 100644
--- a/bench/btl/libs/BLAS/c_interface_base.h
+++ b/bench/btl/libs/BLAS/c_interface_base.h
@@ -17,12 +17,12 @@
typedef real* gene_matrix;
typedef real* gene_vector;
- static void free_matrix(gene_matrix & A, int N){
- delete A;
+ static void free_matrix(gene_matrix & A, int /*N*/){
+ delete[] A;
}
static void free_vector(gene_vector & B){
- delete B;
+ delete[] B;
}
static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){
diff --git a/bench/btl/libs/BLAS/main.cpp b/bench/btl/libs/BLAS/main.cpp
index 8347c9f..564d55e 100644
--- a/bench/btl/libs/BLAS/main.cpp
+++ b/bench/btl/libs/BLAS/main.cpp
@@ -56,13 +56,13 @@
bench<Action_trmm<blas_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_cholesky<blas_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_partial_lu<blas_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+ bench<Action_cholesky<blas_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+ bench<Action_partial_lu<blas_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
#ifdef HAS_LAPACK
- bench<Action_lu_decomp<blas_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_hessenberg<blas_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_tridiagonalization<blas_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+// bench<Action_lu_decomp<blas_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+ bench<Action_hessenberg<blas_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+ bench<Action_tridiagonalization<blas_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
#endif
//bench<Action_lu_solve<blas_LU_solve_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
diff --git a/bench/btl/libs/STL/STL_interface.hh b/bench/btl/libs/STL/STL_interface.hh
index 93e76bd..ef4cc92 100644
--- a/bench/btl/libs/STL/STL_interface.hh
+++ b/bench/btl/libs/STL/STL_interface.hh
@@ -44,9 +44,9 @@
return "STL";
}
- static void free_matrix(gene_matrix & A, int N){}
+ static void free_matrix(gene_matrix & /*A*/, int /*N*/){}
- static void free_vector(gene_vector & B){}
+ static void free_vector(gene_vector & /*B*/){}
static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){
A = A_stl;
diff --git a/bench/btl/libs/blaze/CMakeLists.txt b/bench/btl/libs/blaze/CMakeLists.txt
new file mode 100644
index 0000000..e99a085
--- /dev/null
+++ b/bench/btl/libs/blaze/CMakeLists.txt
@@ -0,0 +1,13 @@
+
+find_package(BLAZE)
+find_package(Boost COMPONENTS system)
+if (BLAZE_FOUND AND Boost_FOUND)
+ include_directories(${BLAZE_INCLUDE_DIR} ${Boost_INCLUDE_DIRS})
+ btl_add_bench(btl_blaze main.cpp)
+ # Note: The newest blaze version requires C++14.
+ # Ideally, we should set this depending on the version of Blaze we found
+ set_property(TARGET btl_blaze PROPERTY CXX_STANDARD 14)
+ if(BUILD_btl_blaze)
+ target_link_libraries(btl_blaze ${Boost_LIBRARIES})
+ endif()
+endif ()
diff --git a/bench/btl/libs/blaze/blaze_interface.hh b/bench/btl/libs/blaze/blaze_interface.hh
new file mode 100644
index 0000000..ee15239
--- /dev/null
+++ b/bench/btl/libs/blaze/blaze_interface.hh
@@ -0,0 +1,140 @@
+//=====================================================
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//=====================================================
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+//
+#ifndef BLAZE_INTERFACE_HH
+#define BLAZE_INTERFACE_HH
+
+#include <blaze/Math.h>
+#include <blaze/Blaze.h>
+// using namespace blaze;
+
+#include <vector>
+
+template<class real>
+class blaze_interface {
+
+public :
+
+ typedef real real_type ;
+
+ typedef std::vector<real> stl_vector;
+ typedef std::vector<stl_vector > stl_matrix;
+
+ typedef blaze::DynamicMatrix<real,blaze::columnMajor> gene_matrix;
+ typedef blaze::DynamicVector<real> gene_vector;
+
+ static inline std::string name() { return "blaze"; }
+
+ static void free_matrix(gene_matrix & A, int N){
+ return ;
+ }
+
+ static void free_vector(gene_vector & B){
+ return ;
+ }
+
+ static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){
+ A.resize(A_stl[0].size(), A_stl.size());
+
+ for (int j=0; j<A_stl.size() ; j++){
+ for (int i=0; i<A_stl[j].size() ; i++){
+ A(i,j) = A_stl[j][i];
+ }
+ }
+ }
+
+ static inline void vector_from_stl(gene_vector & B, stl_vector & B_stl){
+ B.resize(B_stl.size());
+ for (int i=0; i<B_stl.size() ; i++){
+ B[i] = B_stl[i];
+ }
+ }
+
+ static inline void vector_to_stl(gene_vector & B, stl_vector & B_stl){
+ for (int i=0; i<B_stl.size() ; i++){
+ B_stl[i] = B[i];
+ }
+ }
+
+ static inline void matrix_to_stl(gene_matrix & A, stl_matrix & A_stl){
+ int N=A_stl.size();
+ for (int j=0;j<N;j++){
+ A_stl[j].resize(N);
+ for (int i=0;i<N;i++){
+ A_stl[j][i] = A(i,j);
+ }
+ }
+ }
+
+ static inline void matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int N){
+ X = (A*B);
+ }
+
+ static inline void transposed_matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int N){
+ X = (trans(A)*trans(B));
+ }
+
+ static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N){
+ X = (trans(A)*A);
+ }
+
+ static inline void aat_product(const gene_matrix & A, gene_matrix & X, int N){
+ X = (A*trans(A));
+ }
+
+ static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){
+ X = (A*B);
+ }
+
+ static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){
+ X = (trans(A)*B);
+ }
+
+ static inline void axpy(const real coef, const gene_vector & X, gene_vector & Y, int N){
+ Y += coef * X;
+ }
+
+ static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int N){
+ Y = a*X + b*Y;
+ }
+
+// static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){
+// C = X;
+// recursive_cholesky(C);
+// }
+
+// static inline void lu_decomp(const gene_matrix & X, gene_matrix & R, int N){
+// R = X;
+// std::vector<int> ipvt(N);
+// lu_factor(R, ipvt);
+// }
+
+// static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector & X, int N){
+// X = lower_trisolve(L, B);
+// }
+
+ static inline void copy_matrix(const gene_matrix & source, gene_matrix & cible, int N){
+ cible = source;
+ }
+
+ static inline void copy_vector(const gene_vector & source, gene_vector & cible, int N){
+ cible = source;
+ }
+
+};
+
+#endif
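For orientation, a tiny standalone sketch (assuming Blaze is installed and on the include path; not part of the benchmark itself) of the Blaze types and expressions the interface above wraps:

#include <blaze/Math.h>
#include <iostream>

int main()
{
  blaze::DynamicMatrix<double, blaze::columnMajor> A(2, 2);
  A(0,0) = 1.0; A(0,1) = 2.0;
  A(1,0) = 3.0; A(1,1) = 4.0;

  blaze::DynamicVector<double> x(2);
  x[0] = 1.0; x[1] = 1.0;

  blaze::DynamicVector<double> y = A * x;         // what matrix_vector_product maps to
  blaze::DynamicVector<double> z = trans(A) * x;  // what atv_product maps to

  std::cout << y[0] << " " << y[1] << "\n"
            << z[0] << " " << z[1] << "\n";
  return 0;
}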
diff --git a/bench/btl/libs/blaze/main.cpp b/bench/btl/libs/blaze/main.cpp
new file mode 100644
index 0000000..80e8f4e
--- /dev/null
+++ b/bench/btl/libs/blaze/main.cpp
@@ -0,0 +1,40 @@
+//=====================================================
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//=====================================================
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+//
+#include "utilities.h"
+#include "blaze_interface.hh"
+#include "bench.hh"
+#include "basic_actions.hh"
+
+BTL_MAIN;
+
+int main()
+{
+
+ bench<Action_axpy<blaze_interface<REAL_TYPE> > >(MIN_AXPY,MAX_AXPY,NB_POINT);
+ bench<Action_axpby<blaze_interface<REAL_TYPE> > >(MIN_AXPY,MAX_AXPY,NB_POINT);
+
+ bench<Action_matrix_vector_product<blaze_interface<REAL_TYPE> > >(MIN_MV,MAX_MV,NB_POINT);
+ bench<Action_atv_product<blaze_interface<REAL_TYPE> > >(MIN_MV,MAX_MV,NB_POINT);
+// bench<Action_matrix_matrix_product<blaze_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+// bench<Action_ata_product<blaze_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+// bench<Action_aat_product<blaze_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+
+ return 0;
+}
+
+
diff --git a/bench/btl/libs/eigen2/eigen2_interface.hh b/bench/btl/libs/eigen2/eigen2_interface.hh
index 47fe581..1deabda 100644
--- a/bench/btl/libs/eigen2/eigen2_interface.hh
+++ b/bench/btl/libs/eigen2/eigen2_interface.hh
@@ -47,7 +47,7 @@
{
#if defined(EIGEN_VECTORIZE_SSE)
if (SIZE==Dynamic) return "eigen2"; else return "tiny_eigen2";
- #elif defined(EIGEN_VECTORIZE_ALTIVEC)
+ #elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
if (SIZE==Dynamic) return "eigen2"; else return "tiny_eigen2";
#else
if (SIZE==Dynamic) return "eigen2_novec"; else return "tiny_eigen2_novec";
diff --git a/bench/btl/libs/eigen3/eigen3_interface.hh b/bench/btl/libs/eigen3/eigen3_interface.hh
index 31bcc1f..b821fd7 100644
--- a/bench/btl/libs/eigen3/eigen3_interface.hh
+++ b/bench/btl/libs/eigen3/eigen3_interface.hh
@@ -45,15 +45,15 @@
return EIGEN_MAKESTRING(BTL_PREFIX);
}
- static void free_matrix(gene_matrix & A, int N) {}
+ static void free_matrix(gene_matrix & /*A*/, int /*N*/) {}
- static void free_vector(gene_vector & B) {}
+ static void free_vector(gene_vector & /*B*/) {}
static BTL_DONT_INLINE void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){
A.resize(A_stl[0].size(), A_stl.size());
- for (int j=0; j<A_stl.size() ; j++){
- for (int i=0; i<A_stl[j].size() ; i++){
+ for (unsigned int j=0; j<A_stl.size() ; j++){
+ for (unsigned int i=0; i<A_stl[j].size() ; i++){
A.coeffRef(i,j) = A_stl[j][i];
}
}
@@ -62,19 +62,19 @@
static BTL_DONT_INLINE void vector_from_stl(gene_vector & B, stl_vector & B_stl){
B.resize(B_stl.size(),1);
- for (int i=0; i<B_stl.size() ; i++){
+ for (unsigned int i=0; i<B_stl.size() ; i++){
B.coeffRef(i) = B_stl[i];
}
}
static BTL_DONT_INLINE void vector_to_stl(gene_vector & B, stl_vector & B_stl){
- for (int i=0; i<B_stl.size() ; i++){
+ for (unsigned int i=0; i<B_stl.size() ; i++){
B_stl[i] = B.coeff(i);
}
}
static BTL_DONT_INLINE void matrix_to_stl(gene_matrix & A, stl_matrix & A_stl){
- int N=A_stl.size();
+ int N=A_stl.size();
for (int j=0;j<N;j++){
A_stl[j].resize(N);
@@ -84,28 +84,28 @@
}
}
- static inline void matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int N){
+ static inline void matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int /*N*/){
X.noalias() = A*B;
}
- static inline void transposed_matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int N){
+ static inline void transposed_matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int /*N*/){
X.noalias() = A.transpose()*B.transpose();
}
-// static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N){
+// static inline void ata_product(const gene_matrix & A, gene_matrix & X, int /*N*/){
// X.noalias() = A.transpose()*A;
// }
- static inline void aat_product(const gene_matrix & A, gene_matrix & X, int N){
+ static inline void aat_product(const gene_matrix & A, gene_matrix & X, int /*N*/){
X.template triangularView<Lower>().setZero();
X.template selfadjointView<Lower>().rankUpdate(A);
}
- static inline void matrix_vector_product(const gene_matrix & A, const gene_vector & B, gene_vector & X, int N){
+ static inline void matrix_vector_product(const gene_matrix & A, const gene_vector & B, gene_vector & X, int /*N*/){
X.noalias() = A*B;
}
- static inline void symv(const gene_matrix & A, const gene_vector & B, gene_vector & X, int N){
+ static inline void symv(const gene_matrix & A, const gene_vector & B, gene_vector & X, int /*N*/){
X.noalias() = (A.template selfadjointView<Lower>() * B);
// internal::product_selfadjoint_vector<real,0,LowerTriangularBit,false,false>(N,A.data(),N, B.data(), 1, X.data(), 1);
}
@@ -155,54 +155,54 @@
}
}
- static EIGEN_DONT_INLINE void syr2(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){
+ static EIGEN_DONT_INLINE void syr2(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){
// internal::product_selfadjoint_rank2_update<real,0,LowerTriangularBit>(N,A.data(),N, X.data(), 1, Y.data(), 1, -1);
for(int j=0; j<N; ++j)
A.col(j).tail(N-j) += X[j] * Y.tail(N-j) + Y[j] * X.tail(N-j);
}
- static EIGEN_DONT_INLINE void ger(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){
+ static EIGEN_DONT_INLINE void ger(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){
for(int j=0; j<N; ++j)
A.col(j) += X * Y[j];
}
- static EIGEN_DONT_INLINE void rot(gene_vector & A, gene_vector & B, real c, real s, int N){
+ static EIGEN_DONT_INLINE void rot(gene_vector & A, gene_vector & B, real c, real s, int /*N*/){
internal::apply_rotation_in_the_plane(A, B, JacobiRotation<real>(c,s));
}
- static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){
+ static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int /*N*/){
X.noalias() = (A.transpose()*B);
}
- static inline void axpy(real coef, const gene_vector & X, gene_vector & Y, int N){
+ static inline void axpy(real coef, const gene_vector & X, gene_vector & Y, int /*N*/){
Y += coef * X;
}
- static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int N){
+ static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int /*N*/){
Y = a*X + b*Y;
}
- static EIGEN_DONT_INLINE void copy_matrix(const gene_matrix & source, gene_matrix & cible, int N){
+ static EIGEN_DONT_INLINE void copy_matrix(const gene_matrix & source, gene_matrix & cible, int /*N*/){
cible = source;
}
- static EIGEN_DONT_INLINE void copy_vector(const gene_vector & source, gene_vector & cible, int N){
+ static EIGEN_DONT_INLINE void copy_vector(const gene_vector & source, gene_vector & cible, int /*N*/){
cible = source;
}
- static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector& X, int N){
+ static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector& X, int /*N*/){
X = L.template triangularView<Lower>().solve(B);
}
- static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int N){
+ static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int /*N*/){
X = L.template triangularView<Upper>().solve(B);
}
- static inline void trmm(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int N){
+ static inline void trmm(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int /*N*/){
X.noalias() = L.template triangularView<Lower>() * B;
}
- static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){
+ static inline void cholesky(const gene_matrix & X, gene_matrix & C, int /*N*/){
C = X;
internal::llt_inplace<real,Lower>::blocked(C);
//C = X.llt().matrixL();
@@ -211,11 +211,11 @@
// Cholesky<gene_matrix>::computeInPlaceBlock(C);
}
- static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int N){
+ static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int /*N*/){
C = X.fullPivLu().matrixLU();
}
- static inline void partial_lu_decomp(const gene_matrix & X, gene_matrix & C, int N){
+ static inline void partial_lu_decomp(const gene_matrix & X, gene_matrix & C, int N){
Matrix<DenseIndex,1,Dynamic> piv(N);
DenseIndex nb;
C = X;
@@ -223,13 +223,13 @@
// C = X.partialPivLu().matrixLU();
}
- static inline void tridiagonalization(const gene_matrix & X, gene_matrix & C, int N){
+ static inline void tridiagonalization(const gene_matrix & X, gene_matrix & C, int N){
typename Tridiagonalization<gene_matrix>::CoeffVectorType aux(N-1);
C = X;
internal::tridiagonalization_inplace(C, aux);
}
- static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int N){
+ static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int /*N*/){
C = HessenbergDecomposition<gene_matrix>(X).packedMatrix();
}
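A small self-contained sketch (illustrative only, using plain dynamic matrices rather than the templated gene_matrix) of the Eigen 3 idioms the interface above relies on for aat_product and trisolve_lower:

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(4, 4);
  VectorXd b = VectorXd::Random(4);

  // aat_product: compute X = A * A^T via a self-adjoint rank update; only the lower half of X is written.
  MatrixXd X = MatrixXd::Zero(4, 4);
  X.selfadjointView<Lower>().rankUpdate(A);

  // trisolve_lower: solve L y = b using only the lower-triangular part of A.
  VectorXd y = A.triangularView<Lower>().solve(b);

  return (X.rows() == 4 && y.size() == 4) ? 0 : 1;
}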
diff --git a/bench/btl/libs/eigen3/main_adv.cpp b/bench/btl/libs/eigen3/main_adv.cpp
index efe5857..9586535 100644
--- a/bench/btl/libs/eigen3/main_adv.cpp
+++ b/bench/btl/libs/eigen3/main_adv.cpp
@@ -29,14 +29,14 @@
int main()
{
- bench<Action_trisolve<eigen3_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_trisolve_matrix<eigen3_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_cholesky<eigen3_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_lu_decomp<eigen3_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_partial_lu<eigen3_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+ bench<Action_trisolve<eigen3_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+ bench<Action_trisolve_matrix<eigen3_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+ bench<Action_cholesky<eigen3_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+// bench<Action_lu_decomp<eigen3_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+ bench<Action_partial_lu<eigen3_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
- bench<Action_hessenberg<eigen3_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
- bench<Action_tridiagonalization<eigen3_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+// bench<Action_hessenberg<eigen3_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
+ bench<Action_tridiagonalization<eigen3_interface<REAL_TYPE> > >(MIN_LU,MAX_LU,NB_POINT);
return 0;
}
diff --git a/bench/btl/libs/tensors/CMakeLists.txt b/bench/btl/libs/tensors/CMakeLists.txt
new file mode 100644
index 0000000..09d6d8e
--- /dev/null
+++ b/bench/btl/libs/tensors/CMakeLists.txt
@@ -0,0 +1,44 @@
+
+
+if((NOT TENSOR_INCLUDE_DIR) AND Eigen_SOURCE_DIR)
+ # unless TENSOR_INCLUDE_DIR is defined, let's use current Eigen version
+ set(TENSOR_INCLUDE_DIR ${Eigen_SOURCE_DIR})
+ set(TENSOR_FOUND TRUE)
+else()
+ find_package(Tensor)
+endif()
+
+if (TENSOR_FOUND)
+
+ include_directories(${TENSOR_INCLUDE_DIR})
+ btl_add_bench(btl_tensor_linear main_linear.cpp)
+ btl_add_bench(btl_tensor_vecmat main_vecmat.cpp)
+ btl_add_bench(btl_tensor_matmat main_matmat.cpp)
+
+ btl_add_target_property(btl_tensor_linear COMPILE_FLAGS "-fno-exceptions -DBTL_PREFIX=tensor")
+ btl_add_target_property(btl_tensor_vecmat COMPILE_FLAGS "-fno-exceptions -DBTL_PREFIX=tensor")
+ btl_add_target_property(btl_tensor_matmat COMPILE_FLAGS "-fno-exceptions -DBTL_PREFIX=tensor")
+
+ option(BTL_BENCH_NOGCCVEC "also bench Eigen explicit vec without GCC's auto vec" OFF)
+ if(CMAKE_COMPILER_IS_GNUCXX AND BTL_BENCH_NOGCCVEC)
+ btl_add_bench(btl_tensor_nogccvec_linear main_linear.cpp)
+ btl_add_bench(btl_tensor_nogccvec_vecmat main_vecmat.cpp)
+ btl_add_bench(btl_tensor_nogccvec_matmat main_matmat.cpp)
+
+ btl_add_target_property(btl_tensor_nogccvec_linear COMPILE_FLAGS "-fno-exceptions -fno-tree-vectorize -DBTL_PREFIX=tensor_nogccvec")
+ btl_add_target_property(btl_tensor_nogccvec_vecmat COMPILE_FLAGS "-fno-exceptions -fno-tree-vectorize -DBTL_PREFIX=tensor_nogccvec")
+ btl_add_target_property(btl_tensor_nogccvec_matmat COMPILE_FLAGS "-fno-exceptions -fno-tree-vectorize -DBTL_PREFIX=tensor_nogccvec")
+ endif()
+
+
+ if(NOT BTL_NOVEC)
+ btl_add_bench(btl_tensor_novec_linear main_linear.cpp OFF)
+ btl_add_bench(btl_tensor_novec_vecmat main_vecmat.cpp OFF)
+ btl_add_bench(btl_tensor_novec_matmat main_matmat.cpp OFF)
+ btl_add_target_property(btl_tensor_novec_linear COMPILE_FLAGS "-fno-exceptions -DEIGEN_DONT_VECTORIZE -DBTL_PREFIX=tensor_novec")
+ btl_add_target_property(btl_tensor_novec_vecmat COMPILE_FLAGS "-fno-exceptions -DEIGEN_DONT_VECTORIZE -DBTL_PREFIX=tensor_novec")
+ btl_add_target_property(btl_tensor_novec_matmat COMPILE_FLAGS "-fno-exceptions -DEIGEN_DONT_VECTORIZE -DBTL_PREFIX=tensor_novec")
+
+ endif(NOT BTL_NOVEC)
+
+endif (TENSOR_FOUND)
diff --git a/bench/btl/libs/tensors/main_linear.cpp b/bench/btl/libs/tensors/main_linear.cpp
new file mode 100644
index 0000000..e257f1e
--- /dev/null
+++ b/bench/btl/libs/tensors/main_linear.cpp
@@ -0,0 +1,23 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "utilities.h"
+#include "tensor_interface.hh"
+#include "bench.hh"
+#include "basic_actions.hh"
+
+BTL_MAIN;
+
+int main()
+{
+ bench<Action_axpy<tensor_interface<REAL_TYPE> > >(MIN_AXPY,MAX_AXPY,NB_POINT);
+ bench<Action_axpby<tensor_interface<REAL_TYPE> > >(MIN_AXPY,MAX_AXPY,NB_POINT);
+
+ return 0;
+}
diff --git a/bench/btl/libs/tensors/main_matmat.cpp b/bench/btl/libs/tensors/main_matmat.cpp
new file mode 100644
index 0000000..675fcfc
--- /dev/null
+++ b/bench/btl/libs/tensors/main_matmat.cpp
@@ -0,0 +1,21 @@
+//=====================================================
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//=====================================================
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+#include "utilities.h"
+#include "tensor_interface.hh"
+#include "bench.hh"
+#include "basic_actions.hh"
+
+BTL_MAIN;
+
+int main()
+{
+ bench<Action_matrix_matrix_product<tensor_interface<REAL_TYPE> > >(MIN_MM,MAX_MM,NB_POINT);
+
+ return 0;
+}
diff --git a/bench/btl/libs/tensors/main_vecmat.cpp b/bench/btl/libs/tensors/main_vecmat.cpp
new file mode 100644
index 0000000..1af00c8
--- /dev/null
+++ b/bench/btl/libs/tensors/main_vecmat.cpp
@@ -0,0 +1,21 @@
+//=====================================================
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//=====================================================
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+#include "utilities.h"
+#include "tensor_interface.hh"
+#include "bench.hh"
+#include "basic_actions.hh"
+
+BTL_MAIN;
+
+int main()
+{
+ bench<Action_matrix_vector_product<tensor_interface<REAL_TYPE> > >(MIN_MV,MAX_MV,NB_POINT);
+
+ return 0;
+}
diff --git a/bench/btl/libs/tensors/tensor_interface.hh b/bench/btl/libs/tensors/tensor_interface.hh
new file mode 100644
index 0000000..97b8e0f
--- /dev/null
+++ b/bench/btl/libs/tensors/tensor_interface.hh
@@ -0,0 +1,105 @@
+//=====================================================
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//=====================================================
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+#ifndef TENSOR_INTERFACE_HH
+#define TENSOR_INTERFACE_HH
+
+#include <unsupported/Eigen/CXX11/Tensor>
+#include <vector>
+#include "btl.hh"
+
+using namespace Eigen;
+
+template<class real>
+class tensor_interface
+{
+public :
+ typedef real real_type;
+ typedef typename Eigen::Tensor<real,2>::Index Index;
+
+ typedef std::vector<real> stl_vector;
+ typedef std::vector<stl_vector> stl_matrix;
+
+ typedef Eigen::Tensor<real,2> gene_matrix;
+ typedef Eigen::Tensor<real,1> gene_vector;
+
+
+ static inline std::string name( void )
+ {
+ return EIGEN_MAKESTRING(BTL_PREFIX);
+ }
+
+ static void free_matrix(gene_matrix & /*A*/, int /*N*/) {}
+
+ static void free_vector(gene_vector & /*B*/) {}
+
+ static BTL_DONT_INLINE void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){
+ A.resize(Eigen::array<Index,2>(A_stl[0].size(), A_stl.size()));
+
+ for (unsigned int j=0; j<A_stl.size() ; j++){
+ for (unsigned int i=0; i<A_stl[j].size() ; i++){
+ A.coeffRef(Eigen::array<Index,2>(i,j)) = A_stl[j][i];
+ }
+ }
+ }
+
+ static BTL_DONT_INLINE void vector_from_stl(gene_vector & B, stl_vector & B_stl){
+ B.resize(B_stl.size());
+
+ for (unsigned int i=0; i<B_stl.size() ; i++){
+ B.coeffRef(i) = B_stl[i];
+ }
+ }
+
+ static BTL_DONT_INLINE void vector_to_stl(gene_vector & B, stl_vector & B_stl){
+ for (unsigned int i=0; i<B_stl.size() ; i++){
+ B_stl[i] = B.coeff(i);
+ }
+ }
+
+ static BTL_DONT_INLINE void matrix_to_stl(gene_matrix & A, stl_matrix & A_stl){
+ int N=A_stl.size();
+
+ for (int j=0;j<N;j++){
+ A_stl[j].resize(N);
+ for (int i=0;i<N;i++){
+ A_stl[j][i] = A.coeff(Eigen::array<Index,2>(i,j));
+ }
+ }
+ }
+
+ static inline void matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int /*N*/){
+ typedef typename Eigen::Tensor<real_type, 1>::DimensionPair DimPair;
+ const Eigen::array<DimPair, 1> dims(DimPair(1, 0));
+ X/*.noalias()*/ = A.contract(B, dims);
+ }
+
+ static inline void matrix_vector_product(const gene_matrix & A, const gene_vector & B, gene_vector & X, int /*N*/){
+ typedef typename Eigen::Tensor<real_type, 1>::DimensionPair DimPair;
+ const Eigen::array<DimPair, 1> dims(DimPair(1, 0));
+ X/*.noalias()*/ = A.contract(B, dims);
+ }
+
+ static inline void axpy(real coef, const gene_vector & X, gene_vector & Y, int /*N*/){
+ Y += X.constant(coef) * X;
+ }
+
+ static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int /*N*/){
+ Y = X.constant(a)*X + Y.constant(b)*Y;
+ }
+
+ static EIGEN_DONT_INLINE void copy_matrix(const gene_matrix & source, gene_matrix & cible, int /*N*/){
+ cible = source;
+ }
+
+ static EIGEN_DONT_INLINE void copy_vector(const gene_vector & source, gene_vector & cible, int /*N*/){
+ cible = source;
+ }
+};
+
+#endif
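To make the contraction-based products above concrete, here is a minimal standalone sketch (assuming the unsupported Tensor module shipped with this tree, where DimensionPair is the pair type used above; illustrative only):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main()
{
  Eigen::Tensor<float, 2> A(2, 3), B(3, 4);
  A.setConstant(1.0f);
  B.setConstant(2.0f);

  // DimPair(1, 0) contracts A's dimension 1 with B's dimension 0,
  // i.e. the plain matrix product used by matrix_matrix_product above.
  typedef Eigen::Tensor<float, 1>::DimensionPair DimPair;
  const Eigen::array<DimPair, 1> dims(DimPair(1, 0));

  Eigen::Tensor<float, 2> C = A.contract(B, dims);  // 2x4 tensor, every entry equals 6
  std::cout << C << std::endl;
  return 0;
}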
diff --git a/bench/dense_solvers.cpp b/bench/dense_solvers.cpp
new file mode 100644
index 0000000..24343dc
--- /dev/null
+++ b/bench/dense_solvers.cpp
@@ -0,0 +1,186 @@
+#include <iostream>
+#include "BenchTimer.h"
+#include <Eigen/Dense>
+#include <map>
+#include <vector>
+#include <string>
+#include <sstream>
+using namespace Eigen;
+
+std::map<std::string,Array<float,1,8,DontAlign|RowMajor> > results;
+std::vector<std::string> labels;
+std::vector<Array2i> sizes;
+
+template<typename Solver,typename MatrixType>
+EIGEN_DONT_INLINE
+void compute_norm_equation(Solver &solver, const MatrixType &A) {
+ if(A.rows()!=A.cols())
+ solver.compute(A.transpose()*A);
+ else
+ solver.compute(A);
+}
+
+template<typename Solver,typename MatrixType>
+EIGEN_DONT_INLINE
+void compute(Solver &solver, const MatrixType &A) {
+ solver.compute(A);
+}
+
+template<typename Scalar,int Size>
+void bench(int id, int rows, int size = Size)
+{
+ typedef Matrix<Scalar,Dynamic,Size> Mat;
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatDyn;
+ typedef Matrix<Scalar,Size,Size> MatSquare;
+ Mat A(rows,size);
+ A.setRandom();
+ if(rows==size)
+ A = A*A.adjoint();
+ BenchTimer t_llt, t_ldlt, t_lu, t_fplu, t_qr, t_cpqr, t_cod, t_fpqr, t_jsvd, t_bdcsvd;
+
+ int svd_opt = ComputeThinU|ComputeThinV;
+
+ int tries = 5;
+ int rep = 1000/size;
+ if(rep==0) rep = 1;
+// rep = rep*rep;
+
+ LLT<MatSquare> llt(size);
+ LDLT<MatSquare> ldlt(size);
+ PartialPivLU<MatSquare> lu(size);
+ FullPivLU<MatSquare> fplu(size,size);
+ HouseholderQR<Mat> qr(A.rows(),A.cols());
+ ColPivHouseholderQR<Mat> cpqr(A.rows(),A.cols());
+ CompleteOrthogonalDecomposition<Mat> cod(A.rows(),A.cols());
+ FullPivHouseholderQR<Mat> fpqr(A.rows(),A.cols());
+ JacobiSVD<MatDyn> jsvd(A.rows(),A.cols());
+ BDCSVD<MatDyn> bdcsvd(A.rows(),A.cols());
+
+ BENCH(t_llt, tries, rep, compute_norm_equation(llt,A));
+ BENCH(t_ldlt, tries, rep, compute_norm_equation(ldlt,A));
+ BENCH(t_lu, tries, rep, compute_norm_equation(lu,A));
+ if(size<=1000)
+ BENCH(t_fplu, tries, rep, compute_norm_equation(fplu,A));
+ BENCH(t_qr, tries, rep, compute(qr,A));
+ BENCH(t_cpqr, tries, rep, compute(cpqr,A));
+ BENCH(t_cod, tries, rep, compute(cod,A));
+ if(size*rows<=10000000)
+ BENCH(t_fpqr, tries, rep, compute(fpqr,A));
+ if(size<500) // JacobiSVD is too slow for large matrices
+ BENCH(t_jsvd, tries, rep, jsvd.compute(A,svd_opt));
+// if(size*rows<=20000000)
+ BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A,svd_opt));
+
+ results["LLT"][id] = t_llt.best();
+ results["LDLT"][id] = t_ldlt.best();
+ results["PartialPivLU"][id] = t_lu.best();
+ results["FullPivLU"][id] = t_fplu.best();
+ results["HouseholderQR"][id] = t_qr.best();
+ results["ColPivHouseholderQR"][id] = t_cpqr.best();
+ results["CompleteOrthogonalDecomposition"][id] = t_cod.best();
+ results["FullPivHouseholderQR"][id] = t_fpqr.best();
+ results["JacobiSVD"][id] = t_jsvd.best();
+ results["BDCSVD"][id] = t_bdcsvd.best();
+}
+
+
+int main()
+{
+ labels.push_back("LLT");
+ labels.push_back("LDLT");
+ labels.push_back("PartialPivLU");
+ labels.push_back("FullPivLU");
+ labels.push_back("HouseholderQR");
+ labels.push_back("ColPivHouseholderQR");
+ labels.push_back("CompleteOrthogonalDecomposition");
+ labels.push_back("FullPivHouseholderQR");
+ labels.push_back("JacobiSVD");
+ labels.push_back("BDCSVD");
+
+ for(int i=0; i<labels.size(); ++i)
+ results[labels[i]].fill(-1);
+
+ const int small = 8;
+ sizes.push_back(Array2i(small,small));
+ sizes.push_back(Array2i(100,100));
+ sizes.push_back(Array2i(1000,1000));
+ sizes.push_back(Array2i(4000,4000));
+ sizes.push_back(Array2i(10000,small));
+ sizes.push_back(Array2i(10000,100));
+ sizes.push_back(Array2i(10000,1000));
+ sizes.push_back(Array2i(10000,4000));
+
+ using namespace std;
+
+ for(int k=0; k<sizes.size(); ++k)
+ {
+ cout << sizes[k](0) << "x" << sizes[k](1) << "...\n";
+ bench<float,Dynamic>(k,sizes[k](0),sizes[k](1));
+ }
+
+ cout.width(32);
+ cout << "solver/size";
+ cout << " ";
+ for(int k=0; k<sizes.size(); ++k)
+ {
+ std::stringstream ss;
+ ss << sizes[k](0) << "x" << sizes[k](1);
+ cout.width(10); cout << ss.str(); cout << " ";
+ }
+ cout << endl;
+
+
+ for(int i=0; i<labels.size(); ++i)
+ {
+ cout.width(32); cout << labels[i]; cout << " ";
+ ArrayXf r = (results[labels[i]]*100000.f).floor()/100.f;
+ for(int k=0; k<sizes.size(); ++k)
+ {
+ cout.width(10);
+ if(r(k)>=1e6) cout << "-";
+ else cout << r(k);
+ cout << " ";
+ }
+ cout << endl;
+ }
+
+ // HTML output
+ cout << "<table class=\"manual\">" << endl;
+ cout << "<tr><th>solver/size</th>" << endl;
+ for(int k=0; k<sizes.size(); ++k)
+ cout << " <th>" << sizes[k](0) << "x" << sizes[k](1) << "</th>";
+ cout << "</tr>" << endl;
+ for(int i=0; i<labels.size(); ++i)
+ {
+ cout << "<tr";
+ if(i%2==1) cout << " class=\"alt\"";
+ cout << "><td>" << labels[i] << "</td>";
+ ArrayXf r = (results[labels[i]]*100000.f).floor()/100.f;
+ for(int k=0; k<sizes.size(); ++k)
+ {
+ if(r(k)>=1e6) cout << "<td>-</td>";
+ else
+ {
+ cout << "<td>" << r(k);
+ if(i>0)
+ cout << " (x" << numext::round(10.f*results[labels[i]](k)/results["LLT"](k))/10.f << ")";
+ if(i<4 && sizes[k](0)!=sizes[k](1))
+ cout << " <sup><a href=\"#note_ls\">*</a></sup>";
+ cout << "</td>";
+ }
+ }
+ cout << "</tr>" << endl;
+ }
+ cout << "</table>" << endl;
+
+// cout << "LLT (ms) " << (results["LLT"]*1000.).format(fmt) << "\n";
+// cout << "LDLT (%) " << (results["LDLT"]/results["LLT"]).format(fmt) << "\n";
+// cout << "PartialPivLU (%) " << (results["PartialPivLU"]/results["LLT"]).format(fmt) << "\n";
+// cout << "FullPivLU (%) " << (results["FullPivLU"]/results["LLT"]).format(fmt) << "\n";
+// cout << "HouseholderQR (%) " << (results["HouseholderQR"]/results["LLT"]).format(fmt) << "\n";
+// cout << "ColPivHouseholderQR (%) " << (results["ColPivHouseholderQR"]/results["LLT"]).format(fmt) << "\n";
+// cout << "CompleteOrthogonalDecomposition (%) " << (results["CompleteOrthogonalDecomposition"]/results["LLT"]).format(fmt) << "\n";
+// cout << "FullPivHouseholderQR (%) " << (results["FullPivHouseholderQR"]/results["LLT"]).format(fmt) << "\n";
+// cout << "JacobiSVD (%) " << (results["JacobiSVD"]/results["LLT"]).format(fmt) << "\n";
+// cout << "BDCSVD (%) " << (results["BDCSVD"]/results["LLT"]).format(fmt) << "\n";
+}
diff --git a/bench/eig33.cpp b/bench/eig33.cpp
index 1608b99..47947a9 100644
--- a/bench/eig33.cpp
+++ b/bench/eig33.cpp
@@ -50,7 +50,7 @@
{
typedef typename Matrix::Scalar Scalar;
const Scalar s_inv3 = 1.0/3.0;
- const Scalar s_sqrt3 = internal::sqrt(Scalar(3.0));
+ const Scalar s_sqrt3 = std::sqrt(Scalar(3.0));
// The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The
// eigenvalues are the roots to this equation, all guaranteed to be
@@ -73,23 +73,13 @@
q = Scalar(0);
// Compute the eigenvalues by solving for the roots of the polynomial.
- Scalar rho = internal::sqrt(-a_over_3);
- Scalar theta = std::atan2(internal::sqrt(-q),half_b)*s_inv3;
- Scalar cos_theta = internal::cos(theta);
- Scalar sin_theta = internal::sin(theta);
- roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta;
- roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
- roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);
-
- // Sort in increasing order.
- if (roots(0) >= roots(1))
- std::swap(roots(0),roots(1));
- if (roots(1) >= roots(2))
- {
- std::swap(roots(1),roots(2));
- if (roots(0) >= roots(1))
- std::swap(roots(0),roots(1));
- }
+ Scalar rho = std::sqrt(-a_over_3);
+ Scalar theta = std::atan2(std::sqrt(-q),half_b)*s_inv3;
+ Scalar cos_theta = std::cos(theta);
+ Scalar sin_theta = std::sin(theta);
+ roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
+ roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
+ roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);
}
template<typename Matrix, typename Vector>
@@ -99,9 +89,12 @@
// Scale the matrix so its entries are in [-1,1]. The scaling is applied
// only when at least one matrix entry has magnitude larger than 1.
- Scalar scale = mat.cwiseAbs()/*.template triangularView<Lower>()*/.maxCoeff();
+ Scalar shift = mat.trace()/3;
+ Matrix scaledMat = mat;
+ scaledMat.diagonal().array() -= shift;
+ Scalar scale = scaledMat.cwiseAbs()/*.template triangularView<Lower>()*/.maxCoeff();
scale = std::max(scale,Scalar(1));
- Matrix scaledMat = mat / scale;
+ scaledMat/=scale;
// Compute the eigenvalues
// scaledMat.setZero();
@@ -166,6 +159,7 @@
// Rescale back to the original size.
evals *= scale;
+ evals.array()+=shift;
}
int main()
@@ -173,24 +167,29 @@
BenchTimer t;
int tries = 10;
int rep = 400000;
- typedef Matrix3f Mat;
- typedef Vector3f Vec;
+ typedef Matrix3d Mat;
+ typedef Vector3d Vec;
Mat A = Mat::Random(3,3);
A = A.adjoint() * A;
+// Mat Q = A.householderQr().householderQ();
+// A = Q * Vec(2.2424567,2.2424566,7.454353).asDiagonal() * Q.transpose();
SelfAdjointEigenSolver<Mat> eig(A);
BENCH(t, tries, rep, eig.compute(A));
- std::cout << "Eigen: " << t.best() << "s\n";
+ std::cout << "Eigen iterative: " << t.best() << "s\n";
+
+ BENCH(t, tries, rep, eig.computeDirect(A));
+ std::cout << "Eigen direct : " << t.best() << "s\n";
Mat evecs;
Vec evals;
BENCH(t, tries, rep, eigen33(A,evecs,evals));
std::cout << "Direct: " << t.best() << "s\n\n";
- std::cerr << "Eigenvalue/eigenvector diffs:\n";
- std::cerr << (evals - eig.eigenvalues()).transpose() << "\n";
- for(int k=0;k<3;++k)
- if(evecs.col(k).dot(eig.eigenvectors().col(k))<0)
- evecs.col(k) = -evecs.col(k);
- std::cerr << evecs - eig.eigenvectors() << "\n\n";
+// std::cerr << "Eigenvalue/eigenvector diffs:\n";
+// std::cerr << (evals - eig.eigenvalues()).transpose() << "\n";
+// for(int k=0;k<3;++k)
+// if(evecs.col(k).dot(eig.eigenvectors().col(k))<0)
+// evecs.col(k) = -evecs.col(k);
+// std::cerr << evecs - eig.eigenvectors() << "\n\n";
}
diff --git a/bench/perf_monitoring/gemm/changesets.txt b/bench/perf_monitoring/gemm/changesets.txt
new file mode 100644
index 0000000..af8eb9b
--- /dev/null
+++ b/bench/perf_monitoring/gemm/changesets.txt
@@ -0,0 +1,61 @@
+#3.0.1
+#3.1.1
+#3.2.0
+3.2.4
+#5745:37f59e65eb6c
+5891:d8652709345d # introduce AVX
+#5893:24b4dc92c6d3 # merge
+5895:997c2ef9fc8b # introduce FMA
+#5904:e1eafd14eaa1 # complex and AVX
+5908:f8ee3c721251 # improve packing with ptranspose
+#5921:ca808bb456b0 # merge
+#5927:8b1001f9e3ac
+5937:5a4ca1ad8c53 # New gebp kernel handling up to 3 packets x 4 register-level blocks
+#5949:f3488f4e45b2 # merge
+#5969:e09031dccfd9 # Disable 3pX4 kernel on Altivec
+#5992:4a429f5e0483 # merge
+before-evaluators
+#6334:f6a45e5b8b7c # Implement evaluator for sparse outer products
+#6639:c9121c60b5c7
+#6655:06f163b5221f # Properly detect FMA support on ARM
+#6677:700e023044e7 # FMA has been wrongly disabled
+#6681:11d31dafb0e3
+#6699:5e6e8e10aad1 # merge default to tensors
+#6726:ff2d2388e7b9 # merge default to tensors
+#6742:0cbd6195e829 # merge default to tensors
+#6747:853d2bafeb8f # Generalized the gebp apis
+6765:71584fd55762 # Made the blocking computation aware of the l3 cache; Also optimized the blocking parameters to take into account the number of threads used for a computation
+#6781:9cc5a931b2c6 # generalized gemv
+#6792:f6e1daab600a # ensured that contractions that can be reduced to a matrix vector product
+#6844:039efd86b75c # merge tensor
+6845:7333ed40c6ef # change prefetching in gebp
+#6856:b5be5e10eb7f # merge index conversion
+#6893:c3a64aba7c70 # clean blocking size computation
+#6898:6fb31ebe6492 # rotating kernel for ARM
+6899:877facace746 # rotating kernel for ARM only
+#6904:c250623ae9fa # result_of
+6921:915f1b1fc158 # fix prefetching change for ARM
+6923:9ff25f6dacc6 # prefetching
+6933:52572e60b5d3 # blocking size strategy
+6937:c8c042f286b2 # avoid redundant pack_rhs
+6981:7e5d6f78da59 # dynamic loop swapping
+6984:45f26866c091 # rm dynamic loop swapping, adjust lhs's micro panel height to fully exploit L1 cache
+6986:a675d05b6f8f # blocking heuristic: block on the rhs in L1 if the lhs fit in L1.
+7013:f875e75f07e5 # organize a little our default cache sizes, and use a saner default L1 outside of x86 (10% faster on Nexus 5)
+7015:8aad8f35c955 # Refactor computeProductBlockingSizes to make room for the possibility of using lookup tables
+7016:a58d253e8c91 # Polish lookup tables generation
+7018:9b27294a8186 # actual_panel_rows computation should always be resilient to parameters not consistent with the known L1 cache size, see comment
+7019:c758b1e2c073 # Provide an empirical lookup table for blocking sizes measured on a Nexus 5. Only for float, only for Android on ARM 32bit for now.
+7085:627e039fba68 # Bug 986: add support for coefficient-based product with 0 depth.
+7098:b6f1db9cf9ec # Bug 992: don't select a 3p GEMM path with non-vectorizable scalar types, this hits unsupported paths in symm/triangular products code
+7591:09a8e2186610 # 3.3-alpha1
+7650:b0f3c8f43025 # help clang inlining
+#8744:74b789ada92a # Improved the matrix multiplication blocking in the case where mr is not a power of 2 (e.g on Haswell CPUs)
+8789:efcb912e4356 # Made the index type a template parameter to evaluateProductBlockingSizes. Use numext::mini and numext::maxi instead of std::min/std::max to compute blocking sizes
+8972:81d53c711775 # Don't optimize the processing of the last rows of a matrix matrix product in cases that violate the assumptions made by the optimized code path
+8985:d935df21a082 # Remove the rotating kernel.
+8988:6c2dc56e73b3 # Bug 256: enable vectorization with unaligned loads/stores.
+9148:b8b8c421e36c # Relax mixing-type constraints for binary coefficient-wise operators
+9174:d228bc282ac9 # merge
+9212:c90098affa7b # Fix performance regression introduced in changeset 8aad8f35c955
+9213:9f1c14e4694b # Fix performance regression in dgemm introduced by changeset 81d53c711775
diff --git a/bench/perf_monitoring/gemm/gemm.cpp b/bench/perf_monitoring/gemm/gemm.cpp
new file mode 100644
index 0000000..614bd47
--- /dev/null
+++ b/bench/perf_monitoring/gemm/gemm.cpp
@@ -0,0 +1,67 @@
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <Eigen/Core>
+#include "../../BenchTimer.h"
+using namespace Eigen;
+
+#ifndef SCALAR
+#error SCALAR must be defined
+#endif
+
+typedef SCALAR Scalar;
+
+typedef Matrix<Scalar,Dynamic,Dynamic> Mat;
+
+EIGEN_DONT_INLINE
+void gemm(const Mat &A, const Mat &B, Mat &C)
+{
+ C.noalias() += A * B;
+}
+
+EIGEN_DONT_INLINE
+double bench(long m, long n, long k)
+{
+ Mat A(m,k);
+ Mat B(k,n);
+ Mat C(m,n);
+ A.setRandom();
+ B.setRandom();
+ C.setZero();
+
+ BenchTimer t;
+
+ double up = 1e8*4/sizeof(Scalar);
+ double tm0 = 4, tm1 = 10;
+ if(NumTraits<Scalar>::IsComplex)
+ {
+ up /= 4;
+ tm0 = 2;
+ tm1 = 4;
+ }
+
+ double flops = 2. * m * n * k;
+ long rep = std::max(1., std::min(100., up/flops) );
+ long tries = std::max(tm0, std::min(tm1, up/flops) );
+
+ BENCH(t, tries, rep, gemm(A,B,C));
+
+ return 1e-9 * rep * flops / t.best();
+}
+
+int main(int argc, char **argv)
+{
+ std::vector<double> results;
+
+ std::ifstream settings("gemm_settings.txt");
+ long m, n, k;
+ while(settings >> m >> n >> k)
+ {
+ //std::cerr << " Testing " << m << " " << n << " " << k << std::endl;
+ results.push_back( bench(m, n, k) );
+ }
+
+ std::cout << RowVectorXd::Map(results.data(), results.size());
+
+ return 0;
+}
diff --git a/bench/perf_monitoring/gemm/gemm_settings.txt b/bench/perf_monitoring/gemm/gemm_settings.txt
new file mode 100644
index 0000000..5c43e1c
--- /dev/null
+++ b/bench/perf_monitoring/gemm/gemm_settings.txt
@@ -0,0 +1,15 @@
+8 8 8
+9 9 9
+24 24 24
+239 239 239
+240 240 240
+2400 24 24
+24 2400 24
+24 24 2400
+24 2400 2400
+2400 24 2400
+2400 2400 24
+2400 2400 64
+4800 23 160
+23 4800 160
+2400 2400 2400
diff --git a/bench/perf_monitoring/gemm/lazy_gemm.cpp b/bench/perf_monitoring/gemm/lazy_gemm.cpp
new file mode 100644
index 0000000..6dc3701
--- /dev/null
+++ b/bench/perf_monitoring/gemm/lazy_gemm.cpp
@@ -0,0 +1,98 @@
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <Eigen/Core>
+#include "../../BenchTimer.h"
+using namespace Eigen;
+
+#ifndef SCALAR
+#error SCALAR must be defined
+#endif
+
+typedef SCALAR Scalar;
+
+template<typename MatA, typename MatB, typename MatC>
+EIGEN_DONT_INLINE
+void lazy_gemm(const MatA &A, const MatB &B, MatC &C)
+{
+// escape((void*)A.data());
+// escape((void*)B.data());
+ C.noalias() += A.lazyProduct(B);
+// escape((void*)C.data());
+}
+
+template<int m, int n, int k, int TA>
+EIGEN_DONT_INLINE
+double bench()
+{
+ typedef Matrix<Scalar,m,k,TA> MatA;
+ typedef Matrix<Scalar,k,n> MatB;
+ typedef Matrix<Scalar,m,n> MatC;
+
+ MatA A(m,k);
+ MatB B(k,n);
+ MatC C(m,n);
+ A.setRandom();
+ B.setRandom();
+ C.setZero();
+
+ BenchTimer t;
+
+ double up = 1e7*4/sizeof(Scalar);
+ double tm0 = 10, tm1 = 20;
+
+ double flops = 2. * m * n * k;
+ long rep = std::max(10., std::min(10000., up/flops) );
+ long tries = std::max(tm0, std::min(tm1, up/flops) );
+
+ BENCH(t, tries, rep, lazy_gemm(A,B,C));
+
+ return 1e-9 * rep * flops / t.best();
+}
+
+template<int m, int n, int k>
+double bench_t(int t)
+{
+ if(t)
+ return bench<m,n,k,RowMajor>();
+ else
+ return bench<m,n,k,0>();
+}
+
+EIGEN_DONT_INLINE
+double bench_mnk(int m, int n, int k, int t)
+{
+ int id = m*10000 + n*100 + k;
+ switch(id) {
+ case 10101 : return bench_t< 1, 1, 1>(t); break;
+ case 20202 : return bench_t< 2, 2, 2>(t); break;
+ case 30303 : return bench_t< 3, 3, 3>(t); break;
+ case 40404 : return bench_t< 4, 4, 4>(t); break;
+ case 50505 : return bench_t< 5, 5, 5>(t); break;
+ case 60606 : return bench_t< 6, 6, 6>(t); break;
+ case 70707 : return bench_t< 7, 7, 7>(t); break;
+ case 80808 : return bench_t< 8, 8, 8>(t); break;
+ case 90909 : return bench_t< 9, 9, 9>(t); break;
+ case 101010 : return bench_t<10,10,10>(t); break;
+ case 111111 : return bench_t<11,11,11>(t); break;
+ case 121212 : return bench_t<12,12,12>(t); break;
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ std::vector<double> results;
+
+ std::ifstream settings("lazy_gemm_settings.txt");
+ long m, n, k, t;
+ while(settings >> m >> n >> k >> t)
+ {
+ //std::cerr << " Testing " << m << " " << n << " " << k << std::endl;
+ results.push_back( bench_mnk(m, n, k, t) );
+ }
+
+ std::cout << RowVectorXd::Map(results.data(), results.size());
+
+ return 0;
+}
diff --git a/bench/perf_monitoring/gemm/lazy_gemm_settings.txt b/bench/perf_monitoring/gemm/lazy_gemm_settings.txt
new file mode 100644
index 0000000..407d5d4
--- /dev/null
+++ b/bench/perf_monitoring/gemm/lazy_gemm_settings.txt
@@ -0,0 +1,15 @@
+1 1 1 0
+2 2 2 0
+3 3 3 0
+4 4 4 0
+4 4 4 1
+5 5 5 0
+6 6 6 0
+7 7 7 0
+7 7 7 1
+8 8 8 0
+9 9 9 0
+10 10 10 0
+11 11 11 0
+12 12 12 0
+12 12 12 1
diff --git a/bench/perf_monitoring/gemm/make_plot.sh b/bench/perf_monitoring/gemm/make_plot.sh
new file mode 100755
index 0000000..cd3214a
--- /dev/null
+++ b/bench/perf_monitoring/gemm/make_plot.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# base name of the bench
+# it reads $1.out
+# and generates $1.pdf
+WHAT=$1
+bench=$2
+
+header="rev "
+while read line
+do
+ if [ ! -z "$line" ]; then
+ header="$header \"$line\""
+ fi
+done < $bench"_settings.txt"
+
+echo $header > $WHAT.out.header
+cat $WHAT.out >> $WHAT.out.header
+
+
+echo "set title '$WHAT'" > $WHAT.gnuplot
+echo "set key autotitle columnhead outside " >> $WHAT.gnuplot
+echo "set xtics rotate 1" >> $WHAT.gnuplot
+
+echo "set term pdf color rounded enhanced fontscale 0.35 size 7in,5in" >> $WHAT.gnuplot
+echo set output "'"$WHAT.pdf"'" >> $WHAT.gnuplot
+
+col=`cat $bench"_settings.txt" | wc -l`
+echo "plot for [col=2:$col+1] '$WHAT.out.header' using 0:col:xticlabels(1) with lines" >> $WHAT.gnuplot
+echo " " >> $WHAT.gnuplot
+
+gnuplot -persist < $WHAT.gnuplot
+
+# generate a png file
+# convert -background white -density 120 -rotate 90 -resize 800 +dither -colors 256 -quality 0 $WHAT.ps -background white -flatten .$WHAT.png
+
+# clean
+rm $WHAT.out.header $WHAT.gnuplot
\ No newline at end of file
diff --git a/bench/perf_monitoring/gemm/run.sh b/bench/perf_monitoring/gemm/run.sh
new file mode 100755
index 0000000..9d6ee40
--- /dev/null
+++ b/bench/perf_monitoring/gemm/run.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+# ./run.sh gemm
+# ./run.sh lazy_gemm
+
+# Examples of environment variables to be set:
+# PREFIX="haswell-fma-"
+# CXX_FLAGS="-mfma"
+
+# Options:
+# -up : recompute all changesets, even those with existing data, and keep the best results when merging with previous runs
+# -s : recompute only the changesets listed on the command line and keep the best results
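+#
+# A typical invocation combining these (illustrative example, not from the original script):
+#   PREFIX="haswell-fma-" CXX_FLAGS="-mfma" ./run.sh gemm -up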
+
+bench=$1
+
+if echo "$*" | grep '\-up' > /dev/null; then
+ update=true
+else
+ update=false
+fi
+
+if echo "$*" | grep '\-s' > /dev/null; then
+ selected=true
+else
+ selected=false
+fi
+
+global_args="$*"
+
+if [ $selected == true ]; then
+ echo "Recompute selected changesets only and keep bests"
+elif [ $update == true ]; then
+ echo "(Re-)Compute all changesets and keep bests"
+else
+ echo "Skip previously computed changesets"
+fi
+
+
+
+if [ ! -d "eigen_src" ]; then
+ hg clone https://bitbucket.org/eigen/eigen eigen_src
+else
+ cd eigen_src
+ hg pull -u
+ cd ..
+fi
+
+if [ -z "$CXX" ]; then
+ CXX=g++
+fi
+
+function make_backup
+{
+ if [ -f "$1.out" ]; then
+ mv "$1.out" "$1.backup"
+ fi
+}
+
+function merge
+{
+ count1=`echo $1 | wc -w`
+ count2=`echo $2 | wc -w`
+
+ if [ $count1 == $count2 ]; then
+ a=( $1 ); b=( $2 )
+ res=""
+ for (( i=0 ; i<$count1 ; i++ )); do
+ ai=${a[$i]}; bi=${b[$i]}
+ tmp=`echo "if ($ai > $bi) $ai else $bi " | bc -l`
+ res="$res $tmp"
+ done
+ echo $res
+
+ else
+ echo $1
+ fi
+}
+
+function test_current
+{
+ rev=$1
+ scalar=$2
+ name=$3
+
+ prev=""
+ if [ -e "$name.backup" ]; then
+ prev=`grep $rev "$name.backup" | cut -c 14-`
+ fi
+ res=$prev
+ count_rev=`echo $prev | wc -w`
+ count_ref=`cat $bench"_settings.txt" | wc -l`
+ if echo "$global_args" | grep "$rev" > /dev/null; then
+ rev_found=true
+ else
+ rev_found=false
+ fi
+# echo $update et $selected et $rev_found because $rev et "$global_args"
+# echo $count_rev et $count_ref
+ if [ $update == true ] || [ $count_rev != $count_ref ] || ([ $selected == true ] && [ $rev_found == true ]); then
+ if $CXX -O2 -DNDEBUG -march=native $CXX_FLAGS -I eigen_src $bench.cpp -DSCALAR=$scalar -o $name; then
+ curr=`./$name`
+ if [ $count_rev == $count_ref ]; then
+ echo "merge previous $prev"
+ echo "with new $curr"
+ else
+ echo "got $curr"
+ fi
+ res=`merge "$curr" "$prev"`
+# echo $res
+ echo "$rev $res" >> $name.out
+ else
+ echo "Compilation failed, skip rev $rev"
+ fi
+ else
+ echo "Skip existing results for $rev / $name"
+ echo "$rev $res" >> $name.out
+ fi
+}
+
+make_backup $PREFIX"s"$bench
+make_backup $PREFIX"d"$bench
+make_backup $PREFIX"c"$bench
+
+cut -f1 -d"#" < changesets.txt | grep -E '[[:alnum:]]' | while read rev
+do
+ if [ ! -z "$rev" ]; then
+ echo "Testing rev $rev"
+ cd eigen_src
+ hg up -C $rev > /dev/null
+ actual_rev=`hg identify | cut -f1 -d' '`
+ cd ..
+
+ test_current $actual_rev float $PREFIX"s"$bench
+ test_current $actual_rev double $PREFIX"d"$bench
+ test_current $actual_rev "std::complex<double>" $PREFIX"c"$bench
+ fi
+
+done
+
+echo "Float:"
+cat $PREFIX"s""$bench.out"
+echo " "
+
+echo "Double:"
+cat $PREFIX"d""$bench.out"
+echo ""
+
+echo "Complex:"
+cat $PREFIX"c""$bench.out"
+echo ""
+
+./make_plot.sh $PREFIX"s"$bench $bench
+./make_plot.sh $PREFIX"d"$bench $bench
+./make_plot.sh $PREFIX"c"$bench $bench
+
+
diff --git a/bench/spbench/CMakeLists.txt b/bench/spbench/CMakeLists.txt
index 6e0e1b1..029ba6d 100644
--- a/bench/spbench/CMakeLists.txt
+++ b/bench/spbench/CMakeLists.txt
@@ -29,7 +29,7 @@
set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
endif()
-find_package(SuperLU)
+find_package(SuperLU 4.0)
if(SUPERLU_FOUND AND BLAS_FOUND)
add_definitions("-DEIGEN_SUPERLU_SUPPORT")
include_directories(${SUPERLU_INCLUDES})
@@ -38,25 +38,32 @@
endif()
-find_package(Pastix)
-find_package(Scotch)
-find_package(Metis)
-if(PASTIX_FOUND AND BLAS_FOUND)
+find_package(PASTIX QUIET COMPONENTS METIS SCOTCH)
+# check that the PASTIX found is a version without MPI
+find_path(PASTIX_pastix_nompi.h_INCLUDE_DIRS
+ NAMES pastix_nompi.h
+ HINTS ${PASTIX_INCLUDE_DIRS}
+)
+if (NOT PASTIX_pastix_nompi.h_INCLUDE_DIRS)
+ message(STATUS "A version of Pastix has been found but pastix_nompi.h does not exist in the include directory."
+ " Because Eigen tests require a version without MPI, we disable the Pastix backend.")
+endif()
+if(PASTIX_FOUND AND PASTIX_pastix_nompi.h_INCLUDE_DIRS AND BLAS_FOUND)
add_definitions("-DEIGEN_PASTIX_SUPPORT")
- include_directories(${PASTIX_INCLUDES})
+ include_directories(${PASTIX_INCLUDE_DIRS_DEP})
if(SCOTCH_FOUND)
- include_directories(${SCOTCH_INCLUDES})
+ include_directories(${SCOTCH_INCLUDE_DIRS})
set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
elseif(METIS_FOUND)
- include_directories(${METIS_INCLUDES})
+ include_directories(${METIS_INCLUDE_DIRS})
set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})
endif(SCOTCH_FOUND)
- set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${BLAS_LIBRARIES})
- set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${BLAS_LIBRARIES})
-endif(PASTIX_FOUND AND BLAS_FOUND)
+ set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES_DEP} ${ORDERING_LIBRARIES})
+ set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES_DEP})
+endif()
if(METIS_FOUND)
- include_directories(${METIS_INCLUDES})
+ include_directories(${METIS_INCLUDE_DIRS})
set (SPARSE_LIBS ${SPARSE_LIBS} ${METIS_LIBRARIES})
add_definitions("-DEIGEN_METIS_SUPPORT")
endif(METIS_FOUND)
diff --git a/bench/spbench/spbenchstyle.h b/bench/spbench/spbenchstyle.h
index 17a05ce..f6a9817 100644
--- a/bench/spbench/spbenchstyle.h
+++ b/bench/spbench/spbenchstyle.h
@@ -91,4 +91,5 @@
</xsl:stylesheet>\n\n";
}
-#endif
\ No newline at end of file
+
+#endif
diff --git a/bench/tensors/README b/bench/tensors/README
new file mode 100644
index 0000000..3a5fdbe
--- /dev/null
+++ b/bench/tensors/README
@@ -0,0 +1,21 @@
+The tensor benchmark suite is made of several parts.
+
+The first part is a generic suite, in which each benchmark comes in 2 flavors: one that runs on CPU, and one that runs on GPU.
+
+To compile the floating point CPU benchmarks, simply call:
+g++ tensor_benchmarks_cpu.cc benchmark_main.cc -I ../../ -std=c++11 -O3 -DNDEBUG -pthread -mavx -o benchmarks_cpu
+
+To compile the floating point GPU benchmarks, simply call:
+nvcc tensor_benchmarks_gpu.cu benchmark_main.cc -I ../../ -std=c++11 -O2 -DNDEBUG -use_fast_math -ftz=true -arch compute_35 -o benchmarks_gpu
+
+We also provide a version of the generic GPU tensor benchmarks that uses half floats (aka fp16) instead of regular floats. To compile these benchmarks, simply call the command line below. You'll need a recent GPU that supports compute capability 5.3 or higher to run them and nvcc 7.5 or higher to compile the code.
+nvcc tensor_benchmarks_fp16_gpu.cu benchmark_main.cc -I ../../ -std=c++11 -O2 -DNDEBUG -use_fast_math -ftz=true -arch compute_53 -o benchmarks_fp16_gpu
+
+Last but not least, we also provide a suite of benchmarks to measure the scalability of the contraction code on CPU. To compile these benchmarks, call:
+g++ contraction_benchmarks_cpu.cc benchmark_main.cc -I ../../ -std=c++11 -O3 -DNDEBUG -pthread -mavx -o benchmarks_cpu
+
+To compile the benchmarks for SYCL using ComputeCpp, you currently need 2 passes (only for translation units containing device code):
+1. The device compilation pass that generates the device code (SYCL kernels and referenced device functions) and glue code needed by the host compiler to reference the device code from host code.
+{ComputeCpp_ROOT}/bin/compute++ -I ../../ -I {ComputeCpp_ROOT}/include/ -std=c++11 -mllvm -inline-threshold=1000 -Wno-ignored-attributes -sycl -intelspirmetadata -emit-llvm -no-serial-memop -sycl-compress-name -DBUILD_PLATFORM_SPIR -DNDEBUG -O3 -c tensor_benchmarks_sycl.cc
+2. The host compilation pass that generates the final host binary.
+clang++-3.7 -include tensor_benchmarks_sycl.sycl benchmark_main.cc tensor_benchmarks_sycl.cc -pthread -I ../../ -I {ComputeCpp_ROOT}/include/ -L {ComputeCpp_ROOT}/lib/ -lComputeCpp -lOpenCL -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11 -o tensor_benchmark_sycl
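+
+Note (an illustrative usage hint, inferred from benchmark_main.cc rather than stated in the original): the resulting binaries accept optional command-line arguments that are treated as regular expressions selecting which benchmarks to run, e.g.
+./benchmarks_cpu contraction
+runs only the benchmarks whose names match "contraction", while running the binary without arguments runs every registered benchmark.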
diff --git a/bench/tensors/benchmark.h b/bench/tensors/benchmark.h
new file mode 100644
index 0000000..f115b54
--- /dev/null
+++ b/bench/tensors/benchmark.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stddef.h>
+#include <stdint.h>
+#include <vector>
+
+namespace testing {
+class Benchmark {
+ public:
+ Benchmark(const char* name, void (*fn)(int)) {
+ Register(name, fn, NULL);
+ }
+ Benchmark(const char* name, void (*fn_range)(int, int)) {
+ Register(name, NULL, fn_range);
+ }
+ Benchmark* Arg(int x);
+ Benchmark* Range(int lo, int hi);
+ const char* Name();
+ bool ShouldRun(int argc, char* argv[]);
+ void Run();
+ private:
+ const char* name_;
+ void (*fn_)(int);
+ void (*fn_range_)(int, int);
+ std::vector<int> args_;
+ void Register(const char* name, void (*fn)(int), void (*fn_range)(int, int));
+ void RunRepeatedlyWithArg(int iterations, int arg);
+ void RunWithArg(int arg);
+};
+} // namespace testing
+void SetBenchmarkFlopsProcessed(int64_t);
+void StopBenchmarkTiming();
+void StartBenchmarkTiming();
+#define BENCHMARK(f) \
+ static ::testing::Benchmark* _benchmark_##f __attribute__((unused)) = \
+ (new ::testing::Benchmark(#f, f))
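+
+// Illustrative sketch (not part of the original header): a benchmark is a free
+// function taking the iteration count chosen by the harness; it brackets its
+// own timed region, reports how much work was done, and registers itself with
+// the BENCHMARK macro. For example:
+//
+//   static void BM_floatCopy(int iters) {
+//     StopBenchmarkTiming();                        // exclude setup from timing
+//     std::vector<float> src(4096, 1.0f), dst(4096);
+//     StartBenchmarkTiming();
+//     for (int i = 0; i < iters; ++i) {
+//       dst = src;                                  // the operation being measured
+//     }
+//     SetBenchmarkFlopsProcessed(static_cast<int64_t>(iters) * src.size());
+//   }
+//   BENCHMARK(BM_floatCopy);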
diff --git a/bench/tensors/benchmark_main.cc b/bench/tensors/benchmark_main.cc
new file mode 100644
index 0000000..1efa0db
--- /dev/null
+++ b/bench/tensors/benchmark_main.cc
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "benchmark.h"
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <string>
+#include <inttypes.h>
+#include <time.h>
+#include <map>
+
+static int64_t g_flops_processed;
+static int64_t g_benchmark_total_time_ns;
+static int64_t g_benchmark_start_time_ns;
+typedef std::map<std::string, ::testing::Benchmark*> BenchmarkMap;
+typedef BenchmarkMap::iterator BenchmarkMapIt;
+
+BenchmarkMap& gBenchmarks() {
+ static BenchmarkMap g_benchmarks;
+ return g_benchmarks;
+}
+
+static int g_name_column_width = 20;
+
+static int Round(int n) {
+ int base = 1;
+ while (base*10 < n) {
+ base *= 10;
+ }
+ if (n < 2*base) {
+ return 2*base;
+ }
+ if (n < 5*base) {
+ return 5*base;
+ }
+ return 10*base;
+}
+
+#ifdef __APPLE__
+ #include <mach/mach_time.h>
+ static mach_timebase_info_data_t g_time_info;
+ static void __attribute__((constructor)) init_info() {
+ mach_timebase_info(&g_time_info);
+ }
+#endif
+
+static int64_t NanoTime() {
+#if defined(__APPLE__)
+ uint64_t t = mach_absolute_time();
+ return t * g_time_info.numer / g_time_info.denom;
+#else
+ struct timespec t;
+ t.tv_sec = t.tv_nsec = 0;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ return static_cast<int64_t>(t.tv_sec) * 1000000000LL + t.tv_nsec;
+#endif
+}
+
+namespace testing {
+Benchmark* Benchmark::Arg(int arg) {
+ args_.push_back(arg);
+ return this;
+}
+
+Benchmark* Benchmark::Range(int lo, int hi) {
+ const int kRangeMultiplier = 8;
+ if (hi < lo) {
+ int temp = hi;
+ hi = lo;
+ lo = temp;
+ }
+ while (lo < hi) {
+ args_.push_back(lo);
+ lo *= kRangeMultiplier;
+ }
+ // We always run the hi number.
+ args_.push_back(hi);
+ return this;
+}
+
+const char* Benchmark::Name() {
+ return name_;
+}
+bool Benchmark::ShouldRun(int argc, char* argv[]) {
+ if (argc == 1) {
+ return true; // With no arguments, we run all benchmarks.
+ }
+ // Otherwise, we interpret each argument as a regular expression and
+ // see if any of our benchmarks match.
+ for (int i = 1; i < argc; i++) {
+ regex_t re;
+ if (regcomp(&re, argv[i], 0) != 0) {
+ fprintf(stderr, "couldn't compile \"%s\" as a regular expression!\n", argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ int match = regexec(&re, name_, 0, NULL, 0);
+ regfree(&re);
+ if (match != REG_NOMATCH) {
+ return true;
+ }
+ }
+ return false;
+}
+void Benchmark::Register(const char* name, void (*fn)(int), void (*fn_range)(int, int)) {
+ name_ = name;
+ fn_ = fn;
+ fn_range_ = fn_range;
+ if (fn_ == NULL && fn_range_ == NULL) {
+ fprintf(stderr, "%s: missing function\n", name_);
+ exit(EXIT_FAILURE);
+ }
+ gBenchmarks().insert(std::make_pair(name, this));
+}
+void Benchmark::Run() {
+ if (fn_ != NULL) {
+ RunWithArg(0);
+ } else {
+ if (args_.empty()) {
+ fprintf(stderr, "%s: no args!\n", name_);
+ exit(EXIT_FAILURE);
+ }
+ for (size_t i = 0; i < args_.size(); ++i) {
+ RunWithArg(args_[i]);
+ }
+ }
+}
+void Benchmark::RunRepeatedlyWithArg(int iterations, int arg) {
+ g_flops_processed = 0;
+ g_benchmark_total_time_ns = 0;
+ g_benchmark_start_time_ns = NanoTime();
+ if (fn_ != NULL) {
+ fn_(iterations);
+ } else {
+ fn_range_(iterations, arg);
+ }
+ if (g_benchmark_start_time_ns != 0) {
+ g_benchmark_total_time_ns += NanoTime() - g_benchmark_start_time_ns;
+ }
+}
+void Benchmark::RunWithArg(int arg) {
+ // run once in case it's expensive
+ int iterations = 1;
+ RunRepeatedlyWithArg(iterations, arg);
+ while (g_benchmark_total_time_ns < 1e9 && iterations < 1e9) {
+ int last = iterations;
+ if (g_benchmark_total_time_ns/iterations == 0) {
+ iterations = 1e9;
+ } else {
+ iterations = 1e9 / (g_benchmark_total_time_ns/iterations);
+ }
+ iterations = std::max(last + 1, std::min(iterations + iterations/2, 100*last));
+ iterations = Round(iterations);
+ RunRepeatedlyWithArg(iterations, arg);
+ }
+ char throughput[100];
+ throughput[0] = '\0';
+ if (g_benchmark_total_time_ns > 0 && g_flops_processed > 0) {
+ double mflops_processed = static_cast<double>(g_flops_processed)/1e6;
+ double seconds = static_cast<double>(g_benchmark_total_time_ns)/1e9;
+ snprintf(throughput, sizeof(throughput), " %8.2f MFlops/s", mflops_processed/seconds);
+ }
+ char full_name[100];
+ if (fn_range_ != NULL) {
+ if (arg >= (1<<20)) {
+ snprintf(full_name, sizeof(full_name), "%s/%dM", name_, arg/(1<<20));
+ } else if (arg >= (1<<10)) {
+ snprintf(full_name, sizeof(full_name), "%s/%dK", name_, arg/(1<<10));
+ } else {
+ snprintf(full_name, sizeof(full_name), "%s/%d", name_, arg);
+ }
+ } else {
+ snprintf(full_name, sizeof(full_name), "%s", name_);
+ }
+ printf("%-*s %10d %10" PRId64 "%s\n", g_name_column_width, full_name,
+ iterations, g_benchmark_total_time_ns/iterations, throughput);
+ fflush(stdout);
+}
+} // namespace testing
+void SetBenchmarkFlopsProcessed(int64_t x) {
+ g_flops_processed = x;
+}
+void StopBenchmarkTiming() {
+ if (g_benchmark_start_time_ns != 0) {
+ g_benchmark_total_time_ns += NanoTime() - g_benchmark_start_time_ns;
+ }
+ g_benchmark_start_time_ns = 0;
+}
+void StartBenchmarkTiming() {
+ if (g_benchmark_start_time_ns == 0) {
+ g_benchmark_start_time_ns = NanoTime();
+ }
+}
+int main(int argc, char* argv[]) {
+ if (gBenchmarks().empty()) {
+ fprintf(stderr, "No benchmarks registered!\n");
+ exit(EXIT_FAILURE);
+ }
+ for (BenchmarkMapIt it = gBenchmarks().begin(); it != gBenchmarks().end(); ++it) {
+ int name_width = static_cast<int>(strlen(it->second->Name()));
+ g_name_column_width = std::max(g_name_column_width, name_width);
+ }
+ bool need_header = true;
+ for (BenchmarkMapIt it = gBenchmarks().begin(); it != gBenchmarks().end(); ++it) {
+ ::testing::Benchmark* b = it->second;
+ if (b->ShouldRun(argc, argv)) {
+ if (need_header) {
+ printf("%-*s %10s %10s\n", g_name_column_width, "", "iterations", "ns/op");
+ fflush(stdout);
+ need_header = false;
+ }
+ b->Run();
+ }
+ }
+ if (need_header) {
+ fprintf(stderr, "No matching benchmarks!\n");
+ fprintf(stderr, "Available benchmarks:\n");
+ for (BenchmarkMapIt it = gBenchmarks().begin(); it != gBenchmarks().end(); ++it) {
+ fprintf(stderr, " %s\n", it->second->Name());
+ }
+ exit(EXIT_FAILURE);
+ }
+ return 0;
+}
diff --git a/bench/tensors/contraction_benchmarks_cpu.cc b/bench/tensors/contraction_benchmarks_cpu.cc
new file mode 100644
index 0000000..f9e57ad
--- /dev/null
+++ b/bench/tensors/contraction_benchmarks_cpu.cc
@@ -0,0 +1,39 @@
+#define EIGEN_USE_THREADS
+
+#include <string>
+
+#include "tensor_benchmarks.h"
+
+#define CREATE_THREAD_POOL(threads) \
+Eigen::ThreadPool pool(threads); \
+Eigen::ThreadPoolDevice device(&pool, threads);
+
+
+// Contractions for number of threads ranging from 1 to 32
+// Dimensions are Rows, Cols, Depth
+#define BM_ContractionCPU(D1, D2, D3) \
+ static void BM_##Contraction##_##D1##x##D2##x##D3(int iters, int Threads) { \
+ StopBenchmarkTiming(); \
+ CREATE_THREAD_POOL(Threads); \
+ BenchmarkSuite<Eigen::ThreadPoolDevice, float> suite(device, D1, D2, D3); \
+ suite.contraction(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##Contraction##_##D1##x##D2##x##D3, 1, 32);
+
+
+// Vector Matrix and Matrix Vector products
+BM_ContractionCPU(1, 2000, 500);
+BM_ContractionCPU(2000, 1, 500);
+
+// Various skinny matrices
+BM_ContractionCPU(250, 3, 512);
+BM_ContractionCPU(1500, 3, 512);
+
+BM_ContractionCPU(512, 800, 4);
+BM_ContractionCPU(512, 80, 800);
+BM_ContractionCPU(512, 80, 13522);
+BM_ContractionCPU(1, 80, 13522);
+
+BM_ContractionCPU(3200, 512, 4);
+BM_ContractionCPU(3200, 512, 80);
+BM_ContractionCPU(3200, 80, 512);
diff --git a/bench/tensors/tensor_benchmarks.h b/bench/tensors/tensor_benchmarks.h
new file mode 100644
index 0000000..c2fb3de
--- /dev/null
+++ b/bench/tensors/tensor_benchmarks.h
@@ -0,0 +1,478 @@
+#ifndef THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_
+#define THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_
+
+typedef int TensorIndex;
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+
+#include "unsupported/Eigen/CXX11/Tensor"
+#include "benchmark.h"
+
+#define BENCHMARK_RANGE(bench, lo, hi) \
+ BENCHMARK(bench)->Range(lo, hi)
+
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+// TODO(bsteiner): also templatize on the input type since we have users
+// for int8 as well as floats.
+template <typename Device, typename T> class BenchmarkSuite {
+ public:
+ BenchmarkSuite(const Device& device, size_t m, size_t k, size_t n)
+ : m_(m), k_(k), n_(n), device_(device) {
+ initialize();
+ }
+
+ BenchmarkSuite(const Device& device, size_t m)
+ : m_(m), k_(m), n_(m), device_(device) {
+ initialize();
+ }
+
+ ~BenchmarkSuite() {
+ device_.deallocate(a_);
+ device_.deallocate(b_);
+ device_.deallocate(c_);
+ }
+
+ void memcpy(int num_iters) {
+ eigen_assert(m_ == k_ && k_ == n_);
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ device_.memcpy(c_, a_, m_ * m_ * sizeof(T));
+ }
+ // Record the number of values copied per second
+ finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
+ }
+
+ void typeCasting(int num_iters) {
+ eigen_assert(m_ == n_);
+ Eigen::array<TensorIndex, 2> sizes;
+ if (sizeof(T) >= sizeof(int)) {
+ sizes[0] = m_;
+ sizes[1] = k_;
+ } else {
+ sizes[0] = m_ * sizeof(T) / sizeof(int);
+ sizes[1] = k_ * sizeof(T) / sizeof(int);
+ }
+ const TensorMap<Tensor<int, 2, 0, TensorIndex>, Eigen::Aligned> A((int*)a_, sizes);
+ TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, sizes);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ B.device(device_) = A.template cast<T>();
+ }
+ // Record the number of values copied per second
+ finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
+ }
+
+ void random(int num_iters) {
+ eigen_assert(m_ == k_ && k_ == n_);
+ Eigen::array<TensorIndex, 2> sizes;
+ sizes[0] = m_;
+ sizes[1] = m_;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = C.random();
+ }
+ // Record the number of random numbers generated per second
+ finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
+ }
+
+ void slicing(int num_iters) {
+ eigen_assert(m_ == k_ && k_ == n_);
+ Eigen::array<TensorIndex, 2> sizes;
+ sizes[0] = m_;
+ sizes[1] = m_;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
+
+ const Eigen::DSizes<TensorIndex, 2> quarter_sizes(m_/2, m_/2);
+ const Eigen::DSizes<TensorIndex, 2> first_quadrant(0, 0);
+ const Eigen::DSizes<TensorIndex, 2> second_quadrant(0, m_/2);
+ const Eigen::DSizes<TensorIndex, 2> third_quadrant(m_/2, 0);
+ const Eigen::DSizes<TensorIndex, 2> fourth_quadrant(m_/2, m_/2);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.slice(first_quadrant, quarter_sizes).device(device_) =
+ A.slice(first_quadrant, quarter_sizes);
+ C.slice(second_quadrant, quarter_sizes).device(device_) =
+ B.slice(second_quadrant, quarter_sizes);
+ C.slice(third_quadrant, quarter_sizes).device(device_) =
+ A.slice(third_quadrant, quarter_sizes);
+ C.slice(fourth_quadrant, quarter_sizes).device(device_) =
+ B.slice(fourth_quadrant, quarter_sizes);
+ }
+ // Record the number of values copied from the rhs slice to the lhs slice
+ // each second
+ finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
+ }
+
+ void rowChip(int num_iters) {
+ Eigen::array<TensorIndex, 2> input_size;
+ input_size[0] = k_;
+ input_size[1] = n_;
+ const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, input_size);
+ Eigen::array<TensorIndex, 1> output_size;
+ output_size[0] = n_;
+ TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = B.chip(iter % k_, 0);
+ }
+ // Record the number of values copied from the rhs chip to the lhs.
+ finalizeBenchmark(static_cast<int64_t>(n_) * num_iters);
+ }
+
+ void colChip(int num_iters) {
+ Eigen::array<TensorIndex, 2> input_size;
+ input_size[0] = k_;
+ input_size[1] = n_;
+ const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, input_size);
+ Eigen::array<TensorIndex, 1> output_size;
+ output_size[0] = n_;
+ TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = B.chip(iter % n_, 1);
+ }
+ // Record the number of values copied from the rhs chip to the lhs.
+ finalizeBenchmark(static_cast<int64_t>(n_) * num_iters);
+ }
+
+ void shuffling(int num_iters) {
+ eigen_assert(m_ == n_);
+ Eigen::array<TensorIndex, 2> size_a;
+ size_a[0] = m_;
+ size_a[1] = k_;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
+ Eigen::array<TensorIndex, 2> size_b;
+ size_b[0] = k_;
+ size_b[1] = m_;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
+
+ Eigen::array<int, 2> shuffle;
+ shuffle[0] = 1;
+ shuffle[1] = 0;
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ B.device(device_) = A.shuffle(shuffle);
+ }
+ // Record the number of values shuffled from A and copied to B each second
+ finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
+ }
+
+ void padding(int num_iters) {
+ eigen_assert(m_ == k_);
+ Eigen::array<TensorIndex, 2> size_a;
+ size_a[0] = m_;
+ size_a[1] = k_-3;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
+ Eigen::array<TensorIndex, 2> size_b;
+ size_b[0] = k_;
+ size_b[1] = m_;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
+
+#if defined(EIGEN_HAS_INDEX_LIST)
+ Eigen::IndexPairList<Eigen::type2indexpair<0, 0>,
+ Eigen::type2indexpair<2, 1> > paddings;
+#else
+ Eigen::array<Eigen::IndexPair<TensorIndex>, 2> paddings;
+ paddings[0] = Eigen::IndexPair<TensorIndex>(0, 0);
+ paddings[1] = Eigen::IndexPair<TensorIndex>(2, 1);
+#endif
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ B.device(device_) = A.pad(paddings);
+ }
+ // Record the number of values copied from the padded tensor A each second
+ finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
+ }
+
+ void striding(int num_iters) {
+ eigen_assert(m_ == k_);
+ Eigen::array<TensorIndex, 2> size_a;
+ size_a[0] = m_;
+ size_a[1] = k_;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
+ Eigen::array<TensorIndex, 2> size_b;
+ size_b[0] = m_;
+ size_b[1] = k_/2;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
+
+#ifndef EIGEN_HAS_INDEX_LIST
+ Eigen::array<TensorIndex, 2> strides;
+ strides[0] = 1;
+ strides[1] = 2;
+#else
+ // Take advantage of cxx11 to give the compiler information it can use to
+ // optimize the code.
+ Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2> > strides;
+#endif
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ B.device(device_) = A.stride(strides);
+ }
+ // Record the number of values copied from the strided tensor A each second
+ finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
+ }
+
+ void broadcasting(int num_iters) {
+ Eigen::array<TensorIndex, 2> size_a;
+ size_a[0] = m_;
+ size_a[1] = 1;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
+ Eigen::array<TensorIndex, 2> size_c;
+ size_c[0] = m_;
+ size_c[1] = n_;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, size_c);
+
+#ifndef EIGEN_HAS_INDEX_LIST
+ Eigen::array<int, 2> broadcast;
+ broadcast[0] = 1;
+ broadcast[1] = n_;
+#else
+ // Take advantage of cxx11 to give the compiler information it can use to
+ // optimize the code.
+ Eigen::IndexList<Eigen::type2index<1>, int> broadcast;
+ broadcast.set(1, n_);
+#endif
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = A.broadcast(broadcast);
+ }
+ // Record the number of values broadcasted from A and copied to C each second
+ finalizeBenchmark(static_cast<int64_t>(m_) * n_ * num_iters);
+ }
+
+ void coeffWiseOp(int num_iters) {
+ eigen_assert(m_ == k_ && k_ == n_);
+ Eigen::array<TensorIndex, 2> sizes;
+ sizes[0] = m_;
+ sizes[1] = m_;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = A * A.constant(static_cast<T>(3.14)) + B * B.constant(static_cast<T>(2.7));
+ }
+ // Record the number of FLOP executed per second (2 multiplications and
+ // 1 addition per value)
+ finalizeBenchmark(static_cast<int64_t>(3) * m_ * m_ * num_iters);
+ }
+
+ void algebraicFunc(int num_iters) {
+ eigen_assert(m_ == k_ && k_ == n_);
+ Eigen::array<TensorIndex, 2> sizes;
+ sizes[0] = m_;
+ sizes[1] = m_;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
+ }
+ // Record the number of FLOP executed per second (assuming one operation
+ // per value)
+ finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
+ }
+
+ void transcendentalFunc(int num_iters) {
+ eigen_assert(m_ == k_ && k_ == n_);
+ Eigen::array<TensorIndex, 2> sizes;
+ sizes[0] = m_;
+ sizes[1] = m_;
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = A.exp() + B.log();
+ }
+ // Record the number of FLOP executed per second (assuming one operation
+ // per value)
+ finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
+ }
+
+ // Row reduction
+ void rowReduction(int num_iters) {
+ Eigen::array<TensorIndex, 2> input_size;
+ input_size[0] = k_;
+ input_size[1] = n_;
+ const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, input_size);
+ Eigen::array<TensorIndex, 1> output_size;
+ output_size[0] = n_;
+ TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
+
+#ifndef EIGEN_HAS_INDEX_LIST
+ Eigen::array<TensorIndex, 1> sum_along_dim;
+ sum_along_dim[0] = 0;
+#else
+ // Take advantage of cxx11 to give the compiler information it can use to
+ // optimize the code.
+ Eigen::IndexList<Eigen::type2index<0>> sum_along_dim;
+#endif
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = B.sum(sum_along_dim);
+ }
+ // Record the number of FLOP executed per second (assuming one operation
+ // per value)
+ finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
+ }
+
+ // Column reduction
+ void colReduction(int num_iters) {
+ Eigen::array<TensorIndex, 2> input_size;
+ input_size[0] = k_;
+ input_size[1] = n_;
+ const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(
+ b_, input_size);
+ Eigen::array<TensorIndex, 1> output_size;
+ output_size[0] = k_;
+ TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(
+ c_, output_size);
+
+#ifndef EIGEN_HAS_INDEX_LIST
+ Eigen::array<TensorIndex, 1> sum_along_dim;
+ sum_along_dim[0] = 1;
+#else
+ // Take advantage of cxx11 to give the compiler information it can use to
+ // optimize the code.
+ Eigen::IndexList<Eigen::type2index<1>> sum_along_dim;
+#endif
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = B.sum(sum_along_dim);
+ }
+ // Record the number of FLOP executed per second (assuming one operation
+ // per value)
+ finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
+ }
+
+ // Full reduction
+ void fullReduction(int num_iters) {
+ Eigen::array<TensorIndex, 2> input_size;
+ input_size[0] = k_;
+ input_size[1] = n_;
+ const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(
+ b_, input_size);
+ Eigen::array<TensorIndex, 0> output_size;
+ TensorMap<Tensor<T, 0, 0, TensorIndex>, Eigen::Aligned> C(
+ c_, output_size);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = B.sum();
+ }
+ // Record the number of FLOP executed per second (assuming one operation
+ // per value)
+ finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
+ }
+
+ // do a contraction which is equivalent to a matrix multiplication
+ void contraction(int num_iters) {
+ Eigen::array<TensorIndex, 2> sizeA;
+ sizeA[0] = m_;
+ sizeA[1] = k_;
+ Eigen::array<TensorIndex, 2> sizeB;
+ sizeB[0] = k_;
+ sizeB[1] = n_;
+ Eigen::array<TensorIndex, 2> sizeC;
+ sizeC[0] = m_;
+ sizeC[1] = n_;
+
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizeA);
+ const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizeB);
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizeC);
+
+ typedef typename Tensor<T, 2>::DimensionPair DimPair;
+ Eigen::array<DimPair, 1> dims;
+ dims[0] = DimPair(1, 0);
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = A.contract(B, dims);
+ }
+ // Record the number of FLOP executed per second (k_ multiplications and
+ // additions for each value in the resulting tensor)
+ finalizeBenchmark(static_cast<int64_t>(2) * m_ * n_ * k_ * num_iters);
+ }
+
+ void convolution(int num_iters, int kernel_x, int kernel_y) {
+ Eigen::array<TensorIndex, 2> input_sizes;
+ input_sizes[0] = m_;
+ input_sizes[1] = n_;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, input_sizes);
+ Eigen::array<TensorIndex, 2> kernel_sizes;
+ kernel_sizes[0] = kernel_x;
+ kernel_sizes[1] = kernel_y;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, kernel_sizes);
+ Eigen::array<TensorIndex, 2> result_sizes;
+ result_sizes[0] = m_ - kernel_x + 1;
+ result_sizes[1] = n_ - kernel_y + 1;
+ TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, result_sizes);
+ Eigen::array<TensorIndex, 2> dims;
+ dims[0] = 0;
+ dims[1] = 1;
+
+ StartBenchmarkTiming();
+ for (int iter = 0; iter < num_iters; ++iter) {
+ C.device(device_) = A.convolve(B, dims);
+ }
+ // Record the number of FLOP executed per second (kernel_size
+ // multiplications and additions for each value in the resulting tensor)
+ finalizeBenchmark(static_cast<int64_t>(2) *
+ (m_ - kernel_x + 1) * (n_ - kernel_y + 1) * kernel_x * kernel_y * num_iters);
+ }
+
+ private:
+ void initialize() {
+ a_ = (T *) device_.allocate(m_ * k_ * sizeof(T));
+ b_ = (T *) device_.allocate(k_ * n_ * sizeof(T));
+ c_ = (T *) device_.allocate(m_ * n_ * sizeof(T));
+
+ // Initialize the content of the memory pools to prevent asan from
+ // complaining.
+ device_.memset(a_, 12, m_ * k_ * sizeof(T));
+ device_.memset(b_, 23, k_ * n_ * sizeof(T));
+ device_.memset(c_, 31, m_ * n_ * sizeof(T));
+
+ //BenchmarkUseRealTime();
+ }
+
+ inline void finalizeBenchmark(int64_t num_items) {
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+ if (Eigen::internal::is_same<Device, Eigen::GpuDevice>::value) {
+ device_.synchronize();
+ }
+#endif
+ StopBenchmarkTiming();
+ SetBenchmarkFlopsProcessed(num_items);
+ }
+
+
+ TensorIndex m_;
+ TensorIndex k_;
+ TensorIndex n_;
+ T* a_;
+ T* b_;
+ T* c_;
+ Device device_;
+};
+#endif // THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_
diff --git a/bench/tensors/tensor_benchmarks_cpu.cc b/bench/tensors/tensor_benchmarks_cpu.cc
new file mode 100644
index 0000000..8947f4b
--- /dev/null
+++ b/bench/tensors/tensor_benchmarks_cpu.cc
@@ -0,0 +1,168 @@
+#define EIGEN_USE_THREADS
+
+#include <string>
+
+#include "tensor_benchmarks.h"
+
+#define CREATE_THREAD_POOL(threads) \
+Eigen::ThreadPool pool(threads); \
+Eigen::ThreadPoolDevice device(&pool, threads);
+
+// Simple functions
+#define BM_FuncCPU(FUNC, THREADS) \
+ static void BM_##FUNC##_##THREADS##T(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ CREATE_THREAD_POOL(THREADS); \
+ BenchmarkSuite<Eigen::ThreadPoolDevice, float> suite(device, N); \
+ suite.FUNC(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##THREADS##T, 10, 5000);
+
+BM_FuncCPU(memcpy, 4);
+BM_FuncCPU(memcpy, 8);
+BM_FuncCPU(memcpy, 12);
+
+BM_FuncCPU(typeCasting, 4);
+BM_FuncCPU(typeCasting, 8);
+BM_FuncCPU(typeCasting, 12);
+
+BM_FuncCPU(random, 4);
+BM_FuncCPU(random, 8);
+BM_FuncCPU(random, 12);
+
+BM_FuncCPU(slicing, 4);
+BM_FuncCPU(slicing, 8);
+BM_FuncCPU(slicing, 12);
+
+BM_FuncCPU(rowChip, 4);
+BM_FuncCPU(rowChip, 8);
+BM_FuncCPU(rowChip, 12);
+
+BM_FuncCPU(colChip, 4);
+BM_FuncCPU(colChip, 8);
+BM_FuncCPU(colChip, 12);
+
+BM_FuncCPU(shuffling, 4);
+BM_FuncCPU(shuffling, 8);
+BM_FuncCPU(shuffling, 12);
+
+BM_FuncCPU(padding, 4);
+BM_FuncCPU(padding, 8);
+BM_FuncCPU(padding, 12);
+
+BM_FuncCPU(striding, 4);
+BM_FuncCPU(striding, 8);
+BM_FuncCPU(striding, 12);
+
+BM_FuncCPU(broadcasting, 4);
+BM_FuncCPU(broadcasting, 8);
+BM_FuncCPU(broadcasting, 12);
+
+BM_FuncCPU(coeffWiseOp, 4);
+BM_FuncCPU(coeffWiseOp, 8);
+BM_FuncCPU(coeffWiseOp, 12);
+
+BM_FuncCPU(algebraicFunc, 4);
+BM_FuncCPU(algebraicFunc, 8);
+BM_FuncCPU(algebraicFunc, 12);
+
+BM_FuncCPU(transcendentalFunc, 4);
+BM_FuncCPU(transcendentalFunc, 8);
+BM_FuncCPU(transcendentalFunc, 12);
+
+BM_FuncCPU(rowReduction, 4);
+BM_FuncCPU(rowReduction, 8);
+BM_FuncCPU(rowReduction, 12);
+
+BM_FuncCPU(colReduction, 4);
+BM_FuncCPU(colReduction, 8);
+BM_FuncCPU(colReduction, 12);
+
+
+// Contractions
+#define BM_FuncWithInputDimsCPU(FUNC, D1, D2, D3, THREADS) \
+ static void BM_##FUNC##_##D1##x##D2##x##D3##_##THREADS##T(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ if (THREADS == 1) { \
+ Eigen::DefaultDevice device; \
+ BenchmarkSuite<Eigen::DefaultDevice, float> suite(device, D1, D2, D3); \
+ suite.FUNC(iters); \
+ } else { \
+ CREATE_THREAD_POOL(THREADS); \
+ BenchmarkSuite<Eigen::ThreadPoolDevice, float> suite(device, D1, D2, D3); \
+ suite.FUNC(iters); \
+ } \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##D1##x##D2##x##D3##_##THREADS##T, 10, 5000);
+
+
+BM_FuncWithInputDimsCPU(contraction, N, N, N, 1);
+BM_FuncWithInputDimsCPU(contraction, N, N, N, 4);
+BM_FuncWithInputDimsCPU(contraction, N, N, N, 8);
+BM_FuncWithInputDimsCPU(contraction, N, N, N, 12);
+BM_FuncWithInputDimsCPU(contraction, N, N, N, 16);
+
+BM_FuncWithInputDimsCPU(contraction, 64, N, N, 1);
+BM_FuncWithInputDimsCPU(contraction, 64, N, N, 4);
+BM_FuncWithInputDimsCPU(contraction, 64, N, N, 8);
+BM_FuncWithInputDimsCPU(contraction, 64, N, N, 12);
+BM_FuncWithInputDimsCPU(contraction, 64, N, N, 16);
+
+BM_FuncWithInputDimsCPU(contraction, N, 64, N, 1);
+BM_FuncWithInputDimsCPU(contraction, N, 64, N, 4);
+BM_FuncWithInputDimsCPU(contraction, N, 64, N, 8);
+BM_FuncWithInputDimsCPU(contraction, N, 64, N, 12);
+BM_FuncWithInputDimsCPU(contraction, N, 64, N, 16);
+
+BM_FuncWithInputDimsCPU(contraction, N, N, 64, 1);
+BM_FuncWithInputDimsCPU(contraction, N, N, 64, 4);
+BM_FuncWithInputDimsCPU(contraction, N, N, 64, 8);
+BM_FuncWithInputDimsCPU(contraction, N, N, 64, 12);
+BM_FuncWithInputDimsCPU(contraction, N, N, 64, 16);
+
+BM_FuncWithInputDimsCPU(contraction, 1, N, N, 1);
+BM_FuncWithInputDimsCPU(contraction, 1, N, N, 4);
+BM_FuncWithInputDimsCPU(contraction, 1, N, N, 8);
+BM_FuncWithInputDimsCPU(contraction, 1, N, N, 12);
+BM_FuncWithInputDimsCPU(contraction, 1, N, N, 16);
+
+BM_FuncWithInputDimsCPU(contraction, N, N, 1, 1);
+BM_FuncWithInputDimsCPU(contraction, N, N, 1, 4);
+BM_FuncWithInputDimsCPU(contraction, N, N, 1, 8);
+BM_FuncWithInputDimsCPU(contraction, N, N, 1, 12);
+BM_FuncWithInputDimsCPU(contraction, N, N, 1, 16);
+
+
+// Convolutions
+#define BM_FuncWithKernelDimsCPU(FUNC, DIM1, DIM2, THREADS) \
+ static void BM_##FUNC##_##DIM1##x##DIM2##_##THREADS##T(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ CREATE_THREAD_POOL(THREADS); \
+ BenchmarkSuite<Eigen::ThreadPoolDevice, float> suite(device, N); \
+ suite.FUNC(iters, DIM1, DIM2); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##DIM1##x##DIM2##_##THREADS##T, 128, 5000);
+
+BM_FuncWithKernelDimsCPU(convolution, 7, 1, 4);
+BM_FuncWithKernelDimsCPU(convolution, 7, 1, 8);
+BM_FuncWithKernelDimsCPU(convolution, 7, 1, 12);
+
+BM_FuncWithKernelDimsCPU(convolution, 1, 7, 4);
+BM_FuncWithKernelDimsCPU(convolution, 1, 7, 8);
+BM_FuncWithKernelDimsCPU(convolution, 1, 7, 12);
+
+BM_FuncWithKernelDimsCPU(convolution, 7, 4, 4);
+BM_FuncWithKernelDimsCPU(convolution, 7, 4, 8);
+BM_FuncWithKernelDimsCPU(convolution, 7, 4, 12);
+
+BM_FuncWithKernelDimsCPU(convolution, 4, 7, 4);
+BM_FuncWithKernelDimsCPU(convolution, 4, 7, 8);
+BM_FuncWithKernelDimsCPU(convolution, 4, 7, 12);
+
+BM_FuncWithKernelDimsCPU(convolution, 7, 64, 4);
+BM_FuncWithKernelDimsCPU(convolution, 7, 64, 8);
+BM_FuncWithKernelDimsCPU(convolution, 7, 64, 12);
+
+BM_FuncWithKernelDimsCPU(convolution, 64, 7, 4);
+BM_FuncWithKernelDimsCPU(convolution, 64, 7, 8);
+BM_FuncWithKernelDimsCPU(convolution, 64, 7, 12);
diff --git a/bench/tensors/tensor_benchmarks_fp16_gpu.cu b/bench/tensors/tensor_benchmarks_fp16_gpu.cu
new file mode 100644
index 0000000..65784d0
--- /dev/null
+++ b/bench/tensors/tensor_benchmarks_fp16_gpu.cu
@@ -0,0 +1,77 @@
+#define EIGEN_USE_GPU
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <iostream>
+
+#include "tensor_benchmarks.h"
+
+// Simple functions
+#define BM_FuncGPU(FUNC) \
+ static void BM_##FUNC(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ Eigen::CudaStreamDevice stream; \
+ Eigen::GpuDevice device(&stream); \
+ BenchmarkSuite<Eigen::GpuDevice, Eigen::half> suite(device, N); \
+ cudaDeviceSynchronize(); \
+ suite.FUNC(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC, 10, 5000);
+
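+// Each benchmark builds its own CUDA stream/device pair and runs the
+// half-precision suite; cudaDeviceSynchronize() lets device setup finish
+// before the suite starts its own timing.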
+BM_FuncGPU(memcpy);
+BM_FuncGPU(typeCasting);
+//BM_FuncGPU(random);
+BM_FuncGPU(slicing);
+BM_FuncGPU(rowChip);
+BM_FuncGPU(colChip);
+BM_FuncGPU(shuffling);
+BM_FuncGPU(padding);
+BM_FuncGPU(striding);
+BM_FuncGPU(broadcasting);
+BM_FuncGPU(coeffWiseOp);
+BM_FuncGPU(algebraicFunc);
+BM_FuncGPU(transcendentalFunc);
+BM_FuncGPU(rowReduction);
+BM_FuncGPU(colReduction);
+BM_FuncGPU(fullReduction);
+
+
+// Contractions
+#define BM_FuncWithInputDimsGPU(FUNC, D1, D2, D3) \
+ static void BM_##FUNC##_##D1##x##D2##x##D3(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ Eigen::CudaStreamDevice stream; \
+ Eigen::GpuDevice device(&stream); \
+ BenchmarkSuite<Eigen::GpuDevice, Eigen::half> suite(device, D1, D2, D3); \
+ cudaDeviceSynchronize(); \
+ suite.FUNC(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##D1##x##D2##x##D3, 10, 5000);
+
+
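+// Contraction benchmarks: the all-N case plus variants with one dimension
+// fixed at 64.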
+BM_FuncWithInputDimsGPU(contraction, N, N, N);
+BM_FuncWithInputDimsGPU(contraction, 64, N, N);
+BM_FuncWithInputDimsGPU(contraction, N, 64, N);
+BM_FuncWithInputDimsGPU(contraction, N, N, 64);
+
+
+// Convolutions
+#define BM_FuncWithKernelDimsGPU(FUNC, DIM1, DIM2) \
+ static void BM_##FUNC##_##DIM1##x##DIM2(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ Eigen::CudaStreamDevice stream; \
+ Eigen::GpuDevice device(&stream); \
+ BenchmarkSuite<Eigen::GpuDevice, Eigen::half> suite(device, N); \
+ cudaDeviceSynchronize(); \
+ suite.FUNC(iters, DIM1, DIM2); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##DIM1##x##DIM2, 128, 5000);
+
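+// The half-precision convolution benchmarks are currently disabled: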
+/*
+BM_FuncWithKernelDimsGPU(convolution, 7, 1);
+BM_FuncWithKernelDimsGPU(convolution, 1, 7);
+BM_FuncWithKernelDimsGPU(convolution, 7, 4);
+BM_FuncWithKernelDimsGPU(convolution, 4, 7);
+BM_FuncWithKernelDimsGPU(convolution, 7, 64);
+BM_FuncWithKernelDimsGPU(convolution, 64, 7);
+*/
diff --git a/bench/tensors/tensor_benchmarks_gpu.cu b/bench/tensors/tensor_benchmarks_gpu.cu
new file mode 100644
index 0000000..76d68c5
--- /dev/null
+++ b/bench/tensors/tensor_benchmarks_gpu.cu
@@ -0,0 +1,75 @@
+#define EIGEN_USE_GPU
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <iostream>
+
+#include "tensor_benchmarks.h"
+
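+// Single-precision counterpart of tensor_benchmarks_fp16_gpu.cu; the same
+// benchmark set is registered here for float, with random and the
+// convolution benchmarks enabled as well.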
+// Simple functions
+#define BM_FuncGPU(FUNC) \
+ static void BM_##FUNC(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ Eigen::CudaStreamDevice stream; \
+ Eigen::GpuDevice device(&stream); \
+ BenchmarkSuite<Eigen::GpuDevice, float> suite(device, N); \
+ cudaDeviceSynchronize(); \
+ suite.FUNC(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC, 10, 5000);
+
+BM_FuncGPU(memcpy);
+BM_FuncGPU(typeCasting);
+BM_FuncGPU(random);
+BM_FuncGPU(slicing);
+BM_FuncGPU(rowChip);
+BM_FuncGPU(colChip);
+BM_FuncGPU(shuffling);
+BM_FuncGPU(padding);
+BM_FuncGPU(striding);
+BM_FuncGPU(broadcasting);
+BM_FuncGPU(coeffWiseOp);
+BM_FuncGPU(algebraicFunc);
+BM_FuncGPU(transcendentalFunc);
+BM_FuncGPU(rowReduction);
+BM_FuncGPU(colReduction);
+BM_FuncGPU(fullReduction);
+
+
+// Contractions
+#define BM_FuncWithInputDimsGPU(FUNC, D1, D2, D3) \
+ static void BM_##FUNC##_##D1##x##D2##x##D3(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ Eigen::CudaStreamDevice stream; \
+ Eigen::GpuDevice device(&stream); \
+ BenchmarkSuite<Eigen::GpuDevice, float> suite(device, D1, D2, D3); \
+ cudaDeviceSynchronize(); \
+ suite.FUNC(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##D1##x##D2##x##D3, 10, 5000);
+
+
+BM_FuncWithInputDimsGPU(contraction, N, N, N);
+BM_FuncWithInputDimsGPU(contraction, 64, N, N);
+BM_FuncWithInputDimsGPU(contraction, N, 64, N);
+BM_FuncWithInputDimsGPU(contraction, N, N, 64);
+
+
+// Convolutions
+#define BM_FuncWithKernelDimsGPU(FUNC, DIM1, DIM2) \
+ static void BM_##FUNC##_##DIM1##x##DIM2(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ Eigen::CudaStreamDevice stream; \
+ Eigen::GpuDevice device(&stream); \
+ BenchmarkSuite<Eigen::GpuDevice, float> suite(device, N); \
+ cudaDeviceSynchronize(); \
+ suite.FUNC(iters, DIM1, DIM2); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC##_##DIM1##x##DIM2, 128, 5000);
+
+BM_FuncWithKernelDimsGPU(convolution, 7, 1);
+BM_FuncWithKernelDimsGPU(convolution, 1, 7);
+BM_FuncWithKernelDimsGPU(convolution, 7, 4);
+BM_FuncWithKernelDimsGPU(convolution, 4, 7);
+BM_FuncWithKernelDimsGPU(convolution, 7, 64);
+BM_FuncWithKernelDimsGPU(convolution, 64, 7);
diff --git a/bench/tensors/tensor_benchmarks_sycl.cc b/bench/tensors/tensor_benchmarks_sycl.cc
new file mode 100644
index 0000000..7eca4d9
--- /dev/null
+++ b/bench/tensors/tensor_benchmarks_sycl.cc
@@ -0,0 +1,37 @@
+#define EIGEN_USE_SYCL
+
+#include <SYCL/sycl.hpp>
+#include <iostream>
+
+#include "tensor_benchmarks.h"
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+// Simple functions
+template <typename device_selector>
+cl::sycl::queue sycl_queue() {
+ return cl::sycl::queue(device_selector(), [=](cl::sycl::exception_list l) {
+ for (const auto& e : l) {
+ try {
+ std::rethrow_exception(e);
+ } catch (const cl::sycl::exception& e) {
+ std::cout << e.what() << std::endl;
+ }
+ }
+ });
+}
+
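+// sycl_queue() attaches an async handler that rethrows and prints exceptions
+// reported by the SYCL runtime. Each benchmark below wraps such a queue
+// (built with the GPU selector) in an Eigen::SyclDevice and runs the float
+// suite on it.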
+#define BM_FuncGPU(FUNC) \
+ static void BM_##FUNC(int iters, int N) { \
+ StopBenchmarkTiming(); \
+ cl::sycl::queue q = sycl_queue<cl::sycl::gpu_selector>(); \
+ Eigen::SyclDevice device(q); \
+ BenchmarkSuite<Eigen::SyclDevice, float> suite(device, N); \
+ suite.FUNC(iters); \
+ } \
+ BENCHMARK_RANGE(BM_##FUNC, 10, 5000);
+
+BM_FuncGPU(broadcasting);
+BM_FuncGPU(coeffWiseOp);