// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Authors: dmitriy.korchemkin@gmail.com (Dmitriy Korchemkin)

#include "ceres/internal/config.h"

#ifndef CERES_NO_CUDA

#include <glog/logging.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include "ceres/cuda_streamed_buffer.h"

namespace ceres::internal {

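// These tests exercise CudaStreamedBuffer::CopyToGpu: the input array is
// split into batches of at most kMaxTemporaryArraySize elements, each batch
// is transferred to the device, and the callback is invoked once per batch
// with the device pointer, the batch size, the batch offset into the source
// array, and the CUDA stream the transfer was issued on.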
TEST(CudaStreamedBufferTest, IntegerCopy) {
  // Offsets and sizes of batches supplied to callback.
  std::vector<std::pair<int, int>> batches;
  const int kMaxTemporaryArraySize = 16;
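  // kInputSize is presumably chosen to not be a multiple of the batch size
  // (7 full batches of 16 plus a 3-element tail), so the final partial batch
  // is exercised as well.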
  const int kInputSize = kMaxTemporaryArraySize * 7 + 3;
  ContextImpl context;
  std::string message;
  CHECK(context.InitCuda(&message)) << "InitCuda() failed because: " << message;

  std::vector<int> inputs(kInputSize);
  std::vector<int> outputs(kInputSize, -1);
  std::iota(inputs.begin(), inputs.end(), 0);

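  // The callback copies every batch straight back into `outputs` on the same
  // stream, so a successful round trip shows the device-side data matched the
  // input.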
  CudaStreamedBuffer<int> streamed_buffer(&context, kMaxTemporaryArraySize);
  streamed_buffer.CopyToGpu(inputs.data(),
                            kInputSize,
                            [&outputs, &batches](const int* device_pointer,
                                                 int size,
                                                 int offset,
                                                 cudaStream_t stream) {
                              batches.emplace_back(offset, size);
                              CHECK_EQ(cudaSuccess,
                                       cudaMemcpyAsync(outputs.data() + offset,
                                                       device_pointer,
                                                       sizeof(int) * size,
                                                       cudaMemcpyDeviceToHost,
                                                       stream));
                            });
  // All operations in all streams should be completed by the time CopyToGpu
  // returns control to the caller.
  for (int i = 0; i < ContextImpl::kNumCudaStreams; ++i) {
    CHECK_EQ(cudaSuccess, cudaStreamQuery(context.streams_[i]));
  }

  // Check that every element was visited.
  for (int i = 0; i < kInputSize; ++i) {
    CHECK_EQ(outputs[i], i);
  }

  // Check that the batches are disjoint and tile [0, kInputSize) exactly:
  // after sorting by offset, each batch's end must coincide with the next
  // batch's begin.
  std::sort(batches.begin(), batches.end());
  const int num_batches = batches.size();
  for (int i = 0; i < num_batches; ++i) {
    const auto [begin, size] = batches[i];
    const int end = begin + size;
    CHECK_GE(begin, 0);
    CHECK_LT(begin, kInputSize);

    CHECK_GT(size, 0);
    CHECK_LE(end, kInputSize);

    if (i + 1 == num_batches) continue;
    CHECK_EQ(end, batches[i + 1].first);
  }
}

TEST(CudaStreamedBufferTest, IntegerNoCopy) {
  // Offsets and sizes of batches supplied to callback.
  std::vector<std::pair<int, int>> batches;
  const int kMaxTemporaryArraySize = 16;
  const int kInputSize = kMaxTemporaryArraySize * 7 + 3;
  ContextImpl context;
  std::string message;
  CHECK(context.InitCuda(&message)) << "InitCuda() failed because: " << message;

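  // Both buffers are page-locked (pinned) host memory. The "NoCopy" in the
  // test name presumably reflects that CudaStreamedBuffer can stream pinned
  // memory to the device directly, without an intermediate staging copy.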
  int* inputs;
  int* outputs;
  CHECK_EQ(cudaSuccess,
           cudaHostAlloc(
               &inputs, sizeof(int) * kInputSize, cudaHostAllocWriteCombined));
  CHECK_EQ(
      cudaSuccess,
      cudaHostAlloc(&outputs, sizeof(int) * kInputSize, cudaHostAllocDefault));

  std::fill(outputs, outputs + kInputSize, -1);
  std::iota(inputs, inputs + kInputSize, 0);

  CudaStreamedBuffer<int> streamed_buffer(&context, kMaxTemporaryArraySize);
  streamed_buffer.CopyToGpu(inputs,
                            kInputSize,
                            [outputs, &batches](const int* device_pointer,
                                                int size,
                                                int offset,
                                                cudaStream_t stream) {
                              batches.emplace_back(offset, size);
                              CHECK_EQ(cudaSuccess,
                                       cudaMemcpyAsync(outputs + offset,
                                                       device_pointer,
                                                       sizeof(int) * size,
                                                       cudaMemcpyDeviceToHost,
                                                       stream));
                            });
  // All operations in all streams should be completed by the time CopyToGpu
  // returns control to the caller.
  for (int i = 0; i < ContextImpl::kNumCudaStreams; ++i) {
    CHECK_EQ(cudaSuccess, cudaStreamQuery(context.streams_[i]));
  }

  // Check that every element was visited.
  for (int i = 0; i < kInputSize; ++i) {
    CHECK_EQ(outputs[i], i);
  }

  // Check that the batches are disjoint and tile [0, kInputSize) exactly:
  // after sorting by offset, each batch's end must coincide with the next
  // batch's begin.
  std::sort(batches.begin(), batches.end());
  const int num_batches = batches.size();
  for (int i = 0; i < num_batches; ++i) {
    const auto [begin, size] = batches[i];
    const int end = begin + size;
    CHECK_GE(begin, 0);
    CHECK_LT(begin, kInputSize);

    CHECK_GT(size, 0);
    CHECK_LE(end, kInputSize);

    if (i + 1 == num_batches) continue;
    CHECK_EQ(end, batches[i + 1].first);
  }

  CHECK_EQ(cudaSuccess, cudaFreeHost(inputs));
  CHECK_EQ(cudaSuccess, cudaFreeHost(outputs));
}

}  // namespace ceres::internal

#endif  // CERES_NO_CUDA