#ifndef Y2018_CONTROL_LOOPS_SUPERSTRUCTURE_ARM_GRAPH_H_
#define Y2018_CONTROL_LOOPS_SUPERSTRUCTURE_ARM_GRAPH_H_

#include <assert.h>
#include <stdlib.h>
#include <algorithm>
#include <limits>
#include <memory>
#include <vector>

namespace y2018 {
namespace control_loops {
namespace superstructure {
namespace arm {

// Grr... normal priority queues don't allow modifying the cost of a node that
// is already in the queue.  This relies on pointer stability for the passed-in
// T.
template <typename T>
class IntrusivePriorityQueue {
 public:
  // T is expected to be like so:
  //
  // struct T {
  //   IntrusivePriorityQueue<T>::QueueEntry* entry_ = nullptr;
  // };
  //
  // A QueueEntry is like a unique_ptr, except that the T it points at also
  // keeps a link back to the QueueEntry which currently owns it.  This keeps
  // the mapping between elements and their indexes in the heap up to date,
  // which is what makes O(log(n)) decrease-key operations possible.
  class QueueEntry {
   public:
    QueueEntry(T *value) : value_(value) { value_->entry_ = this; }

    QueueEntry(QueueEntry &&o) : value_(o.value_) {
      if (value_) {
        value_->entry_ = this;
      }
      o.value_ = nullptr;
    }

    QueueEntry &operator=(QueueEntry &&o) {
      if (this == &o) return *this;
      std::swap(value_, o.value_);
      if (value_) value_->entry_ = this;
      if (o.value_) o.value_->entry_ = &o;
      return *this;
    }

    ~QueueEntry() {
      if (value_) value_->entry_ = nullptr;
    }

    T *get() { return value_; }

    bool operator<(const QueueEntry &o) const { return *value_ < *o.value_; }

   private:
    friend class IntrusivePriorityQueue;
    T *value_ = nullptr;
  };

  // Conceptually, nodes start at infinity and can only decrease in cost.
  // The infinity nodes are not in the queue.
  void InsertOrDecrease(T *t) {
    if (t->entry_ != nullptr) {
      // Already in the heap: the cost just decreased, so sift the element up
      // by re-pushing the prefix of the heap that ends at its slot.
      std::push_heap(queue_.begin(), queue_.begin() + (GetIndex(t) + 1));
    } else {
      queue_.emplace_back(t);
      std::push_heap(queue_.begin(), queue_.end());
    }
  }

  // For testing.
  void VerifyCorrectness() {
    for (size_t i = 0; i < queue_.size(); ++i) {
      assert(queue_[i].value_->entry_ == &queue_[i]);
    }

    assert(std::is_heap(queue_.begin(), queue_.end()));
  }

  // Removes and returns the lowest-cost element.
  T *PopMin() {
    T *result = queue_.front().get();
    std::pop_heap(queue_.begin(), queue_.end());
    queue_.pop_back();
    return result;
  }

  bool Empty() const { return queue_.empty(); }

  void Clear() { queue_.clear(); }

  void Reserve(size_t n) { queue_.reserve(n); }

 private:
  // Index of t's QueueEntry within queue_.
  size_t GetIndex(T *t) { return t->entry_ - &queue_[0]; }

  std::vector<QueueEntry> queue_;
};
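
// Example usage (illustrative only: "Node" and its members are hypothetical
// and not part of this header; the inverted operator< makes the cheapest node
// pop first, matching how SearchGraph::Vertex below is written):
//
//   struct Node {
//     double cost = std::numeric_limits<double>::infinity();
//     bool operator<(const Node &o) const { return cost > o.cost; }
//     IntrusivePriorityQueue<Node>::QueueEntry *entry_ = nullptr;
//   };
//
//   IntrusivePriorityQueue<Node> queue;
//   Node a, b;
//   a.cost = 2.0;
//   b.cost = 5.0;
//   queue.InsertOrDecrease(&a);
//   queue.InsertOrDecrease(&b);
//   b.cost = 1.0;                // Decrease-key: lower the cost first...
//   queue.InsertOrDecrease(&b);  // ...then let the queue sift it up.
//   Node *min = queue.PopMin();  // min == &b.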

// You pass in a graph with num_vertexes vertexes and a set of edges described
// by edges.  SearchGraph will then search that graph to find the optimal path
// from every vertex to a goal vertex.
//
// A traversal algorithm would be as such:
//   // Initialize graph...
//   graph.SetGoal(my_goal);
//   size_t current_node = node;
//
//   while (current_node != my_goal) {
//     edge = argmin(edge cost + GetCostToGoal(edge.dest)
//                   for edge in Neighbors(current_node));
//     Travel along edge # edge.edge_id.
//     current_node = edge.dest;
//   }
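//
// Illustrative sketch (the vertex count and costs below are made up; it
// assumes SetGoal computes shortest-path costs to the goal, as described
// above):
//   // A 3-vertex graph: 0 -> 1 -> 2, plus a more expensive direct 0 -> 2.
//   SearchGraph graph(3, {{0, 1, 1.0}, {1, 2, 1.0}, {0, 2, 5.0}});
//   graph.SetGoal(2);
//   graph.GetCostToGoal(0);  // 2.0, via 0 -> 1 -> 2.
//   graph.GetCostToGoal(2);  // 0.0, already at the goal.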
class SearchGraph {
 public:
  struct Edge {
    size_t start;
    size_t end;
    double cost;
  };

  // The part of an edge stored at every vertex.
  struct HalfEdge {
    size_t dest;
    // This id matches the corresponding id in the edges array from the
    // constructor.
    size_t edge_id;
    bool operator<(const HalfEdge &o) const { return dest < o.dest; }
  };

  SearchGraph(size_t num_vertexes, const std::vector<Edge> &edges);
  ~SearchGraph();

  // Set the goal vertex.
  void SetGoal(size_t vertex);

  // Returns the cost to get from vertex to the goal.
  double GetCostToGoal(size_t vertex);

  // For walking the graph yourself.
  const std::vector<HalfEdge> &Neighbors(size_t vertex) {
    assert(vertex < vertexes_.size());
    return vertexes_[vertex].forward;
  }

  size_t num_vertexes() const { return vertexes_.size(); }

 private:
  size_t goal_vertex_ = std::numeric_limits<size_t>::max();
  struct Vertex {
    std::vector<HalfEdge> forward;
    std::vector<HalfEdge> reverse;
    double cached_distance = std::numeric_limits<double>::infinity();

    bool operator<(const Vertex &o) const {
      return cached_distance > o.cached_distance;
    }

    IntrusivePriorityQueue<Vertex>::QueueEntry *entry_ = nullptr;
  };

  double GetCost(const HalfEdge &edge) { return edges_[edge.edge_id].cost; }
  std::vector<Vertex> vertexes_;
  std::vector<Edge> edges_;

  IntrusivePriorityQueue<Vertex> vertex_heap_;
};

}  // namespace arm
}  // namespace superstructure
}  // namespace control_loops
}  // namespace y2018

#endif  // Y2018_CONTROL_LOOPS_SUPERSTRUCTURE_ARM_GRAPH_H_