// Protocol Buffers - Google's data interchange format
// Copyright 2014 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Fast memory copying and comparison routines.
//   strings::fastmemcmp_inlined() replaces memcmp()
//   strings::memcpy_inlined() replaces memcpy()
//   strings::memeq(a, b, n) replaces memcmp(a, b, n) == 0
//
// strings::*_inlined() routines are inline versions of the
// routines exported by this module.  Sometimes using the inlined
// versions is faster.  Measure before using the inlined versions.
//
// Performance measurement:
//   strings::fastmemcmp_inlined
//     Analysis: memcmp, fastmemcmp_inlined, fastmemcmp
//     2012-01-30
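//
// Illustrative usage (a sketch only; src and len are hypothetical
// caller-supplied values, and in this header the routines are declared in
// namespace google::protobuf::internal rather than strings::):
//
//   using google::protobuf::internal::fastmemcmp_inlined;
//   using google::protobuf::internal::memcpy_inlined;
//   using google::protobuf::internal::memeq;
//
//   char dst[16];
//   memcpy_inlined(dst, src, len);                  // fastest when len <= 16
//   if (memeq(dst, src, len)) { /* regions match */ }
//   int order = fastmemcmp_inlined(dst, src, len);  // 0 iff equal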

#ifndef GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
#define GOOGLE_PROTOBUF_STUBS_FASTMEM_H_

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#include <google/protobuf/stubs/common.h>

namespace google {
namespace protobuf {
namespace internal {

// Return true if the n bytes at a equal the n bytes at b.
// The regions are allowed to overlap.
//
// The performance is similar to the performance of memcmp(), but faster for
// moderately-sized inputs, or inputs that share a common prefix and differ
// somewhere in their last 8 bytes.  Further optimizations can be added later
// if it makes sense to do so.
inline bool memeq(const char* a, const char* b, size_t n) {
  size_t n_rounded_down = n & ~static_cast<size_t>(7);
  if (GOOGLE_PREDICT_FALSE(n_rounded_down == 0)) {  // n <= 7
    return memcmp(a, b, n) == 0;
  }
  // n >= 8
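  // Checking the first and last 8 bytes with unaligned loads also handles the
  // case 8 <= n <= 16 completely: the two 8-byte windows overlap and together
  // cover the whole range.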
  uint64 u = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
  uint64 v =
      GOOGLE_UNALIGNED_LOAD64(a + n - 8) ^ GOOGLE_UNALIGNED_LOAD64(b + n - 8);
  if ((u | v) != 0) {  // The first or last 8 bytes differ.
    return false;
  }
  a += 8;
  b += 8;
  n = n_rounded_down - 8;
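  // Only the middle of the region is left to compare: the bytes beyond
  // n_rounded_down were already covered by the trailing 8-byte load above.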
  if (n > 128) {
    // As of 2012, memcmp on x86-64 uses a big unrolled loop with SSE2
    // instructions, and while we could try to do something faster, it
    // doesn't seem worth pursuing.
    return memcmp(a, b, n) == 0;
  }
  for (; n >= 16; n -= 16) {
    uint64 x = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
    uint64 y = GOOGLE_UNALIGNED_LOAD64(a + 8) ^ GOOGLE_UNALIGNED_LOAD64(b + 8);
    if ((x | y) != 0) {
      return false;
    }
    a += 16;
    b += 16;
  }
  // n must be 0 or 8 now because it was a multiple of 8 at the top of the loop.
  return n == 0 || GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b);
}

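// Replacement for memcmp() (see the module comment above): compares the first
// n bytes of a and b, returning zero if and only if they are equal.  Inputs of
// 64 bytes or more simply fall through to memcmp().  Note that when char is
// signed, the sign of a nonzero result may differ from memcmp()'s for bytes
// with the high bit set.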
inline int fastmemcmp_inlined(const char *a, const char *b, size_t n) {
  if (n >= 64) {
    return memcmp(a, b, n);
  }
  const char* a_limit = a + n;
  while (a + sizeof(uint64) <= a_limit &&
         GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b)) {
    a += sizeof(uint64);
    b += sizeof(uint64);
  }
  if (a + sizeof(uint32) <= a_limit &&
      GOOGLE_UNALIGNED_LOAD32(a) == GOOGLE_UNALIGNED_LOAD32(b)) {
    a += sizeof(uint32);
    b += sizeof(uint32);
  }
  while (a < a_limit) {
    int d = static_cast<uint32>(*a++) - static_cast<uint32>(*b++);
    if (d) return d;
  }
  return 0;
}

// The standard memcpy operation is slow for variable small sizes.
// This implementation inlines the optimal realization for sizes 1 to 16.
// To avoid code bloat, don't use it in spots that are not performance-critical,
// or when sizes of 16 bytes or less are not expected to be frequent.
inline void memcpy_inlined(char *dst, const char *src, size_t size) {
  // Compiler inlines code with minimal amount of data movement when third
  // parameter of memcpy is a constant.
  switch (size) {
    case 1: memcpy(dst, src, 1); break;
    case 2: memcpy(dst, src, 2); break;
    case 3: memcpy(dst, src, 3); break;
    case 4: memcpy(dst, src, 4); break;
    case 5: memcpy(dst, src, 5); break;
    case 6: memcpy(dst, src, 6); break;
    case 7: memcpy(dst, src, 7); break;
    case 8: memcpy(dst, src, 8); break;
    case 9: memcpy(dst, src, 9); break;
    case 10: memcpy(dst, src, 10); break;
    case 11: memcpy(dst, src, 11); break;
    case 12: memcpy(dst, src, 12); break;
    case 13: memcpy(dst, src, 13); break;
    case 14: memcpy(dst, src, 14); break;
    case 15: memcpy(dst, src, 15); break;
    case 16: memcpy(dst, src, 16); break;
    default: memcpy(dst, src, size); break;
  }
}
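
// Example (an illustrative sketch; buf, field and field_len are hypothetical):
// copying a small variable-length field where sizes of 16 bytes or fewer are
// the common case.
//
//   char buf[16];
//   const char* field = ...;    // source bytes
//   size_t field_len = ...;     // usually <= 16
//   memcpy_inlined(buf, field, field_len);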

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // GOOGLE_PROTOBUF_STUBS_FASTMEM_H_